tahoe_lafs-1.20.0/.coveragerc0000644000000000000000000000134413615410400012742 0ustar00# -*- mode: conf -*- [run] # only record trace data for allmydata.* source = allmydata # and don't trace the test files themselves, or generated files omit = */allmydata/test/* */allmydata/_version.py parallel = True branch = True [report] show_missing = True skip_covered = True [paths] source = # It looks like this in the checkout src/ # It looks like this in the GitHub Actions Windows build environment D:/a/tahoe-lafs/tahoe-lafs/.tox/py*-coverage/Lib/site-packages/ # Although sometimes it looks like this instead. Also it looks like this on macOS. .tox/py*-coverage/lib/python*/site-packages/ # On some Linux CI jobs it looks like this /tmp/tahoe-lafs.tox/py*-coverage/lib/python*/site-packages/ tahoe_lafs-1.20.0/CREDITS0000644000000000000000000001374113615410400011645 0ustar00This is at least a partial credits-file of people that have contributed to the Tahoe-LAFS project. It is formatted to allow easy grepping and beautification by scripts. The fields are: name (N), email (E), web-address (W), PGP key ID and fingerprint (P), physical location (S), and description (D). Thanks. ---------- N: Brian Warner E: warner@lothar.com P: A476 E2E6 1188 0C98 5B3C 3A39 0386 E81B 11CA A07A D: main developer N: Zooko E: zooko@zooko.com D: main developer N: Daira Hopwood E: daira@jacaranda.org P: 3D6A 08E9 1262 3E9A 00B2 1BDC 067F 4920 98CF 2762 S: Manchester, UK D: main developer N: Faried Nawaz E: self@node.pk W: http://www.hungry.com/~fn/ P: 0x09ECEC06, 19 41 1B 3E 25 98 F5 0A 0D 50 F9 37 1B 98 1A FF 09 EC EC 06 D: added private publish, added display of file size N: Arno Waschk E: hamamatsu@gmx.de W: www.arnowaschk.de D: improve logging, documentation, testing/porting/packaging (cygwin) N: Arc O Median D: bug reports N: RobK D: code N: Nathan Wilcox E: nejucomo@gmail.com D: unit tests, attack example, documentation, Linux FUSE interface N: Mike Booker D: documentation (README.win32) N: David Reid E: dreid@dreid.org D: make the provisioning page work in py2exe and py2app packages N: Paul Gerhardt D: don't emit error to stdout when testing for error in make check-deps N: Armin Rigo D: Linux FUSE interface "b" N: Justin Boreta D: user testing and docs N: Chris Galvan E: cgalvan@enthought.com D: packaging, buildbot N: François Deppierraz E: francois@ctrlaltdel.ch D: encodings, duplicity, debugging, FUSE, docs, FreeBSD, WUI, ARM, NEWS N: Larry Hosken E: tahoe@lahosken.san-francisco.ca.us D: make cp -r ignore dangling symlinks N: Toby Murray E: toby.murray@comlab.ox.ac.uk D: add web.ambient_upload_authority option, remove it N: Shawn Willden E: shawn-tahoe@willden.org D: mathematical analysis, code review, Win32 documentation N: Nils Durner E: ndurner@googlemail.com D: security bug report, darcsver fix, Windows build docs, ftpd docs, bzr patch N: Kevin Reid E: kpreid@switchb.org D: security bug report, W3 standards, new improved WUI style N: Alberto Berti E: alberto@metapensiero.it W: http://www.metapensiero.it S: via Filatoi, 1 38068 Rovereto (TN), Italy D: improvements to the CLI: exclude patterns, usage text N: DarKnesS_WOlF D: patch Makefile to build .debs for Ubuntu Intrepid N: Kevan Carstensen E: kevan@isnotajoke.com D: Tahoe-LAFS Hacker; MDMF, security, other improvements, code review, docs N: Marc Tooley W: http://rune.ca P: 0xD5A7EE69911DF5CF D: port to NetBSD, help debugging Crypto++ bug N: Sam Mason D: edited docs/running.rst N: Jacob Appelbaum E: jacob@appelbaum.com W: http://www.appelbaum.net/ P: 12E4 04FF 
D3C9 31F9 3405 2D06 B884 1A91 9D0F ACE4 D: Debian packaging including init scripts D: Note that contributions from Jacob Appelbaum (ioerror) are no longer welcome D: due to behavior unacceptable to community standards in Tor and other projects N: Jeremy Visser D: Ubuntu packaging, usability testing N: Jeremy Fitzhardinge D: better support for HTTP range queries N: Frédéric Marti E: freestorm77@gmail.com P: 0xD703AE08, F1 82 35 BB FF D8 96 0B 68 E2 91 2F C4 B8 6A 42 D7 03 AE 08 S: Lausanne - Switzerland D: fix layout issue and server version numbers in WUI N: Jacob Lyles E: jacob.lyles@gmail.com D: fixed bug in WUI with Python 2.5 and a system clock set far in the past N: Ravi Pinjala E: ravi@p-static.net D: converted docs from .txt to .rst N: Josh Wilcox D: docs, return None from get_stats() when there aren't enough samples N: Vince_II D: fix incorrect name of other doc file in docs/configuration.rst N: Markus Reichelt E: mr@mareichelt.com W: http://mareichelt.com/ P: DCB3 281F 38B0 711A 41C0 DC20 EE8D 363D 1687 9738 D: packaging for Slackware on SlackBuilds.org, bug reports N: Peter Le Bek E: peter@hyperplex.org P: 0x9BAC3E97, 79CA 34B3 7272 A3CF 82AC 5655 F55A 5B63 9BAC 3E97 D: mtime in ftpd N: Andrew Miller E: amiller@dappervision.com W: http://soc1024.com P: 0xE3787A7250538F3F DEB3 132A 7FBA 37A5 03AC A462 E378 7A72 5053 8F3F D: bugfixes, patches N: Frederick B E: freddy@flashpad ? D: fix wrong index in tahoe cp --verbose N: Patrick McDonald E: marlowe@antagonism.org D: documentation N: Mark Berger E: mark.berger.j@gmail.com D: servers of happiness N: Leif Ryge E: leif@synthesize.us D: bugfixes, documentation, web UI N: Tony Arcieri E: tony.arcieri@gmail.com D: web UI improvements N: A. Montero E: amontero@tinet.org D: documentation N: Ramakrishnan Muthukrishnan E: ram@rkrishnan.org D: Mac OS X packaging, Debian package maintainer N: Loose Cannon E: lcstyle@gmail.com D: fix the Download! button on the Welcome page N: Jean-Paul Calderone E: exarkun@twistedmatrix.com D: support SFTP public key authentication. N: David Stainton E: dstainton415@gmail.com D: various bug-fixes and features N: meejah E: meejah@meejah.ca P: 0xC2602803128069A7, 9D5A 2BD5 688E CB88 9DEB CD3F C260 2803 1280 69A7 D: various bug-fixes and features N: Chad Whitacre E: chad@zetaweb.com D: Python3 porting N: Itamar Turner-Trauring E: itamar@pythonspeed.com D: Python3 porting N: Jason R. Coombs E: jaraco@jaraco.com D: Python3 porting N: Maciej Fijalkowski E: fijall@gmail.com D: Python3 porting N: Ross Patterson E: me@rpatterson.net D: Python3 porting N: Sajith Sasidharan E: sajith@hcoop.net D: Python3 porting N: Pete Fein E: pete@snake.dev D: Python3 porting N: Viktoriia Savchuk W: https://twitter.com/viktoriiasvchk D: Developer community focused improvements on the README file. N: Lukas Pirl E: tahoe@lukas-pirl.de W: http://lukas-pirl.de D: Buildslaves (Debian, Fedora, CentOS; 2016-2021) N: Anxhelo Lushka E: anxhelo1995@gmail.com D: Web site design and updates N: Fon E. 
Noel E: fenn25.fn@gmail.com D: bug-fixes and refactoring N: Jehad Baeth E: jehad@leastauthority.com D: Documentation improvement N: May-Lee Sia E: mayleesia@gmail.com D: Community-manager and documentation improvements N: Yash Nayani E: yashaswi.nram@gmail.com D: Installation Guide improvements N: Florian Sesser E: florian@private.storage D: OpenMetrics supporttahoe_lafs-1.20.0/Makefile0000644000000000000000000002213513615410400012262 0ustar00# Tahoe LFS Development and maintenance tasks # # NOTE: this Makefile requires GNU make ### Defensive settings for make: # https://tech.davis-hansson.com/p/make/ SHELL := bash .ONESHELL: .SHELLFLAGS := -xeu -o pipefail -c .SILENT: .DELETE_ON_ERROR: MAKEFLAGS += --warn-undefined-variables MAKEFLAGS += --no-builtin-rules # Local target variables PYTHON=python export PYTHON PYFLAKES=flake8 export PYFLAKES VIRTUAL_ENV=./.tox/py37 SOURCES=src/allmydata static misc setup.py APPNAME=tahoe-lafs TEST_SUITE=allmydata # Top-level, phony targets .PHONY: default default: @echo "no default target" .PHONY: test ## Run all tests and code reports test: .tox/create-venvs.log # Run codechecks first since it takes the least time to report issues early. tox --develop -e codechecks # Run all the test environments in parallel to reduce run-time tox --develop -p auto -e 'py37' .PHONY: test-venv-coverage ## Run all tests with coverage collection and reporting. test-venv-coverage: # Special handling for reporting coverage even when the test run fails rm -f ./.coverage.* test_exit= $(VIRTUAL_ENV)/bin/coverage run -m twisted.trial --rterrors --reporter=timing \ $(TEST_SUITE) || test_exit="$$?" $(VIRTUAL_ENV)/bin/coverage combine $(VIRTUAL_ENV)/bin/coverage xml || true $(VIRTUAL_ENV)/bin/coverage report if [ ! -z "$$test_exit" ]; then exit "$$test_exit"; fi .PHONY: test-py3-all ## Run all tests under Python 3 test-py3-all: .tox/create-venvs.log tox --develop -e py37 allmydata # This is necessary only if you want to automatically produce a new # _version.py file from the current git history (without doing a build). .PHONY: make-version make-version: $(PYTHON) ./setup.py update_version # Build OS X pkg packages. 
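# Illustrative usage of the nearby targets (not an official workflow; assumes
# GNU make, a git checkout with history, and a Python able to run setup.py):
#
#   make make-version     # regenerate src/allmydata/_version.py from git
#   make build-osx-pkg    # run misc/build_helpers/build-osx-pkg.sh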
.PHONY: build-osx-pkg build-osx-pkg: misc/build_helpers/build-osx-pkg.sh $(APPNAME) .PHONY: test-osx-pkg test-osx-pkg: $(PYTHON) misc/build_helpers/test-osx-pkg.py .PHONY: upload-osx-pkg upload-osx-pkg: # [Failure instance: Traceback: : [('SSL routines', 'ssl3_read_bytes', 'tlsv1 alert unknown ca'), ('SSL routines', 'ssl3_write_bytes', 'ssl handshake failure')] # # @echo "uploading to ~tahoe-tarballs/OS-X-packages/ via flappserver" # @if [ "X${BB_BRANCH}" = "Xmaster" ] || [ "X${BB_BRANCH}" = "X" ]; then \ # flappclient --furlfile ~/.tahoe-osx-pkg-upload.furl upload-file tahoe-lafs-*-osx.pkg; \ # else \ # echo not uploading tahoe-lafs-osx-pkg because this is not trunk but is branch \"${BB_BRANCH}\" ; \ # fi .PHONY: code-checks #code-checks: build version-and-path check-interfaces check-miscaptures -find-trailing-spaces -check-umids pyflakes code-checks: check-interfaces check-debugging check-miscaptures -find-trailing-spaces -check-umids pyflakes .PHONY: check-interfaces check-interfaces: $(PYTHON) misc/coding_tools/check-interfaces.py 2>&1 |tee violations.txt @echo .PHONY: check-debugging check-debugging: $(PYTHON) misc/coding_tools/check-debugging.py @echo .PHONY: check-miscaptures check-miscaptures: $(PYTHON) misc/coding_tools/check-miscaptures.py $(SOURCES) 2>&1 |tee miscaptures.txt @echo .PHONY: pyflakes pyflakes: $(PYFLAKES) $(SOURCES) |sort |uniq @echo .PHONY: check-umids check-umids: $(PYTHON) misc/coding_tools/check-umids.py `find $(SOURCES) -name '*.py' -not -name 'old.py'` @echo .PHONY: -check-umids -check-umids: -$(PYTHON) misc/coding_tools/check-umids.py `find $(SOURCES) -name '*.py' -not -name 'old.py'` @echo .PHONY: doc-checks doc-checks: check-rst .PHONY: check-rst check-rst: @for x in `find *.rst docs -name "*.rst"`; do rst2html -v $${x} >/dev/null; done 2>&1 |grep -v 'Duplicate implicit target name:' @echo .PHONY: count-lines count-lines: @echo -n "files: " @find src -name '*.py' |grep -v /build/ |wc -l @echo -n "lines: " @cat `find src -name '*.py' |grep -v /build/` |wc -l @echo -n "TODO: " @grep TODO `find src -name '*.py' |grep -v /build/` | wc -l @echo -n "XXX: " @grep XXX `find src -name '*.py' |grep -v /build/` | wc -l # Here is a list of testing tools that can be run with 'python' from a # virtualenv in which Tahoe has been installed. There used to be Makefile # targets for each, but the exact path to a suitable python is now up to the # developer. But as a hint, after running 'tox', ./.tox/py37/bin/python will # probably work. # src/allmydata/test/bench_dirnode.py # The check-grid target also uses a pre-established client node, along with a # long-term directory that contains some well-known files. See the docstring # in src/allmydata/test/check_grid.py to see how to set this up. ##.PHONY: check-grid ##check-grid: .built ## if [ -z '$(TESTCLIENTDIR)' ]; then exit 1; fi ## $(TAHOE) @src/allmydata/test/check_grid.py $(TESTCLIENTDIR) bin/tahoe .PHONY: test-get-ignore test-git-ignore: $(MAKE) $(PYTHON) misc/build_helpers/test-git-ignore.py .PHONY: test-clean test-clean: find . |grep -vEe "allfiles.tmp|src/allmydata/_(version|appname).py" |sort >allfiles.tmp.old $(MAKE) $(MAKE) distclean find . 
|grep -vEe "allfiles.tmp|src/allmydata/_(version|appname).py" |sort >allfiles.tmp.new diff allfiles.tmp.old allfiles.tmp.new # It would be nice if 'make clean' deleted any automatically-generated # _version.py too, so that 'make clean; make all' could be useable as a # "what the heck is going on, get me back to a clean state', but we need # 'make clean' to work on non-checkout trees without destroying useful information. # Use 'make distclean' instead to delete all generated files. .PHONY: clean clean: rm -rf build _trial_temp .built rm -f `find src *.egg -name '*.so' -or -name '*.pyc'` rm -rf support dist rm -rf `ls -d *.egg | grep -vEe"setuptools-|setuptools_darcs-|darcsver-"` rm -rf *.pyc rm -f *.pkg .PHONY: distclean distclean: clean rm -rf src/*.egg-info rm -f src/allmydata/_version.py rm -f src/allmydata/_appname.py rm -rf ./.tox/ .PHONY: find-trailing-spaces find-trailing-spaces: $(PYTHON) misc/coding_tools/find-trailing-spaces.py -r $(SOURCES) @echo .PHONY: -find-trailing-spaces -find-trailing-spaces: -$(PYTHON) misc/coding_tools/find-trailing-spaces.py -r $(SOURCES) @echo .PHONY: fetch-and-unpack-deps fetch-and-unpack-deps: @echo "test-and-unpack-deps is obsolete" .PHONY: test-desert-island test-desert-island: @echo "test-desert-island is obsolete" .PHONY: test-pip-install test-pip-install: @echo "test-pip-install is obsolete" # TARBALL GENERATION .PHONY: tarballs tarballs: # delegated to tox, so setup.py can update setuptools if needed tox -e tarballs .PHONY: upload-tarballs upload-tarballs: @if [ "X${BB_BRANCH}" = "Xmaster" ] || [ "X${BB_BRANCH}" = "X" ]; then for f in dist/*; do flappclient --furlfile ~/.tahoe-tarball-upload.furl upload-file $$f; done ; else echo not uploading tarballs because this is not trunk but is branch \"${BB_BRANCH}\" ; fi # Real targets src/allmydata/_version.py: $(MAKE) make-version .tox/create-venvs.log: tox.ini setup.py tox --notest -p all | tee -a "$(@)" # to make a new release: # - create a ticket for the release in Trac # - ensure local copy is up-to-date # - create a branch like "XXXX.release" from up-to-date master # - in the branch, run "make release" # - run "make release-test" # - perform any other sanity-checks on the release # - run "make release-upload" # Note that several commands below hard-code "meejah"; if you are # someone else please adjust them. release: @echo "Is checkout clean?" git diff-files --quiet git diff-index --quiet --cached HEAD -- @echo "Clean docs build area" rm -rf docs/_build/ @echo "Install required build software" python3 -m pip install --editable .[build] @echo "Test README" python3 setup.py check -r -s @echo "Update NEWS" python3 -m towncrier build --yes --version `python3 misc/build_helpers/update-version.py --no-tag` git add -u git commit -m "update NEWS for release" # note that this always bumps the "middle" number, e.g. 
from 1.17.1 -> 1.18.0 # and produces a tag into the Git repository @echo "Bump version and create tag" python3 misc/build_helpers/update-version.py @echo "Build and sign wheel" python3 setup.py bdist_wheel gpg --pinentry=loopback -u meejah@meejah.ca --armor --detach-sign dist/tahoe_lafs-`git describe | cut -b 12-`-py3-none-any.whl ls dist/*`git describe | cut -b 12-`* @echo "Build and sign source-dist" python3 setup.py sdist gpg --pinentry=loopback -u meejah@meejah.ca --armor --detach-sign dist/tahoe-lafs-`git describe | cut -b 12-`.tar.gz ls dist/*`git describe | cut -b 12-`* # basically just a bare-minimum smoke-test that it installs and runs release-test: gpg --verify dist/tahoe-lafs-`git describe | cut -b 12-`.tar.gz.asc gpg --verify dist/tahoe_lafs-`git describe | cut -b 12-`-py3-none-any.whl.asc virtualenv testmf_venv testmf_venv/bin/pip install dist/tahoe_lafs-`git describe | cut -b 12-`-py3-none-any.whl testmf_venv/bin/tahoe --version rm -rf testmf_venv release-upload: scp dist/*`git describe | cut -b 12-`* meejah@tahoe-lafs.org:/home/source/downloads git push origin_push tahoe-lafs-`git describe | cut -b 12-` twine upload dist/tahoe_lafs-`git describe | cut -b 12-`-py3-none-any.whl dist/tahoe_lafs-`git describe | cut -b 12-`-py3-none-any.whl.asc dist/tahoe-lafs-`git describe | cut -b 12-`.tar.gz dist/tahoe-lafs-`git describe | cut -b 12-`.tar.gz.asc tahoe_lafs-1.20.0/NEWS.rst0000644000000000000000000061206013615410400012132 0ustar00.. -*- coding: utf-8-with-signature -*- ================================== User-Visible Changes in Tahoe-LAFS ================================== .. towncrier start line Release 1.20.0 (2024-12-03) ''''''''''''''''''''''''''' Backwards Incompatible Changes ------------------------------ - Properly interpret "tahoe create --storage-dir" as an option. Versions 1.19.0 and older interpreted "--storage-dir" as a "flag" and thus wouldn't work properly. (`#4110 `_) Features -------- - Continued work to make Tahoe-LAFS take advantage of multiple CPUs. (`#4072 `_) - Mutable directories can now be created with a pre-determined "signature key" via the web API using the "private-key=..." parameter. The "private-key" value must be a DER-encoded 2048-bit RSA private key in urlsafe base64 encoding. (`#4094 `_) Bug Fixes --------- - Upgrade CBOR, fixing encoding on 65KiB++ strings (`#4087 `_) - Stop using the C version of the cbor2 decoder. (`#4088 `_) - Fix incompatibility with cryptography 43. (`#4100 `_) - Fix incompatibility with attrs 24.1. (`#4101 `_) Dependency/Installation Changes ------------------------------- - Now using the "hatch" build system, and pyproject.toml (exclusively) (`#4133 `_) Documentation Changes --------------------- - Tahoe-LAFS manual's table of contents page has been reorganized. (`#3636 `_) - Add a global Sphinx generated glossary. Link the static GBS glossary to the global glossary. (`#4116 `_) Misc/Other ---------- - `#3636 `_, `#4076 `_, `#4082 `_, `#4085 `_, `#4090 `_, `#4091 `_, `#4092 `_, `#4093 `_, `#4114 `_, `#4115 `_, `#4126 `_, `#4130 `_, `#4132 `_, `#4134 `_, `#4141 `_ Release 1.19.0 (2024-01-04) ''''''''''''''''''''''''''' Features -------- - Tahoe-LAFS now includes a new "Grid Manager" specification and implementation adding more options to control which storage servers a client will use for uploads. (`#2916 `_) - Added support for Python 3.12, and work with Eliot 1.15 (`#3072 `_) - `tahoe run ...` will now exit when its stdin is closed. This facilitates subprocess management, specifically cleanup. 
When a parent process is running tahoe and exits without time to do "proper" cleanup at least the stdin descriptor will be closed. Subsequently "tahoe run" notices this and exits. (`#3921 `_) - Mutable objects can now be created with a pre-determined "signature key" using the ``tahoe put`` CLI or the HTTP API. This enables deterministic creation of mutable capabilities. This feature must be used with care to preserve the normal security and reliability properties. (`#3962 `_) - Added support for Python 3.11. (`#3982 `_) - tahoe run now accepts --allow-stdin-close to mean "keep running if stdin closes" (`#4036 `_) - The storage server and client now support a new, HTTPS-based protocol. (`#4041 `_) - Started work on a new end-to-end benchmarking framework. (`#4060 `_) - Some operations now run in threads, improving the responsiveness of Tahoe nodes. (`#4068 `_) - Logs are now written in a thread, which should make the application more responsive under load. (`#4804 `_) Bug Fixes --------- - Provide better feedback from plugin configuration errors Local errors now print a useful message and exit. Announcements that only contain invalid / unusable plugins now show a message in the Welcome page. (`#3899 `_) - Work with (and require) newer versions of pycddl. (`#3938 `_) - Uploading immutables will now better use available bandwidth, which should allow for faster uploads in many cases. (`#3939 `_) - Downloads of large immutables should now finish much faster. (`#3946 `_) - Fix incompatibility with transitive dependency charset_normalizer >= 3 when using PyInstaller. (`#3966 `_) - A bug where Introducer nodes configured to listen on Tor or I2P would not actually do so has been fixed. (`#3999 `_) - The (still off-by-default) HTTP storage client will now use Tor when Tor-based client-side anonymity was requested. Previously it would use normal TCP connections and not be anonymous. (`#4029 `_) - Provide our own copy of attrs' "provides()" validator This validator is deprecated and slated for removal; that project's suggestion is to copy the code to our project. (`#4056 `_) - Fix a race condition with SegmentFetcher (`#4078 `_) Dependency/Installation Changes ------------------------------- - tenacity is no longer a dependency. (`#3989 `_) Documentation Changes --------------------- - Several minor errors in the Great Black Swamp proposed specification document have been fixed. (`#3922 `_) - Document the ``force_foolscap`` configuration options for ``[storage]`` and ``[client]``. (`#4039 `_) Removed Features ---------------- - Python 3.7 is no longer supported, and Debian 10 and Ubuntu 18.04 are no longer tested. (`#3964 `_) Other Changes ------------- - The integration test suite now includes a set of capability test vectors (``integration/vectors/test_vectors.yaml``) which can be used to verify compatibility between Tahoe-LAFS and other implementations. 
(`#3961 `_) Misc/Other ---------- - `#3508 `_, `#3622 `_, `#3783 `_, `#3870 `_, `#3874 `_, `#3880 `_, `#3904 `_, `#3910 `_, `#3914 `_, `#3917 `_, `#3927 `_, `#3928 `_, `#3935 `_, `#3936 `_, `#3937 `_, `#3940 `_, `#3942 `_, `#3944 `_, `#3947 `_, `#3950 `_, `#3952 `_, `#3953 `_, `#3954 `_, `#3956 `_, `#3958 `_, `#3959 `_, `#3960 `_, `#3965 `_, `#3967 `_, `#3968 `_, `#3969 `_, `#3970 `_, `#3971 `_, `#3974 `_, `#3975 `_, `#3976 `_, `#3978 `_, `#3987 `_, `#3988 `_, `#3991 `_, `#3993 `_, `#3994 `_, `#3996 `_, `#3998 `_, `#4000 `_, `#4001 `_, `#4002 `_, `#4003 `_, `#4004 `_, `#4005 `_, `#4006 `_, `#4009 `_, `#4010 `_, `#4012 `_, `#4014 `_, `#4015 `_, `#4016 `_, `#4018 `_, `#4019 `_, `#4020 `_, `#4022 `_, `#4023 `_, `#4024 `_, `#4026 `_, `#4027 `_, `#4028 `_, `#4035 `_, `#4038 `_, `#4040 `_, `#4042 `_, `#4044 `_, `#4046 `_, `#4047 `_, `#4049 `_, `#4050 `_, `#4051 `_, `#4052 `_, `#4055 `_, `#4059 `_, `#4061 `_, `#4062 `_, `#4063 `_, `#4065 `_, `#4066 `_, `#4070 `_, `#4074 `_, `#4075 `_ Release 1.18.0 (2022-10-02) ''''''''''''''''''''''''''' Backwards Incompatible Changes ------------------------------ - Python 3.6 is no longer supported, as it has reached end-of-life and is no longer receiving security updates. (`#3865 `_) - Python 3.7 or later is now required; Python 2 is no longer supported. (`#3873 `_) - Share corruption reports stored on disk are now always encoded in UTF-8. (`#3879 `_) - Record both the PID and the process creation-time: a new kind of pidfile in `running.process` records both the PID and the creation-time of the process. This facilitates automatic discovery of a "stale" pidfile that points to a currently-running process. If the recorded creation-time matches the creation-time of the running process, then it is a still-running `tahoe run` process. Otherwise, the file is stale. The `twistd.pid` file is no longer present. (`#3926 `_) Features -------- - The implementation of SDMF and MDMF (mutables) now requires RSA keys to be exactly 2048 bits, aligning them with the specification. Some code existed to allow tests to shorten this and it's conceptually possible a modified client produced mutables with different key-sizes. However, the spec says that they must be 2048 bits. If you happen to have a capability with a key-size different from 2048 you may use 1.17.1 or earlier to read the content. (`#3828 `_) - "make" based release automation (`#3846 `_) Misc/Other ---------- - `#3327 `_, `#3526 `_, `#3697 `_, `#3709 `_, `#3786 `_, `#3788 `_, `#3802 `_, `#3816 `_, `#3855 `_, `#3858 `_, `#3859 `_, `#3860 `_, `#3867 `_, `#3868 `_, `#3871 `_, `#3872 `_, `#3875 `_, `#3876 `_, `#3877 `_, `#3881 `_, `#3882 `_, `#3883 `_, `#3889 `_, `#3890 `_, `#3891 `_, `#3893 `_, `#3895 `_, `#3896 `_, `#3898 `_, `#3900 `_, `#3909 `_, `#3913 `_, `#3915 `_, `#3916 `_ Release 1.17.1 (2022-01-07) ''''''''''''''''''''''''''' Bug Fixes --------- - Fixed regression on Python 3 causing the JSON version of the Welcome page to sometimes produce a 500 error (`#3852 `_) - Fixed regression on Python 3 where JSON HTTP POSTs failed to be processed. (`#3854 `_) Misc/Other ---------- - `#3848 `_, `#3849 `_, `#3850 `_, `#3856 `_ Release 1.17.0 (2021-12-06) ''''''''''''''''''''''''''' Security-related Changes ------------------------ - The introducer server no longer writes the sensitive introducer fURL value to its log at startup time. Instead it writes the well-known path of the file from which this value can be read. 
(`#3819 `_) - The storage protocol operation ``add_lease`` now safely rejects an attempt to add a 4,294,967,296th lease to an immutable share. Previously this failed with an error after recording the new lease in the share file, resulting in the share file losing track of a one previous lease. (`#3821 `_) - The storage protocol operation ``readv`` now safely rejects attempts to read negative lengths. Previously these read requests were satisfied with the complete contents of the share file (including trailing metadata) starting from the specified offset. (`#3822 `_) - The storage server implementation now respects the ``reserved_space`` configuration value when writing lease information and recording corruption advisories. Previously, new leases could be created and written to disk even when the storage server had less remaining space than the configured reserve space value. Now this operation will fail with an exception and the lease will not be created. Similarly, if there is no space available, corruption advisories will be logged but not written to disk. (`#3823 `_) - The storage server implementation no longer records corruption advisories about storage indexes for which it holds no shares. (`#3824 `_) - The lease-checker now uses JSON instead of pickle to serialize its state. tahoe will now refuse to run until you either delete all pickle files or migrate them using the new command:: tahoe admin migrate-crawler This will migrate all crawler-related pickle files. (`#3825 `_) - The SFTP server no longer accepts password-based credentials for authentication. Public/private key-based credentials are now the only supported authentication type. This removes plaintext password storage from the SFTP credentials file. It also removes a possible timing side-channel vulnerability which might have allowed attackers to discover an account's plaintext password. (`#3827 `_) - The storage server now keeps hashes of lease renew and cancel secrets for immutable share files instead of keeping the original secrets. (`#3839 `_) - The storage server now keeps hashes of lease renew and cancel secrets for mutable share files instead of keeping the original secrets. (`#3841 `_) Features -------- - Tahoe-LAFS releases now have just a .tar.gz source release and a (universal) wheel (`#3735 `_) - tahoe-lafs now provides its statistics also in OpenMetrics format (for Prometheus et. al.) at `/statistics?t=openmetrics`. (`#3786 `_) - If uploading an immutable hasn't had a write for 30 minutes, the storage server will abort the upload. (`#3807 `_) Bug Fixes --------- - When uploading an immutable, overlapping writes that include conflicting data are rejected. In practice, this likely didn't happen in real-world usage. (`#3801 `_) Dependency/Installation Changes ------------------------------- - Tahoe-LAFS now supports running on NixOS 21.05 with Python 3. (`#3808 `_) Documentation Changes --------------------- - The news file for future releases will include a section for changes with a security impact. (`#3815 `_) Removed Features ---------------- - The little-used "control port" has been removed from all node types. (`#3814 `_) Other Changes ------------- - Tahoe-LAFS no longer runs its Tor integration test suite on Python 2 due to the increased complexity of obtaining compatible versions of necessary dependencies. 
(`#3837 `_) Misc/Other ---------- - `#3525 `_, `#3527 `_, `#3754 `_, `#3758 `_, `#3784 `_, `#3792 `_, `#3793 `_, `#3795 `_, `#3797 `_, `#3798 `_, `#3799 `_, `#3800 `_, `#3805 `_, `#3806 `_, `#3810 `_, `#3812 `_, `#3820 `_, `#3829 `_, `#3830 `_, `#3831 `_, `#3832 `_, `#3833 `_, `#3834 `_, `#3835 `_, `#3836 `_, `#3838 `_, `#3842 `_, `#3843 `_, `#3847 `_ Release 1.16.0 (2021-09-17) ''''''''''''''''''''''''''' Backwards Incompatible Changes ------------------------------ - The Tahoe command line now always uses UTF-8 to decode its arguments, regardless of locale. (`#3588 `_) - tahoe backup's --exclude-from has been renamed to --exclude-from-utf-8, and correspondingly requires the file to be UTF-8 encoded. (`#3716 `_) Features -------- - Added 'typechecks' environment for tox running mypy and performing static typechecks. (`#3399 `_) - The NixOS-packaged Tahoe-LAFS now knows its own version. (`#3629 `_) Bug Fixes --------- - Fix regression that broke flogtool results on Python 2. (`#3509 `_) - Fix a logging regression on Python 2 involving unicode strings. (`#3510 `_) - Certain implementation-internal weakref KeyErrors are now handled and should no longer cause user-initiated operations to fail. (`#3539 `_) - SFTP public key auth likely works more consistently, and SFTP in general was previously broken. (`#3584 `_) - Fixed issue where redirecting old-style URIs (/uri/?uri=...) didn't work. (`#3590 `_) - ``tahoe invite`` will now read share encoding/placement configuration values from a Tahoe client node configuration file if they are not given on the command line, instead of raising an unhandled exception. (`#3650 `_) - Fix regression where uploading files with non-ASCII names failed. (`#3738 `_) - Fixed annoying UnicodeWarning message on Python 2 when running CLI tools. (`#3739 `_) - Fixed bug where share corruption events were not logged on storage servers running on Windows. (`#3779 `_) Dependency/Installation Changes ------------------------------- - Tahoe-LAFS now requires Twisted 19.10.0 or newer. As a result, it now has a transitive dependency on bcrypt. (`#1549 `_) - Debian 8 support has been replaced with Debian 10 support. (`#3326 `_) - Tahoe-LAFS no longer depends on Nevow. (`#3433 `_) - Tahoe-LAFS now requires the `netifaces` Python package and no longer requires the external `ip`, `ifconfig`, or `route.exe` executables. (`#3486 `_) - The Tahoe-LAFS project no longer commits to maintaining binary packages for all dependencies at . Please use PyPI instead. (`#3497 `_) - Tahoe-LAFS now uses a forked version of txi2p (named txi2p-tahoe) with Python 3 support. (`#3633 `_) - The Nix package now includes correct version information. (`#3712 `_) - Use netifaces 0.11.0 wheel package from PyPI.org if you use 64-bit Python 2.7 on Windows. VCPython27 downloads are no longer available at Microsoft's website, which has made building Python 2.7 wheel packages of Python libraries with C extensions (such as netifaces) on Windows difficult. (`#3733 `_) Configuration Changes --------------------- - The ``[client]introducer.furl`` configuration item is now deprecated in favor of the ``private/introducers.yaml`` file. (`#3504 `_) Documentation Changes --------------------- - Documentation now has its own towncrier category. (`#3664 `_) - `tox -e docs` will treat warnings about docs as errors. (`#3666 `_) - The visibility of the Tahoe-LAFS logo has been improved for "dark" themed viewing. 
(`#3677 `_) - A cheatsheet-style document for contributors was created at CONTRIBUTORS.rst (`#3682 `_) - Our IRC channel, #tahoe-lafs, has been moved to irc.libera.chat. (`#3721 `_) - Tahoe-LAFS project is now registered with Libera.Chat IRC network. (`#3726 `_) - Rewriting the installation guide for Tahoe-LAFS. (`#3747 `_) - Documentation and installation links in the README have been fixed. (`#3749 `_) - The Great Black Swamp proposed specification now includes sample interactions to demonstrate expected usage patterns. (`#3764 `_) - The Great Black Swamp proposed specification now includes a glossary. (`#3765 `_) - The Great Black Swamp specification now allows parallel upload of immutable share data. (`#3769 `_) - There is now a specification for the scheme which Tahoe-LAFS storage clients use to derive their lease renewal secrets. (`#3774 `_) - The Great Black Swamp proposed specification now has a simplified interface for reading data from immutable shares. (`#3777 `_) - tahoe-dev mailing list is now at tahoe-dev@lists.tahoe-lafs.org. (`#3782 `_) - The Great Black Swamp specification now describes the required authorization scheme. (`#3785 `_) - The "Great Black Swamp" proposed specification has been expanded to include two lease management APIs. (`#3037 `_) - The specification section of the Tahoe-LAFS documentation now includes explicit discussion of the security properties of Foolscap "fURLs" on which it depends. (`#3503 `_) - The README, revised by Viktoriia with feedback from the team, is now more focused on the developer community and provides more information about Tahoe-LAFS, why it's important, and how someone can use it or start contributing to it. (`#3545 `_) - The "Great Black Swamp" proposed specification has been changed use ``v=1`` as the URL version identifier. (`#3644 `_) - You can run `make livehtml` in docs directory to invoke sphinx-autobuild. (`#3663 `_) Removed Features ---------------- - Announcements delivered through the introducer system are no longer automatically annotated with copious information about the Tahoe-LAFS software version nor the versions of its dependencies. (`#3518 `_) - The stats gatherer, broken since at least Tahoe-LAFS 1.13.0, has been removed. The ``[client]stats_gatherer.furl`` configuration item in ``tahoe.cfg`` is no longer allowed. The Tahoe-LAFS project recommends using a third-party metrics aggregation tool instead. (`#3549 `_) - The deprecated ``tahoe`` start, restart, stop, and daemonize sub-commands have been removed. (`#3550 `_) - FTP is no longer supported by Tahoe-LAFS. Please use the SFTP support instead. (`#3583 `_) - Removed support for the Account Server frontend authentication type. 
(`#3652 `_) Other Changes ------------- - Refactored test_introducer in web tests to use custom base test cases (`#3757 `_) Misc/Other ---------- - `#2928 `_, `#3283 `_, `#3314 `_, `#3384 `_, `#3385 `_, `#3390 `_, `#3404 `_, `#3428 `_, `#3432 `_, `#3434 `_, `#3435 `_, `#3454 `_, `#3459 `_, `#3460 `_, `#3465 `_, `#3466 `_, `#3467 `_, `#3468 `_, `#3470 `_, `#3471 `_, `#3472 `_, `#3473 `_, `#3474 `_, `#3475 `_, `#3477 `_, `#3478 `_, `#3479 `_, `#3481 `_, `#3482 `_, `#3483 `_, `#3485 `_, `#3488 `_, `#3490 `_, `#3491 `_, `#3492 `_, `#3493 `_, `#3496 `_, `#3499 `_, `#3500 `_, `#3501 `_, `#3502 `_, `#3511 `_, `#3513 `_, `#3514 `_, `#3515 `_, `#3517 `_, `#3520 `_, `#3521 `_, `#3522 `_, `#3523 `_, `#3524 `_, `#3528 `_, `#3529 `_, `#3532 `_, `#3533 `_, `#3534 `_, `#3536 `_, `#3537 `_, `#3542 `_, `#3544 `_, `#3546 `_, `#3547 `_, `#3551 `_, `#3552 `_, `#3553 `_, `#3555 `_, `#3557 `_, `#3558 `_, `#3560 `_, `#3563 `_, `#3564 `_, `#3565 `_, `#3566 `_, `#3567 `_, `#3568 `_, `#3572 `_, `#3574 `_, `#3575 `_, `#3576 `_, `#3577 `_, `#3578 `_, `#3579 `_, `#3580 `_, `#3582 `_, `#3587 `_, `#3588 `_, `#3589 `_, `#3591 `_, `#3592 `_, `#3593 `_, `#3594 `_, `#3595 `_, `#3596 `_, `#3599 `_, `#3600 `_, `#3603 `_, `#3605 `_, `#3606 `_, `#3607 `_, `#3608 `_, `#3611 `_, `#3612 `_, `#3613 `_, `#3615 `_, `#3616 `_, `#3617 `_, `#3618 `_, `#3619 `_, `#3620 `_, `#3621 `_, `#3623 `_, `#3624 `_, `#3625 `_, `#3626 `_, `#3628 `_, `#3630 `_, `#3631 `_, `#3632 `_, `#3634 `_, `#3635 `_, `#3637 `_, `#3638 `_, `#3640 `_, `#3642 `_, `#3645 `_, `#3646 `_, `#3647 `_, `#3648 `_, `#3649 `_, `#3651 `_, `#3653 `_, `#3654 `_, `#3655 `_, `#3656 `_, `#3657 `_, `#3658 `_, `#3662 `_, `#3667 `_, `#3669 `_, `#3670 `_, `#3671 `_, `#3672 `_, `#3674 `_, `#3675 `_, `#3676 `_, `#3678 `_, `#3679 `_, `#3681 `_, `#3683 `_, `#3686 `_, `#3687 `_, `#3691 `_, `#3692 `_, `#3699 `_, `#3700 `_, `#3701 `_, `#3702 `_, `#3703 `_, `#3704 `_, `#3705 `_, `#3707 `_, `#3708 `_, `#3709 `_, `#3711 `_, `#3713 `_, `#3714 `_, `#3715 `_, `#3717 `_, `#3718 `_, `#3722 `_, `#3723 `_, `#3727 `_, `#3728 `_, `#3729 `_, `#3730 `_, `#3731 `_, `#3732 `_, `#3734 `_, `#3735 `_, `#3736 `_, `#3741 `_, `#3743 `_, `#3744 `_, `#3745 `_, `#3746 `_, `#3751 `_, `#3759 `_, `#3760 `_, `#3763 `_, `#3773 `_, `#3781 `_ Release 1.15.1 '''''''''''''' Misc/Other ---------- - `#3469 `_, `#3608 `_ Release 1.15.0 (2020-10-13) ''''''''''''''''''''''''''' Features -------- - PyPy is now a supported platform. (`#1792 `_) - allmydata.testing.web, a new module, now offers a supported Python API for testing Tahoe-LAFS web API clients. (`#3317 `_) Bug Fixes --------- - Make directory page links work. (`#3312 `_) - Use last known revision of Chutney that is known to work with Python 2 for Tor integration tests. (`#3348 `_) - Mutable files now use RSA exponent 65537 (`#3349 `_) Dependency/Installation Changes ------------------------------- - Tahoe-LAFS now supports CentOS 8 and no longer supports CentOS 7. (`#3296 `_) - Tahoe-LAFS now supports Ubuntu 20.04. (`#3328 `_) Removed Features ---------------- - The Magic Folder frontend has been split out into a stand-alone project. The functionality is no longer part of Tahoe-LAFS itself. Learn more at . (`#3284 `_) - Slackware 14.2 is no longer a Tahoe-LAFS supported platform. (`#3323 `_) Other Changes ------------- - The Tahoe-LAFS project has adopted a formal code of conduct. (`#2755 `_) - (`#3263 `_, `#3324 `_) - The "coverage" tox environment has been replaced by the "py27-coverage" and "py36-coverage" environments. 
(`#3355 `_) Misc/Other ---------- - `#3247 `_, `#3254 `_, `#3277 `_, `#3278 `_, `#3287 `_, `#3288 `_, `#3289 `_, `#3290 `_, `#3291 `_, `#3292 `_, `#3293 `_, `#3294 `_, `#3297 `_, `#3298 `_, `#3299 `_, `#3300 `_, `#3302 `_, `#3303 `_, `#3304 `_, `#3305 `_, `#3306 `_, `#3308 `_, `#3309 `_, `#3313 `_, `#3315 `_, `#3316 `_, `#3320 `_, `#3325 `_, `#3326 `_, `#3329 `_, `#3330 `_, `#3331 `_, `#3332 `_, `#3333 `_, `#3334 `_, `#3335 `_, `#3336 `_, `#3338 `_, `#3339 `_, `#3340 `_, `#3341 `_, `#3342 `_, `#3343 `_, `#3344 `_, `#3346 `_, `#3351 `_, `#3353 `_, `#3354 `_, `#3356 `_, `#3357 `_, `#3358 `_, `#3359 `_, `#3361 `_, `#3364 `_, `#3365 `_, `#3366 `_, `#3367 `_, `#3368 `_, `#3370 `_, `#3372 `_, `#3373 `_, `#3374 `_, `#3375 `_, `#3376 `_, `#3377 `_, `#3378 `_, `#3380 `_, `#3381 `_, `#3382 `_, `#3383 `_, `#3386 `_, `#3387 `_, `#3388 `_, `#3389 `_, `#3391 `_, `#3392 `_, `#3393 `_, `#3394 `_, `#3395 `_, `#3396 `_, `#3397 `_, `#3398 `_, `#3401 `_, `#3403 `_, `#3406 `_, `#3408 `_, `#3409 `_, `#3411 `_, `#3415 `_, `#3416 `_, `#3417 `_, `#3421 `_, `#3422 `_, `#3423 `_, `#3424 `_, `#3425 `_, `#3426 `_, `#3427 `_, `#3429 `_, `#3430 `_, `#3431 `_, `#3436 `_, `#3437 `_, `#3438 `_, `#3439 `_, `#3440 `_, `#3442 `_, `#3443 `_, `#3446 `_, `#3448 `_, `#3449 `_, `#3450 `_, `#3451 `_, `#3452 `_, `#3453 `_, `#3455 `_, `#3456 `_, `#3458 `_, `#3462 `_, `#3463 `_, `#3464 `_ Release 1.14.0 (2020-03-11) ''''''''''''''''''''''''''' Features -------- - Magic-Folders are now supported on macOS. (`#1432 `_) - Add a "tox -e draftnews" which runs towncrier in draft mode (`#2942 `_) - Fedora 29 is now tested as part of the project's continuous integration system. (`#2955 `_) - The Magic-Folder frontend now emits structured, causal logs. This makes it easier for developers to make sense of its behavior and for users to submit useful debugging information alongside problem reports. (`#2972 `_) - The `tahoe` CLI now accepts arguments for configuring structured logging messages which Tahoe-LAFS is being converted to emit. This change does not introduce any new defaults for on-filesystem logging. (`#2975 `_) - The web API now publishes streaming Eliot logs via a token-protected WebSocket at /private/logs/v1. (`#3006 `_) - End-to-end in-memory tests for websocket features (`#3041 `_) - allmydata.interfaces.IFoolscapStoragePlugin has been introduced, an extension point for customizing the storage protocol. (`#3049 `_) - Static storage server "announcements" in ``private/servers.yaml`` are now individually logged and ignored if they cannot be interpreted. (`#3051 `_) - Storage servers can now be configured to load plugins for allmydata.interfaces.IFoolscapStoragePlugin and offer them to clients. (`#3053 `_) - Storage clients can now be configured to load plugins for allmydata.interfaces.IFoolscapStoragePlugin and use them to negotiate with servers. (`#3054 `_) - The [storage] configuration section now accepts a boolean *anonymous* item to enable or disable anonymous storage access. The default behavior remains unchanged. (`#3184 `_) - Enable the helper when creating a node with `tahoe create-node --helper` (`#3235 `_) Bug Fixes --------- - refactor initialization code to be more async-friendly (`#2870 `_) - Configuration-checking code wasn't being called due to indenting (`#2935 `_) - refactor configuration handling out of Node into _Config (`#2936 `_) - "tox -e codechecks" no longer dirties the working tree. (`#2941 `_) - Updated the Tor release key, used by the integration tests. 
(`#2944 `_) - `tahoe backup` no longer fails with an unhandled exception when it encounters a special file (device, fifo) in the backup source. (`#2950 `_) - Magic-Folders now creates spurious conflict files in fewer cases. In particular, if files are added to the folder while a client is offline, that client will not create conflict files for all those new files when it starts up. (`#2965 `_) - The confusing and misplaced sub-command group headings in `tahoe --help` output have been removed. (`#2976 `_) - The Magic-Folder frontend is now more responsive to subtree changes on Windows. (`#2997 `_) - remove ancient bundled jquery and d3, and the "dowload timeline" feature they support (`#3228 `_) Dependency/Installation Changes ------------------------------- - Tahoe-LAFS no longer makes start-up time assertions about the versions of its dependencies. It is the responsibility of the administrator of the installation to ensure the correct version of dependencies are supplied. (`#2749 `_) - Tahoe-LAFS now depends on Twisted 16.6 or newer. (`#2957 `_) Removed Features ---------------- - "tahoe rm", an old alias for "tahoe unlink", has been removed. (`#1827 `_) - The direct dependencies on pyutil and zbase32 have been removed. (`#2098 `_) - Untested and unmaintained code for running Tahoe-LAFS as a Windows service has been removed. (`#2239 `_) - The redundant "pypywin32" dependency has been removed. (`#2392 `_) - Fedora 27 is no longer tested as part of the project's continuous integration system. (`#2955 `_) - "tahoe start", "tahoe daemonize", "tahoe restart", and "tahoe stop" are now deprecated in favor of using "tahoe run", possibly with a third-party process manager. (`#3273 `_) Other Changes ------------- - Tahoe-LAFS now tests for PyPy compatibility on CI. (`#2479 `_) - Tahoe-LAFS now requires Twisted 18.4.0 or newer. (`#2771 `_) - Tahoe-LAFS now uses towncrier to maintain the NEWS file. (`#2908 `_) - The release process document has been updated. (`#2920 `_) - allmydata.test.test_system.SystemTest is now more reliable with respect to bound address collisions. (`#2933 `_) - The Tox configuration has been fixed to work around a problem on Windows CI. (`#2956 `_) - The PyInstaller CI job now works around a pip/pyinstaller incompatibility. (`#2958 `_) - Some CI jobs for integration tests have been moved from TravisCI to CircleCI. (`#2959 `_) - Several warnings from a new release of pyflakes have been fixed. (`#2960 `_) - Some Slackware 14.2 continuous integration problems have been resolved. (`#2961 `_) - Some macOS continuous integration failures have been fixed. (`#2962 `_) - The NoNetworkGrid implementation has been somewhat improved. (`#2966 `_) - A bug in the test suite for the create-alias command has been fixed. (`#2967 `_) - The integration test suite has been updated to use pytest-twisted instead of deprecated pytest APIs. (`#2968 `_) - The magic-folder integration test suite now performs more aggressive cleanup of the processes it launches. (`#2969 `_) - The integration tests now correctly document the `--keep-tempdir` option. (`#2970 `_) - A misuse of super() in the integration tests has been fixed. (`#2971 `_) - Several utilities to facilitate the use of the Eliot causal logging library have been introduced. (`#2973 `_) - The Windows CI configuration has been tweaked. (`#2974 `_) - The Magic-Folder frontend has had additional logging improvements. 
(`#2977 `_) - (`#2981 `_, `#2982 `_) - Added a simple sytax checker so that once a file has reached python3 compatibility, it will not regress. (`#3001 `_) - Converted all uses of the print statement to the print function in the ./misc/ directory. (`#3002 `_) - The contributor guidelines are now linked from the GitHub pull request creation page. (`#3003 `_) - Updated the testing code to use the print function instead of the print statement. (`#3008 `_) - Replaced print statement with print fuction for all tahoe_* scripts. (`#3009 `_) - Replaced all remaining instances of the print statement with the print function. (`#3010 `_) - Replace StringIO imports with six.moves. (`#3011 `_) - Updated all Python files to use PEP-3110 exception syntax for Python3 compatibility. (`#3013 `_) - Update raise syntax for Python3 compatibility. (`#3014 `_) - Updated instances of octal literals to use the format 0o123 for Python3 compatibility. (`#3015 `_) - allmydata.test.no_network, allmydata.test.test_system, and allmydata.test.web.test_introducer are now more reliable with respect to bound address collisions. (`#3016 `_) - Removed tuple unpacking from function and lambda definitions for Python3 compatibility. (`#3019 `_) - Updated Python2 long numeric literals for Python3 compatibility. (`#3020 `_) - CircleCI jobs are now faster as a result of pre-building configured Docker images for the CI jobs. (`#3024 `_) - Removed used of backticks for "repr" for Python3 compatibility. (`#3027 `_) - Updated string literal syntax for Python3 compatibility. (`#3028 `_) - Updated CI to enforce Python3 syntax for entire repo. (`#3030 `_) - Replaced pycryptopp with cryptography. (`#3031 `_) - All old-style classes ported to new-style. (`#3042 `_) - Whitelisted "/bin/mv" as command for codechecks performed by tox. This fixes a current warning and prevents future errors (for tox 4). (`#3043 `_) - Progress towards Python 3 compatibility is now visible at . (`#3152 `_) - Collect coverage information from integration tests (`#3234 `_) - NixOS is now a supported Tahoe-LAFS platform. (`#3266 `_) Misc/Other ---------- - `#1893 `_, `#2266 `_, `#2283 `_, `#2766 `_, `#2980 `_, `#2985 `_, `#2986 `_, `#2987 `_, `#2988 `_, `#2989 `_, `#2990 `_, `#2991 `_, `#2992 `_, `#2995 `_, `#3000 `_, `#3004 `_, `#3005 `_, `#3007 `_, `#3012 `_, `#3017 `_, `#3021 `_, `#3023 `_, `#3025 `_, `#3026 `_, `#3029 `_, `#3036 `_, `#3038 `_, `#3048 `_, `#3086 `_, `#3097 `_, `#3111 `_, `#3118 `_, `#3119 `_, `#3227 `_, `#3229 `_, `#3232 `_, `#3233 `_, `#3237 `_, `#3238 `_, `#3239 `_, `#3240 `_, `#3242 `_, `#3243 `_, `#3245 `_, `#3246 `_, `#3248 `_, `#3250 `_, `#3252 `_, `#3255 `_, `#3256 `_, `#3259 `_, `#3261 `_, `#3262 `_, `#3263 `_, `#3264 `_, `#3265 `_, `#3267 `_, `#3268 `_, `#3271 `_, `#3272 `_, `#3274 `_, `#3275 `_, `#3276 `_, `#3279 `_, `#3281 `_, `#3282 `_, `#3285 `_ Release 1.13.0 (05-August-2018) ''''''''''''''''''''''''''''''' New Features ------------ The ``tahoe list-aliases`` command gained the ``--readonly-uri`` option in `PR400`_, which lists read-only capabilities (the default shows read/write capabilities if available). This command also gained a ``--json`` option in `PR452`_, providing machine-readable output. A new command ``tahoe status`` is added, showing some statistics and currently active operations (similar to the ``/status`` page in the Web UI). See also `PR502`_. Immutable uploads now use the "servers of happiness" algorithm for uploading shares. This means better placement of shares on available servers. See `PR416`_. 
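As background for the "servers of happiness" item above: the happiness of a
share placement is commonly described as the size of a maximum matching in
the bipartite graph between storage servers and the shares they hold, and an
upload must reach the configured ``shares.happy`` threshold. The following is
a minimal illustrative Python sketch of that metric only (a hypothetical
helper, not Tahoe-LAFS's actual implementation)::

    def happiness(placements):
        """
        placements maps a server id to the set of share numbers it holds.
        Returns the size of a maximum matching between servers and shares,
        i.e. how many distinct shares can each be paired with a distinct
        server (Kuhn's augmenting-path algorithm).
        """
        servers = list(placements)
        share_to_server = {}  # share number -> index into servers

        def try_match(i, seen):
            # Try to pair server i with some share, re-assigning previously
            # matched shares along an augmenting path when necessary.
            for share in placements[servers[i]]:
                if share in seen:
                    continue
                seen.add(share)
                j = share_to_server.get(share)
                if j is None or try_match(j, seen):
                    share_to_server[share] = i
                    return True
            return False

        return sum(1 for i in range(len(servers)) if try_match(i, set()))

For example, ``happiness({"A": {0, 1}, "B": {1}, "C": {2, 3}})`` is 3,
because three distinct shares can each be paired with a distinct server.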
To join a new client to a grid, the command ``tahoe invite`` was added. This
uses `magic wormhole`_ to connect two computers and exchange the required
information to start the client. The "client side" of this command is also
the new option ``tahoe create-client --join=``. Together, this provides a way
to provision a new client without having to securely transmit the fURL and
other details. `PR418`_

``tahoe backup`` now reports progress. `PR474`_

The ``tub.port=`` option can now accept ``listen:i2p`` or ``listen:tor``
options to use popular anonymity networks with storage servers. See `PR437`_.

The place where storage servers put shares (the "storage path") is now
configurable (`PR472`_).

A PyInstaller-based build is now available (`PR421`_).

A "Docker compose" setup for development purposes is now available (`PR445`_).

There is now a recommended workflow for Zcash-based donations to support
storage server operators (`PR506`_).

Bug Fixes in Core
-----------------

Some bugs with pidfile handling were fixed (`PR440`_ and `PR450`_), meaning
invalid pidfiles are now deleted.

Error messages related to ``tahoe.cfg`` now include the full path to the file.

`PR501`_ fixes "address already in use" test failures.

`PR502`_ fixes ticket #2926 ("tahoe status" failures).

`PR487`_ fixes ticket #1455 (setting ``X-Frame-Options: DENY``).

Web UI Changes
--------------

We set the "Referrer-Policy: no-referrer" header on all requests.

The Welcome page now understands the JSON option (`PR430`_) and OPTIONS
requests are handled (`PR447`_).

Magic Folder Changes
--------------------

Multiple magic-folders in a single Tahoe client are now supported. Bugs with
``.backup`` files have been fixed, meaning spurious ``.backup`` files will be
produced less often (`PR448`_, `PR475`_). Handling of default umask on new
magic-folder files is fixed in `PR458`_. The user mtime value is now
correctly preserved (`PR457`_). A bug in ``tahoe magic-folder status``
causing active operations to sometimes not show up is fixed (`PR461`_). If a
directory is missing, it is created (`PR492`_).

Raw Pull Requests
-----------------

In total, 50 Pull Requests were merged for this release, including
contributions of code or review from 15 different GitHub users. Thanks
everyone!
A complete list of these PRs and contributions: `PR380`_: `daira`_ `PR400`_: `meejah`_ (with `warner`_) `PR403`_: `meejah`_ `PR405`_: `meejah`_ (with `warner`_) `PR406`_: `meejah`_ (with `warner`_) `PR407`_: `david415`_ (with `meejah`_, `warner`_) `PR409`_: `str4d`_ (with `warner`_) `PR410`_: `tpltnt`_ (with `warner`_) `PR411`_: `tpltnt`_ (with `warner`_, `meejah`_) `PR412`_: `tpltnt`_ (with `warner`_) `PR414`_: `tpltnt`_ (with `meejah`_, `warner`_) `PR416`_: `david415`_, `meejah`_, `markberger`_, `warner`_ `PR417`_: `meejah`_ (with `pataquets`_, `warner`_) `PR418`_: `meejah`_ (with `crwood`_, `exarkun`_, `warner`_) `PR419`_: `tpltnt`_ (with `warner`_) `PR420`_: `ValdikSS`_ (with `warner`_) `PR421`_: `crwood`_ (with `meejah`_, `warner`_) `PR423`_: `warner`_ `PR428`_: `warner`_ `PR429`_: `exarkun`_ (with `warner`_) `PR430`_: `david415`_, `exarkun`_ (with `warner`_) `PR432`_: `exarkun`_ (with `meejah`_) `PR433`_: `exarkun`_ (with `warner`_) `PR434`_: `exarkun`_ (with `warner`_) `PR437`_: `warner`_ `PR438`_: `warner`_ (with `meejah`_) `PR440`_: `exarkun`_, `lpirl`_ (with `meejah`_) `PR444`_: `AnBuKu`_ (with `warner`_) `PR445`_: `bookchin`_ (with `warner`_) `PR447`_: `meejah`_ (with `tpltnt`_, `meejah`_) `PR448`_: `meejah`_ (with `warner`_) `PR450`_: `exarkun`_, `meejah`_, `lpirl`_ `PR452`_: `meejah`_ (with `tpltnt`_) `PR453`_: `meejah`_ `PR454`_: `meejah`_ (with `tpltnt`_, `meejah`_, `warner`_) `PR455`_: `tpltnt`_ (with `meejah`_) `PR456`_: `meejah`_ (with `meejah`_) `PR457`_: `meejah`_ (with `crwood`_, `tpltnt`_) `PR458`_: `meejah`_ (with `tpltnt`_) `PR460`_: `tpltnt`_ (with `exarkun`_, `meejah`_) `PR462`_: `meejah`_ (with `crwood`_) `PR464`_: `meejah`_ `PR470`_: `meejah`_ (with `exarkun`_, `tpltnt`_, `warner`_) `PR472`_: `exarkun`_, `meskio`_ `PR474`_: `exarkun`_ `PR475`_: `meejah`_ (with `exarkun`_) `PR482`_: `crwood`_ (with `warner`_) `PR485`_: `warner`_ `PR486`_: `exarkun`_ (with `warner`_) `PR487`_: `exarkun`_ (with `tpltnt`_) `PR489`_: `exarkun`_ `PR490`_: `exarkun`_ `PR491`_: `exarkun`_ (with `meejah`_) `PR492`_: `exarkun`_ (with `meejah`_, `tpltnt`_) `PR493`_: `exarkun`_ (with `meejah`_) `PR494`_: `exarkun`_ (with `meejah`_) `PR497`_: `meejah`_ (with `multikatt`_, `exarkun`_) `PR499`_: `exarkun`_ (with `meejah`_) `PR501`_: `exarkun`_ (with `meejah`_) `PR502`_: `exarkun`_ (with `meejah`_) `PR506`_: `exarkun`_ (with `crwood`_, `nejucomo`_) Developer and Internal Changes ------------------------------ People hacking on Tahoe-LAFS code will be interested in some internal improvements which shouldn't have any user-visible effects: * internal: skip some unicode tests on non-unicode platforms #2912 * internal: tox: pre-install Incremental to workaround setuptools bug #2913 * internal: fix PyInstaller builds `PR482`_ * internal: use @implementer instead of implements `PR406`_ * internal: improve happiness integration test #2895 `PR432`_ * web internal: refactor response-format (?t=) logic #2893 `PR429`_ * internal: fix pyflakes issues #2898 `PR434`_ * internal: setup.py use find_packages #2897 `PR433`_ * internal: ValueOrderedDict fixes #2891 * internal: remove unnused NumDict #2891 `PR438`_ * internal: setup.py use python_requires= so tox3 works #2876 * internal: rewrite tahoe stop/start/daemonize refs #1148 #275 #1121 #1377 #2149 #719 `PR417`_ * internal: add docs links to RFCs/etc `PR456`_ * internal: magic-folder test improvement `PR453`_ * internal: pytest changes `PR462`_ * internal: upload appveyor generated wheels as artifacts #2903 * internal: fix tox-vs-setuptools-upgrade #2910 * 
deps: require txi2p>=0.3.2 to work around TLS who-is-client issue #2861 `PR409`_ * deps: now need libyaml-dev from system before build `PR420`_ * deps: twisted>=16.4.0 for "python -m twisted.trial" `PR454`_ * deps: pin pypiwin32 to 219 until upstream bug resolved `PR464`_ * deps: setuptools >=28.8.0 for something `PR470`_ * deps: use stdlib "json" instead of external "simplejson" #2766 `PR405`_ * complain more loudly in setup.py under py3 `PR414`_ * rename "filesystem" to "file store" #2345 `PR380`_ * replace deprecated twisted.web.client with treq #2857 `PR428`_ * improve/stablize some test coverage #2891 * TODO: can we remove this now? pypiwin32 is now at 223 * use secure mkstemp() `PR460`_ * test "tahoe list-aliases --readonly-uri" #2863 `PR403`_ * #455: remove outdated comment * `PR407`_ fix stopService calls * `PR410`_ explicit python2.7 virtualenv * `PR419`_ fix list of supported OSes * `PR423`_ switch travis to a supported Ubuntu * deps: no longer declare a PyCrypto dependency (actual use vanished long ago) `PR514`_ .. _PR380: https://github.com/tahoe-lafs/tahoe-lafs/pull/380 .. _PR400: https://github.com/tahoe-lafs/tahoe-lafs/pull/400 .. _PR403: https://github.com/tahoe-lafs/tahoe-lafs/pull/403 .. _PR405: https://github.com/tahoe-lafs/tahoe-lafs/pull/405 .. _PR406: https://github.com/tahoe-lafs/tahoe-lafs/pull/406 .. _PR407: https://github.com/tahoe-lafs/tahoe-lafs/pull/407 .. _PR409: https://github.com/tahoe-lafs/tahoe-lafs/pull/409 .. _PR410: https://github.com/tahoe-lafs/tahoe-lafs/pull/410 .. _PR412: https://github.com/tahoe-lafs/tahoe-lafs/pull/412 .. _PR414: https://github.com/tahoe-lafs/tahoe-lafs/pull/414 .. _PR416: https://github.com/tahoe-lafs/tahoe-lafs/pull/416 .. _PR417: https://github.com/tahoe-lafs/tahoe-lafs/pull/417 .. _PR418: https://github.com/tahoe-lafs/tahoe-lafs/pull/418 .. _PR419: https://github.com/tahoe-lafs/tahoe-lafs/pull/419 .. _PR420: https://github.com/tahoe-lafs/tahoe-lafs/pull/420 .. _PR421: https://github.com/tahoe-lafs/tahoe-lafs/pull/421 .. _PR423: https://github.com/tahoe-lafs/tahoe-lafs/pull/423 .. _PR428: https://github.com/tahoe-lafs/tahoe-lafs/pull/428 .. _PR429: https://github.com/tahoe-lafs/tahoe-lafs/pull/429 .. _PR430: https://github.com/tahoe-lafs/tahoe-lafs/pull/430 .. _PR432: https://github.com/tahoe-lafs/tahoe-lafs/pull/432 .. _PR433: https://github.com/tahoe-lafs/tahoe-lafs/pull/433 .. _PR434: https://github.com/tahoe-lafs/tahoe-lafs/pull/434 .. _PR437: https://github.com/tahoe-lafs/tahoe-lafs/pull/437 .. _PR438: https://github.com/tahoe-lafs/tahoe-lafs/pull/438 .. _PR440: https://github.com/tahoe-lafs/tahoe-lafs/pull/440 .. _PR444: https://github.com/tahoe-lafs/tahoe-lafs/pull/444 .. _PR445: https://github.com/tahoe-lafs/tahoe-lafs/pull/445 .. _PR447: https://github.com/tahoe-lafs/tahoe-lafs/pull/447 .. _PR448: https://github.com/tahoe-lafs/tahoe-lafs/pull/448 .. _PR450: https://github.com/tahoe-lafs/tahoe-lafs/pull/450 .. _PR452: https://github.com/tahoe-lafs/tahoe-lafs/pull/452 .. _PR453: https://github.com/tahoe-lafs/tahoe-lafs/pull/453 .. _PR454: https://github.com/tahoe-lafs/tahoe-lafs/pull/454 .. _PR456: https://github.com/tahoe-lafs/tahoe-lafs/pull/456 .. _PR457: https://github.com/tahoe-lafs/tahoe-lafs/pull/457 .. _PR458: https://github.com/tahoe-lafs/tahoe-lafs/pull/458 .. _PR460: https://github.com/tahoe-lafs/tahoe-lafs/pull/460 .. _PR462: https://github.com/tahoe-lafs/tahoe-lafs/pull/462 .. _PR464: https://github.com/tahoe-lafs/tahoe-lafs/pull/464 .. _PR470: https://github.com/tahoe-lafs/tahoe-lafs/pull/470 .. 
.. _PR472: https://github.com/tahoe-lafs/tahoe-lafs/pull/472
.. _PR474: https://github.com/tahoe-lafs/tahoe-lafs/pull/474
.. _PR475: https://github.com/tahoe-lafs/tahoe-lafs/pull/475
.. _PR482: https://github.com/tahoe-lafs/tahoe-lafs/pull/482
.. _PR485: https://github.com/tahoe-lafs/tahoe-lafs/pull/485
.. _PR486: https://github.com/tahoe-lafs/tahoe-lafs/pull/486
.. _PR487: https://github.com/tahoe-lafs/tahoe-lafs/pull/487
.. _PR489: https://github.com/tahoe-lafs/tahoe-lafs/pull/489
.. _PR490: https://github.com/tahoe-lafs/tahoe-lafs/pull/490
.. _PR491: https://github.com/tahoe-lafs/tahoe-lafs/pull/491
.. _PR492: https://github.com/tahoe-lafs/tahoe-lafs/pull/492
.. _PR493: https://github.com/tahoe-lafs/tahoe-lafs/pull/493
.. _PR494: https://github.com/tahoe-lafs/tahoe-lafs/pull/494
.. _PR497: https://github.com/tahoe-lafs/tahoe-lafs/pull/497
.. _PR499: https://github.com/tahoe-lafs/tahoe-lafs/pull/499
.. _PR501: https://github.com/tahoe-lafs/tahoe-lafs/pull/501
.. _PR502: https://github.com/tahoe-lafs/tahoe-lafs/pull/502
.. _PR506: https://github.com/tahoe-lafs/tahoe-lafs/pull/506
.. _PR514: https://github.com/tahoe-lafs/tahoe-lafs/pull/514
.. _AnBuKu: https://github.com/AnBuKu
.. _ValdikSS: https://github.com/ValdikSS
.. _bookchin: https://github.com/bookchin
.. _crwood: https://github.com/crwood
.. _nejucomo: https://github.com/nejucomo
.. _daira: https://github.com/daira
.. _david415: https://github.com/david415
.. _exarkun: https://github.com/exarkun
.. _lpirl: https://github.com/lpirl
.. _markberger: https://github.com/markberger
.. _meejah: https://github.com/meejah
.. _meskio: https://github.com/meskio
.. _multikatt: https://github.com/multikatt
.. _pataquets: https://github.com/pataquets
.. _str4d: https://github.com/str4d
.. _tpltnt: https://github.com/tpltnt
.. _warner: https://github.com/warner

Release 1.12.1 (18-Jan-2017)
''''''''''''''''''''''''''''

This fixes a few small problems discovered just after 1.12.0 was released.

* ``introducers.yaml`` was entirely broken (due to a unicode-vs-ascii problem), and the documentation recommended an invalid syntax. Both have been fixed. (#2862)
* Creating a node with ``--hide-ip`` shouldn't set ``tcp = tor`` if txtorcon is unavailable. I2P-only systems should get ``tcp = disabled``. (#2860)
* As a result, we now require foolscap-0.12.6.
* setup.py now creates identical wheels on win32 and unix. Previously wheels created on windows got an unconditional dependency upon ``pypiwin32``, making them uninstallable on unix. Now that dependency is marked as ``sys_platform=win32`` only. (#2763)

Some other small changes include:

* The deep-stats t=json response now includes an "api-version" field, currently set to 1. (#567)
* WUI Directory listings use ``rel=noreferrer`` to avoid leaking the dircap to the JS contents of the target file. (#151, #378)
* Remove the dependency on ``shutilwhich`` (#2856)

Release 1.12.0 (17-Dec-2016)
''''''''''''''''''''''''''''

New Features
------------

This release features improved Tor/I2P integration. It is now easy to:

* use Tor to hide your IP address during external network activity
* connect to Tor/I2P-based storage servers
* run an Introducer or a storage node as a Tor "onion service"

See docs/anonymity-configuration.rst for instructions and new node-creation arguments (--hide-ip, --listen=tor), which include ways to use SOCKS servers for outbound connections. Tor/I2P/SOCKS support requires extra python libraries to be installed (e.g. 'pip install tahoe-lafs[tor]'), as well as matching (non-python) daemons available on the host system. (tickets #517, #2490, #2838)

Nodes can use multiple introducers by adding entries to a new ``private/introducers.yaml`` file, or stop using introduction entirely by omitting the ``introducer.furl`` key from tahoe.cfg (introducerless clients will need static servers configured to connect anywhere). Server announcements are sent to all connected Introducers, and clients merge all announcements they see, which can improve grid reliability. (#68)

In addition, nodes now cache the announcements they receive in a YAML file, and use their cached information at startup until the Introducer connection is re-established. This makes nodes more tolerant of Introducers that are temporarily offline.
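
For the multiple-introducer support described above, ``private/introducers.yaml`` maps a pet name of your choosing to each introducer's FURL. The snippet below is only a rough sketch: the pet name and FURL are placeholders, the exact layout is an assumption rather than a guarantee, and docs/configuration.rst remains the authoritative reference::

  # hypothetical sketch of NODEDIR/private/introducers.yaml;
  # see docs/configuration.rst for the supported syntax
  introducers:
    petname2:
      furl: "pb://TUBID@tcp:introducer.example.org:12345/SWISSNUM"
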
Node admins can copy text from the announcement cache into a new ``private/servers.yaml`` file to add "static servers", which augment/override what the Introducer offers. This can modify aspects of the server, or use servers that were never announced in the first place. (#2788)

Nodes now use a separate Foolscap "Tub" for each server connection, so ``servers.yaml`` can override the connection rules (Tor vs direct-TCP) for each one independently. This offers a slight privacy improvement, but slows down connections slightly (perhaps 75ms per server), and breaks an obscure NAT-bypass trick which enabled storage servers to run behind NAT boxes (but only when all the *clients* of the storage server had public IP addresses, and were also configured as servers). (#2759, #517)

"Magic Folders" is an experimental two-way directory synchronization tool, contributed by Least Authority Enterprises, which replaces the previous experimental (one-way) "drop-upload" feature. This allows multiple users to keep a single directory in-sync, using Tahoe as the backing store. See docs/frontends/magic-folder.rst for details and configuration instructions.

Compatibility Issues
--------------------

The old version-1 Introducer protocol has been removed. Tahoe has used the version-2 protocol since 1.10 (released in 2013), but all nodes (clients, servers, and the Introducer itself) provided backwards-compatibility translations when encountering older peers. These translations were finally removed, so Tahoe nodes at 1.12 or later will not be able to interact with nodes at 1.9 or older. (#2784)

The versions of Tahoe (1.11.0) and Foolscap (0.6.5) that shipped in Debian/Jessie (the most recent stable release, as of December 2016) are regrettably not forwards-compatible with this new version. Nodes running Jessie will not be able to connect to servers or introducers created with this release because they cannot parse the new ``tcp:HOST:PORT`` hint syntax (this syntax has been around for a while, but this is the first Tahoe release to automatically generate such hints). If you need to work around this, then after creating your new node, edit the tahoe.cfg of your new server/introducer: in ``[node] tub.location``, make each connection hint look like ``HOST:PORT`` instead of ``tcp:HOST:PORT``. If your grid only has nodes with Foolscap-0.7.0 or later, you will not need this workaround. (#2831)

Nodes now use an Ed25519 public key as a serverid, instead of a Foolscap "tub id", so status displays will report a different serverid after upgrade. For the most part this should be self-consistent, however if you have an old (1.11) client talking to a new (1.12) Helper, then the client's upload results (on the "Recent Uploads And Downloads" web page) will show unusual server ids. (#1363)

Dependency/Installation changes
-------------------------------

Tahoe now requires Twisted >= 16.1.0, so that unit tests do not fail because of uncancelled timers left running by HostnameEndpoint. It also requires the Tor/I2P supporting code from Foolscap >= 0.12.5. (#2781)

Configuration Changes
---------------------

Some small changes were made to the way Tahoe-LAFS is configured, via ``tahoe.cfg`` and other files. In general, node behavior should now be more predictable, and errors should be surfaced earlier.

* ``tub.port`` is now an Endpoint server specification string (which is pretty much just like a strports string, but can be extended by plugins). It now rejects "tcp:0" and "0". The tahoe.cfg value overrides anything stored on disk (in client.port).
  This should have no effect on most old nodes (which did not set tub.port in tahoe.cfg, and which wrote an allocated port number to client.port the first time they launched). Folks who want to listen on a specific port number typically set tub.port to "tcp:12345" or "12345", not "0". (ticket #2491)

* This should enable IPv6 on servers, either via AAAA records or colon-hex addresses. (#2827)

* The "portnumfile" (e.g. NODEDIR/client.port) is written as soon as the port is allocated, before the tub is created, and only if "tub.port" was empty. The old code wrote to it unconditionally, and after Tub startup. So if the user allows NODEDIR/client.port to be written, and then later modifies tahoe.cfg to set "tub.port" to a different value, this difference will persist (and the node will honor tahoe.cfg "tub.port" exclusively).

* We now encourage static allocation of tub.port, and pre-configuration of the node's externally-reachable IP address or hostname (by setting tub.location). Automatic IP-address detection is deprecated. Automatic port allocation is discouraged. Both are managed by the new arguments to "tahoe create-node".

* "tahoe start" now creates the Tub, and all primary software components, before the child process daemonizes. Many configuration errors which would previously have been reported in a logfile (after node startup) will now be signalled immediately, via stderr. In these cases, the "tahoe start" process will exit with a non-zero return code. (#2491)

* Unrecognized tahoe.cfg options are rejected at startup, not ignored (#2809)

* ``tub.port`` can take multiple (comma-separated) endpoints, to listen on multiple ports at the same time, useful for dual IPv4+IPv6 servers. (#867)

* An empty ``tub.port`` means don't listen at all, which is appropriate for client-only nodes (#2816)

* A new setting, ``reveal-ip-address = false``, acts as a safety belt, causing an error to be thrown if any other setting might reveal the node's IP address (i.e. it requires Tor or I2P to be used, rather than direct TCP connections). This is set automatically by ``tahoe create-client --hide-ip``. (#1010)

Server-like nodes (Storage Servers and Introducers), created with ``tahoe create-node`` and ``tahoe create-introducer``, now accept new arguments to control how they listen for connections, and how they advertise themselves to other nodes. You can use ``--listen=tcp`` and ``--hostname=`` to choose a port automatically, or ``--listen=tor`` / ``--listen=i2p`` to use Tor/I2P hidden services instead. You can also use ``--port=`` and ``--location=`` to explicitly control the listener and the advertised location, as illustrated below. (#2773, #2490)

The "stats-gatherer", used by enterprise deployments to collect runtime statistics from a fleet of Tahoe storage servers, must now be assigned a hostname, or location+port pair, at creation time. It will no longer attempt to guess its location (with /sbin/ifconfig). The "tahoe create-stats-gatherer" command requires either "--hostname=", or both "--location=" and "--port". (#2773)

To keep your old stats-gatherers working, with their original FURL, you must determine a suitable --location and --port, and write their values into NODEDIR/location and NODEDIR/port, respectively. Or you could simply rebuild it by re-running "tahoe create-stats-gatherer" with the new arguments.

The stats gatherer now updates a JSON file named "stats.json", instead of a Pickle named "stats.pickle".
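
As an illustration of the new node-creation arguments described above, a server could be created like this (a sketch only; the hostname, port number, and node directory are placeholders)::

  # listen on an automatically-assigned TCP port, advertising a hostname
  tahoe create-node --listen=tcp --hostname=node.example.org ~/storage-node

  # or specify the listener and the advertised location explicitly
  tahoe create-node --port=tcp:3457 --location=tcp:node.example.org:3457 ~/storage-node

  # or listen as a Tor onion service instead (requires the [tor] extras)
  tahoe create-node --listen=tor ~/storage-node
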
The munin plugins in misc/operations_helpers/munin/ have been updated to match the new stats.json format, and must be re-installed and re-configured if you use munin.

Removed Features
----------------

The "key-generator" node type has been removed. This was a standalone process that maintained a queue of RSA keys, and clients could offload their key-generation work by adding "key_generator.furl=" in their tahoe.cfg files, to create mutable files and directories faster. This seemed important back in 2006, but these days computers are faster and RSA key generation only takes about 90ms. This removes the "tahoe create-key-generator" command. Any "key_generator.furl" settings in tahoe.cfg will log a warning and are otherwise ignored. Attempts to "tahoe start" a previously-generated key-generator node will result in an error. (#2783)

Tahoe's HTTP Web-API (aka "the WAPI") had an endpoint named "/file/". This has been deprecated, and applications should use "/named/" instead. (#1903)

The little-used "manhole" debugging feature has been removed. This allowed you to SSH or Telnet "into" a Tahoe node, providing an interactive Read-Eval-Print-Loop (REPL) that executed inside the context of the running process. (#2367)

The "tahoe debug trial" and "tahoe debug repl" CLI commands were removed, as "tox" is now the preferred way to run tests. (#2735)

One of the "recent uploads and downloads" status pages was using a Google-hosted API to draw a timing chart of the "mapupdate" operation. This has been removed, both for privacy (to avoid revealing the serverids to Google) and because the API was deprecated several years ago. (#1942)

The "_appname.py" feature was removed. Early in Tahoe's history (at AllMyData), this file allowed the "tahoe" executable to be given a different name depending upon which Darcs patches were included in the particular source tree (one for production, another for development, etc). We haven't needed this for a long time, so it was removed. (#2754)

Other Changes
-------------

Documentation is now hosted at http://tahoe-lafs.readthedocs.io/ (not .org).

Tahoe's testing-only dependencies can now be installed by asking for the [test] extra, so if you want to set up a virtualenv for testing, use "pip install -e .[test]" instead of just "pip install -e ." . This includes "tox", "coverage", "pyflakes", "mock", and all the Tor/I2P extras. Most developer tooling (code-checks, documentation builds, deprecation warnings, etc) has been moved from a Makefile into tox environments. (#2776)

The "Welcome" (web) page now shows more detail about the introducer and storage-server connections, including which connection handler is being used (tcp/tor/i2p) and why specific connection hints failed to connect. (#2818, #2819)

The little-used "control port" now uses a separate (ephemeral) Tub. This means the FURL changes each time the node is restarted, and it only listens on the loopback (127.0.0.1) interface, on a random port. As the control port is only used by some automated tests (check_memory, check_speed), this shouldn't affect anyone. (#2794)

The slightly-more-used "log port" now also uses a separate (ephemeral) Tub, with the same consequences. The lack of a stable (and externally-reachable) logport.furl means it is no longer possible to use ``flogtool tail FURL`` against a distant Tahoe server, however ``flogtool tail .../nodedir/private/logport.furl`` still works just fine (and is the more common use case anyways).
We might bring back the ability to configure the port and location of the logport in the future, if there is sufficient demand, but for now it seems better to avoid the complexity. The default tahoe.cfg setting of ``web.static = public_html``, when ``NODEDIR/public_html/`` does not exist, no longer causes web browsers to display a traceback which reveals somewhat-private information like the value of NODEDIR, and the Python/OS versions in use. Instead it just shows a plain 404 error. (#1720) Release 1.11.0 (30-Mar-2016) '''''''''''''''''''''''''''' New Build Process ----------------- ``pip install`` (in a virtualenv) is now the recommended way to install Tahoe-LAFS. The old "bin/tahoe" script (created inside the source tree, rather than in a virtualenv) has been removed, as has the ancient "zetuptoolz" fork of setuptools. Tahoe was started in 2006, and predates pip and virtualenv. From the very beginning it used a home-made build process that attempted to make ``setup.py build`` behave somewhat like a modern ``pip install --editable .``. It included a local copy of ``setuptools`` (to avoid requiring it to be pre-installed), which was then forked as ``zetuptoolz`` to fix bugs during the bad old days of setuptools non-maintenance. The pseudo-virtualenv used a script named ``bin/tahoe``, created during ``setup.py build``, to set up the $PATH and $PYTHONPATH as necessary. Starting with this release, all the custom build process has been removed, and Tahoe should be installable with standard modern tools. You will need ``virtualenv`` installed (which provides ``pip`` and setuptools). Many Python installers include ``virtualenv`` already, and Debian-like systems can use ``apt-get install python-virtualenv``. If the command is not available on your system, follow the installation instructions at https://virtualenv.pypa.io/en/latest/ . Then, to install the latest version, create a virtualenv and use ``pip``:: virtualenv venv . venv/bin/activate (venv) pip install tahoe-lafs (venv) tahoe --version To run Tahoe from a source checkout (so you can hack on Tahoe), use ``pip install --editable .`` from the git tree:: git clone https://github.com/tahoe-lafs/tahoe-lafs.git cd tahoe-lafs virtualenv venv . venv/bin/activate (venv) pip install --editable . (venv) tahoe --version The ``pip install`` will download and install all necessary Python dependencies. Some dependencies require a C compiler and system libraries to build: on Debian/Ubuntu-like systems, use ``apt-get install build-essential python-dev libffi-dev libssl-dev``. On Windows and OS-X platforms, we provide pre-compiled binary wheels at ``https://tahoe-lafs.org/deps/``, removing the need for a compiler. (#1582, #2445, also helped to close: #142, #709, #717, #799, #1220, #1260, #1270, #1403, #1450, #1451, #1504, #1896, #2044, #2221, #2021, #2028, #2066, #2077, #2247, #2255, #2286, #2306, #2473, #2475, #2530, #657, #2446, #2439, #2317, #1753, #1009, #1168, #1238, #1258, #1334, #1346, #1464, #2356, #2570) New PyPI Distribution Name -------------------------- Tahoe-LAFS is now known on PyPI as ``tahoe-lafs``. It was formerly known as ``allmydata-tahoe``. This affects ``pip install`` commands. (#2011) Because of this change, if you use a git checkout, you may need to run ``make distclean`` (to delete the machine-generated ``src/allmydata/_appname.py`` file). You may also need to remove ``allmydata-tahoe`` from any virtualenvs you've created, before installing ``tahoe-lafs`` into them. 
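
For example, inside an existing virtualenv that still has the old package name, the switch looks roughly like this (a sketch; adapt it to your own environment)::

  (venv) pip uninstall allmydata-tahoe
  (venv) pip install tahoe-lafs
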
If all else fails, make a new git checkout, and use a new virtualenv.

Note that the importable *package* name is still ``allmydata``, but this only affects developers, not end-users. This name is scheduled to be changed in a future release. (#1950)

Compatibility and Dependency Updates
------------------------------------

Tahoe now requires Python 2.7 on all platforms. (#2445)

Tahoe now requires Foolscap 0.10.1, which fixes incompatibilities with recent Twisted releases. (#2510, #2722, #2567)

Tahoe requires Twisted 15.1.0 or later, so it can request the ``Twisted[tls]`` "extra" (this asks Twisted to ask for everything it needs to provide proper TLS support). (#2760)

Tests should now work with both Nevow 0.11 and 0.12. (#2663)

Binary wheels for Windows and OS-X (for all dependencies) have been built and are hosted at https://tahoe-lafs.org/deps . Use ``pip install --find-links=URL tahoe-lafs`` to take advantage of them. (#2001)

We've removed the SUMO and tahoe-deps tarballs. Please see docs/desert-island.rst for instructions on building tahoe on offline systems. (#1009, #2530, #2446, #2439)

Configuration Changes
---------------------

A new "peers.preferred" item was added to the ``[client]`` section. This identifies servers that will be promoted to the front of the peer-selection list when uploading or downloading files. Servers are identified by their Node ID (visible on the welcome page). This may be useful to ensure that one full set of shares is placed on nearby servers, making later downloads fast (and avoiding the use of scarce remote bandwidth). The remaining shares can go to distant backup servers. (git commit 96eaca6)

Aliases can now be unicode. (git commit 46719a8b)

The introducer's "set_encoding_parameters" feature was removed. Once upon a time, the Introducer could recommend encoding parameters (shares.needed and shares.total) to all clients, the idea being that the Introducer had a slightly better idea about the expected size of the storage server pool than clients might. Client-side support for this was removed long ago, but the Introducer itself kept delivering recommendations until this release. (git commit 56a9f5ad)

Other Fixes
-----------

The OS-X .pkg installer has been improved slightly, to clean up after previous installations better. (#2493)

All WUI (Web UI) timestamps should now be in a consistent format, using the gateway machine's local time zone. (#1077)

The web "welcome page" has been improved: it shows how long a server has been connected (in minutes/hours/days, instead of the date+time when the connection was most recently established). The "announced" column has been replaced with a "Last RX" column that shows when we last heard anything from the server. The mostly-useless "storage" column has been removed. (#1973)

In the ``tahoe ls`` command, the ``-u`` shortcut for ``--uri`` has been removed, leaving the shortcut free for the global ``--node-url`` option. (#1949, #2137)

Some internal logging was disabled, to avoid a temporary bug that caused excessive (perhaps infinite) log messages to be created. (#2567)

Other non-user-visible tickets were fixed. (#2499, #2511, #2556, #2663, #2723, #2543)

Release 1.10.2 (2015-07-30)
'''''''''''''''''''''''''''

Packaging Changes
-----------------

This release no longer requires the ``mock`` library (which was previously used in the unit test suite).
Shortly after the Tahoe-LAFS 1.10.1 release, a new version of ``mock`` was released (1.1.0) that proved to be incompatible with Tahoe's fork of setuptools, preventing Tahoe-1.10.1 from building at all. `#2465`_ The ``tahoe --version`` output is now less likely to include scary diagnostic warnings that look deceptively like stack traces. `#2436`_ The pyasn1 requirement was increased to >= 0.1.8. .. _`#2465`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2465 .. _`#2436`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2436 Other Fixes ----------- A WebAPI ``GET`` would sometimes hang when using the HTTP Range header to read just part of the file. `#2459`_ Using ``tahoe cp`` to copy two different files of the same name into the same target directory now raises an error, rather than silently overwriting one of them. `#2447`_ All tickets closed in this release: 2328 2436 2446 2447 2459 2460 2461 2462 2465 2470. .. _`#2459`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2459 .. _`#2447`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2447 Release 1.10.1 (2015-06-15) ''''''''''''''''''''''''''' User Interface / Configuration Changes -------------------------------------- The "``tahoe cp``" CLI command's ``--recursive`` option is now more predictable, but behaves slightly differently than before. See below for details. Tickets `#712`_, `#2329`_. The SFTP server can now use public-key authentication (instead of only password-based auth). Public keys are configured through an "account file", just like passwords. See docs/frontends/FTP-and-SFTP for examples of the format. `#1411`_ The Tahoe node can now be configured to disable automatic IP-address detection. Using "AUTO" in tahoe.cfg [node]tub.location= (which is now the default) triggers autodetection. Omit "AUTO" to disable autodetection. "AUTO" can be combined with static addresses to e.g. use both a stable UPnP-configured tunneled address and a DHCP-assigned dynamic (local subnet only) address. See `configuration.rst`_ for details. `#754`_ The web-based user interface ("WUI") Directory and Welcome pages have been redesigned, with improved CSS for narrow windows and more-accessible icons (using distinctive shapes instead of just colors). `#1931`_ `#1961`_ `#1966`_ `#1972`_ `#1901`_ .. _`#712`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/712 .. _`#754`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/754 .. _`#1411`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1411 .. _`#1901`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1901 .. _`#1931`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1931 .. _`#1961`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1961 .. _`#1966`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1966 .. _`#1972`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1972 .. _`#2329`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2329 .. _`configuration.rst`: docs/configuration.rst "tahoe cp" changes ------------------ The many ``cp``-like tools in the Unix world (POSIX ``/bin/cp``, the ``scp`` provided by SSH, ``rsync``) all behave slightly differently in unusual circumstances, especially when copying whole directories into a target that may or may not already exist. The most common difference is whether the user is referring to the source directory as a whole, or to its contents. For example, should "``cp -r foodir bardir``" create a new directory named "``bardir/foodir``"? Or should it behave more like "``cp -r foodir/* bardir``"? Some tools use the presence of a trailing slash to indicate which behavior you want. 
Others ignore trailing slashes. "``tahoe cp``" is no exception to having exceptional cases. This release fixes some bad behavior and attempts to establish a consistent rationale for its behavior. The new rule is: - If the thing being copied is a directory, and it has a name (e.g. it's not a raw Tahoe-LAFS directorycap), then you are referring to the directory itself. - If the thing being copied is an unnamed directory (e.g. raw dircap or alias), then you are referring to the contents. - Trailing slashes do not affect the behavior of the copy (although putting a trailing slash on a file-like target is an error). - The "``-r``" (``--recursive``) flag does not affect the behavior of the copy (although omitting ``-r`` when the source is a directory is an error). - If the target refers to something that does not yet exist: - and if the source is a single file, then create a new file; - otherwise, create a directory. There are two main cases where the behavior of Tahoe-LAFS v1.10.1 differs from that of the previous v1.10.0 release: - "``cp DIRCAP/file.txt ./local/missing``" , where "``./local``" is a directory but "``./local/missing``" does not exist. The implication is that you want Tahoe to create a new file named "``./local/missing``" and fill it with the contents of the Tahoe-side ``DIRCAP/file.txt``. In v1.10.0, a plain "``cp``" would do just this, but "``cp -r``" would do "``mkdir ./local/missing``" and then create a file named "``./local/missing/file.txt``". In v1.10.1, both "``cp``" and "``cp -r``" create a file named "``./local/missing``". - "``cp -r PARENTCAP/dir ./local/missing``", where ``PARENTCAP/dir/`` contains "``file.txt``", and again "``./local``" is a directory but "``./local/missing``" does not exist. In both v1.10.0 and v1.10.1, this first does "``mkdir ./local/missing``". In v1.10.0, it would then copy the contents of the source directory into the new directory, resulting in "``./local/missing/file.txt``". In v1.10.1, following the new rule of "a named directory source refers to the directory itself", the tool creates "``./local/missing/dir/file.txt``". Compatibility and Dependency Updates ------------------------------------ Windows now requires Python 2.7. Unix/OS-X platforms can still use either Python 2.6 or 2.7, however this is probably the last release that will support 2.6 (it is no longer receiving security updates, and most OS distributions have switched to 2.7). Tahoe-LAFS now has the following dependencies: - Twisted >= 13.0.0 - Nevow >= 0.11.1 - foolscap >= 0.8.0 - service-identity - characteristic >= 14.0.0 - pyasn1 >= 0.1.4 - pyasn1-modules >= 0.0.5 On Windows, if pywin32 is not installed then the dependencies on Twisted and Nevow become: - Twisted >= 11.1.0, <= 12.1.0 - Nevow >= 0.9.33, <= 0.10 On all platforms, if pyOpenSSL >= 0.14 is installed, then it will be used, but if not then only pyOpenSSL >= 0.13, <= 0.13.1 will be built when directly invoking `setup.py build` or `setup.py install`. We strongly advise OS packagers to take the option of making a tahoe-lafs package depend on pyOpenSSL >= 0.14. In order for that to work, the following additional Python dependencies are needed: - cryptography - cffi >= 0.8 - six >= 1.4.1 - enum34 - pycparser as well as libffi (for Debian/Ubuntu, the name of the needed OS package is `libffi6`). Tahoe-LAFS is now compatible with Setuptools version 8 and Pip version 6 or later, which should fix execution on Ubuntu 15.04 (it now tolerates PEP440 semantics in dependency specifications). 
`#2354`_ `#2242`_ Tahoe-LAFS now depends upon foolscap-0.8.0, which creates better private keys and certificates than previous versions. To benefit from the improvements (2048-bit RSA keys and SHA256-based certificates), you must re-generate your Tahoe nodes (which changes their TubIDs and FURLs). `#2400`_ .. _`#2242`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2242 .. _`#2354`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2354 .. _`#2400`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2400 Packaging --------- A preliminary OS-X package, named "``tahoe-lafs-VERSION-osx.pkg``", is now being generated. It is a standard double-clickable installer, which creates ``/Applications/tahoe.app`` that embeds a complete runtime tree. However launching the ``.app`` only brings up a notice on how to run tahoe from the command line. A future release may turn this into a fully-fledged application launcher. `#182`_ `#2393`_ `#2323`_ Preliminary Docker support was added. Tahoe container images may be available on DockerHub. `PR#165`_ `#2419`_ `#2421`_ Old and obsolete Debian packaging tools have been removed. `#2282`_ .. _`#182`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/182 .. _`#2282`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2282 .. _`#2323`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2323 .. _`#2393`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2393 .. _`#2419`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2419 .. _`#2421`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2421 .. _`PR#165`: https://github.com/tahoe-lafs/tahoe-lafs/pull/165 Minor Changes ------------- - Welcome page: add per-server "(space) Available" column. `#648`_ - check/deep-check learned to accept multiple location arguments. `#740`_ - Checker reports: remove needs-rebalancing, add count-happiness. `#1784`_ `#2105`_ - CLI ``--help``: cite (but don't list) global options on each command. `#2233`_ - Fix ftp "``ls``" to work with Twisted 15.0.0. `#2394`_ .. _`#648`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/648 .. _`#740`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/740 .. _`#1784`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1784 .. _`#2105`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2105 .. _`#2233`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2233 .. _`#2394`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2394 Roughly 75 tickets were closed in this release: 623 648 712 740 754 898 1146 1159 1336 1381 1411 1634 1674 1698 1707 1717 1737 1784 1800 1807 1842 1847 1901 1918 1953 1960 1961 1966 1969 1972 1974 1988 1992 2005 2008 2023 2027 2028 2034 2048 2067 2086 2105 2121 2128 2165 2193 2208 2209 2233 2235 2242 2245 2248 2249 2249 2280 2281 2282 2290 2305 2312 2323 2340 2354 2380 2393 2394 2398 2400 2415 2416 2417 2433. Another dozen were referenced but not closed: 182 666 982 1064 1258 1531 1536 1742 1834 1931 1935 2286. Roughly 40 GitHub pull-requests were closed: 32 48 50 56 57 61 62 62 63 64 69 73 81 82 84 85 87 91 94 95 96 103 107 109 112 114 120 122 125 126 133 135 136 137 142 146 149 152 165. For more information about any ticket, visit e.g. https://tahoe-lafs.org/trac/tahoe-lafs/ticket/754 Release 1.10.0 (2013-05-01) ''''''''''''''''''''''''''' New Features ------------ - The Welcome page has been redesigned. This is a preview of the design style that is likely to be used in other parts of the WUI in future Tahoe-LAFS versions. (`#1713`_, `#1457`_, `#1735`_) - A new extensible Introducer protocol has been added, as the basis for future improvements such as accounting. 
Compatibility with older nodes is not affected. When server, introducer, and client are all upgraded, the welcome page will show node IDs that start with "v0-" instead of the old tubid. See ``__ for details. (`#466`_) - The web-API has a new ``relink`` operation that supports directly moving files between directories. (`#1579`_) Security Improvements --------------------- - The ``introducer.furl`` for new Introducers is now unguessable. In previous releases, this FURL used a predictable swissnum, allowing a network eavesdropper who observes any node connecting to the Introducer to access the Introducer themselves, and thus use servers or offer storage service to clients (i.e. "join the grid"). In the new code, the only way to join a grid is to be told the ``introducer.furl`` by someone who already knew it. Note that pre-existing introducers are not changed. To force an introducer to generate a new FURL, delete the existing ``introducer.furl`` file and restart it. After doing this, the ``[client]introducer.furl`` setting of every client and server that should connect to that introducer must be updated. Note that other users of a shared machine may be able to read ``introducer.furl`` from your ``tahoe.cfg`` file unless you configure the file permissions to prevent them. (`#1802`_) - Both ``introducer.furl`` and ``helper.furl`` are now censored from the Welcome page, to prevent users of your gateway from learning enough to create gateway nodes of their own. For existing guessable introducer FURLs, the ``introducer`` swissnum is still displayed to show that a guessable FURL is in use. (`#860`_) Command-line Syntax Changes --------------------------- - Global options to ``tahoe``, such as ``-d``/``--node-directory``, must now come before rather than after the command name (for example, ``tahoe -d BASEDIR cp -r foo: bar:`` ). (`#166`_) Notable Bugfixes ---------------- - In earlier versions, if a connection problem caused a download failure for an immutable file, subsequent attempts to download the same file could also fail. This is now fixed. (`#1679`_) - Filenames in WUI directory pages are now displayed correctly when they contain characters that require HTML escaping. (`#1143`_) - Non-ASCII node nicknames no longer cause WUI errors. (`#1298`_) - Checking a LIT file using ``tahoe check`` no longer results in an exception. (`#1758`_) - The SFTP frontend now works with recent versions of Twisted, rather than giving errors or warnings about use of ``IFinishableConsumer``. (`#1926`_, `#1564`_, `#1525`_) - ``tahoe cp --verbose`` now counts the files being processed correctly. (`#1805`_, `#1783`_) - Exceptions no longer trigger an unhelpful crash reporter on Ubuntu 12.04 ("Precise") or later. (`#1746`_) - The error message displayed when a CLI tool cannot connect to a gateway has been improved. (`#974`_) - Other minor fixes: `#1781`_, `#1812`_, `#1915`_, `#1484`_, `#1525`_ Compatibility and Dependencies ------------------------------ - Python >= 2.6, except Python 3 (`#1658`_) - Twisted >= 11.0.0 (`#1771`_) - mock >= 0.8 (for unit tests) - pycryptopp >= 0.6.0 (for Ed25519 signatures) - zope.interface >= 3.6.0 (except 3.6.3 or 3.6.4) Other Changes ------------- - The ``flogtool`` utility, used to read detailed event logs, can now be accessed as ``tahoe debug flogtool`` even when Foolscap is not installed system-wide. (`#1693`_) - The provisioning/reliability pages were removed from the main client's web interface, and moved into a standalone web-based tool. 
Use the ``run.py`` script in ``misc/operations_helpers/provisioning/`` to access them.
- Web clients can now cache (ETag) immutable directory pages. (`#443`_)
- ``__ was added to document the administration of convergence secrets. (`#1761`_)

Precautions when Upgrading
--------------------------

- When upgrading a grid from a recent revision of trunk, follow the precautions from this `message to the tahoe-dev mailing list`_, to ensure that announcements to the Introducer are recognized after the upgrade. This is not necessary when upgrading from a previous release like 1.9.2.

.. _`#166`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/166
.. _`#443`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/443
.. _`#466`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/466
.. _`#860`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/860
.. _`#974`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/974
.. _`#1143`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1143
.. _`#1298`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1298
.. _`#1457`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1457
.. _`#1484`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1484
.. _`#1525`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1525
.. _`#1564`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1564
.. _`#1579`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1579
.. _`#1658`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1658
.. _`#1679`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1679
.. _`#1693`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1693
.. _`#1713`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1713
.. _`#1735`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1735
.. _`#1746`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1746
.. _`#1758`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1758
.. _`#1761`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1761
.. _`#1771`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1771
.. _`#1781`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1781
.. _`#1783`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1783
.. _`#1802`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1802
.. _`#1805`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1805
.. _`#1812`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1812
.. _`#1915`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1915
.. _`#1926`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1926
.. _`message to the tahoe-dev mailing list`: https://lists.tahoe-lafs.org/pipermail/tahoe-dev/2013-March/008079.html

Release 1.9.2 (2012-07-03)
''''''''''''''''''''''''''

Notable Bugfixes
----------------

- Several regressions in support for reading (`#1636`_), writing/modifying (`#1670`_, `#1749`_), verifying (`#1628`_) and repairing (`#1655`_, `#1669`_, `#1676`_, `#1689`_) mutable files have been fixed.
- FTP can now list directories containing mutable files, although it still does not support reading or writing mutable files. (`#680`_)
- The FTP frontend would previously show Jan 1 1970 for all timestamps; now it shows the correct modification time of the directory entry. (`#1688`_)
- If a node is configured to report incidents to a log gatherer, but the gatherer is offline when some incidents occur, it would previously not "catch up" with those incidents as intended. (`#1725`_)
- OpenBSD 5 is now supported. (`#1584`_)
- The ``count-good-share-hosts`` field of file check results is now computed correctly.
(`#1115`_) Configuration/Behavior Changes ------------------------------ - The capability of the upload directory for the drop-upload frontend is now specified in the file ``private/drop_upload_dircap`` under the gateway's node directory, rather than in its ``tahoe.cfg``. (`#1593`_) Packaging Changes ----------------- - Tahoe-LAFS can be built correctly from a git repository as well as from darcs. Compatibility and Dependencies ------------------------------ - foolscap >= 0.6.3 is required, in order to make Tahoe-LAFS compatible with Twisted >= 11.1.0. (`#1788`_) - Versions 2.0.1 and 2.4 of PyCrypto are excluded. (`#1631`_, `#1574`_) .. _`#680`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/680 .. _`#1115`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1115 .. _`#1574`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1574 .. _`#1584`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1584 .. _`#1593`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1593 .. _`#1628`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1628 .. _`#1631`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1631 .. _`#1636`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1636 .. _`#1655`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1655 .. _`#1669`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1669 .. _`#1670`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1670 .. _`#1676`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1676 .. _`#1688`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1688 .. _`#1689`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1689 .. _`#1725`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1725 .. _`#1749`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1749 .. _`#1788`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1788 Release 1.9.1 (2012-01-12) '''''''''''''''''''''''''' Security-related Bugfix ----------------------- - Fix flaw that would allow servers to cause undetected corruption when retrieving the contents of mutable files (both SDMF and MDMF). (`#1654`_) .. _`#1654`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1654 Release 1.9.0 (2011-10-30) '''''''''''''''''''''''''' New Features ------------ - The most significant new feature in this release is MDMF: "Medium-size Distributed Mutable Files". Unlike standard SDMF files, these provide efficient partial-access (reading and modifying small portions of the file instead of the whole thing). MDMF is opt-in (it is not yet the default format for mutable files), both to ensure compatibility with previous versions, and because the algorithm does not yet meet memory-usage goals. Enable it with ``--format=MDMF`` in the CLI (``tahoe put`` and ``tahoe mkdir``), or the "format" radioboxes in the web interface. See ``__ for more details (`#393`_, `#1507`_) - A "blacklist" feature allows blocking access to specific files through a particular gateway. See the "Access Blacklist" section of ``__ for more details. (`#1425`_) - A "drop-upload" feature has been added, which allows you to upload files to a Tahoe-LAFS directory just by writing them to a local directory. This feature is experimental and should not be relied on to store the only copy of valuable data. It is currently available only on Linux. See ``__ for documentation. (`#1429`_) - The timeline of immutable downloads can be viewed using a zoomable and pannable JavaScript-based visualization. This is accessed using the 'timeline' link on the File Download Status page for the download, which can be reached from the Recent Uploads and Downloads page. 
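
As an illustration of the MDMF option described above, the new format can be selected from the CLI like this (a sketch; the ``tahoe:`` alias and the file/directory names are placeholders)::

  # upload a mutable file using the new MDMF format
  tahoe put --format=MDMF localfile.txt tahoe:remote.txt

  # create a mutable directory using the new MDMF format
  tahoe mkdir --format=MDMF tahoe:new-mdmf-dir
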
Configuration/Behavior Changes ------------------------------ - Prior to Tahoe-LAFS v1.3, the configuration of some node options could be specified using individual config files rather than via ``tahoe.cfg``. These files now cause an error if present. (`#1385`_) - Storage servers now calculate their remaining space based on the filesystem containing the ``storage/shares/`` directory. Previously they looked at the filesystem containing the ``storage/`` directory. This allows ``storage/shares/``, rather than ``storage/``, to be a mount point or a symlink pointing to another filesystem. (`#1384`_) - ``tahoe cp xyz MUTABLE`` will modify the existing mutable file instead of creating a new one. (`#1304`_) - The button for unlinking a file from its directory on a WUI directory listing is now labelled "unlink" rather than "del". (`#1104`_) Notable Bugfixes ---------------- - The security bugfix for the vulnerability allowing deletion of shares, detailed in the news for v1.8.3 below, is also included in this release. (`#1528`_) - Some cases of immutable upload, for example using the ``tahoe put`` and ``tahoe cp`` commands or SFTP, did not appear in the history of Recent Uploads and Downloads. (`#1079`_) - The memory footprint of the verifier has been reduced by serializing block fetches. (`#1395`_) - Large immutable downloads are now a little faster than in v1.8.3 (about 5% on a fast network). (`#1268`_) Packaging Changes ----------------- - The files related to Debian packaging have been removed from the Tahoe source tree, since they are now maintained as part of the official Debian packages. (`#1454`_) - The unmaintained FUSE plugins were removed from the source tree. See ``docs/frontends/FTP-and-SFTP.rst`` for how to mount a Tahoe filesystem on Unix via sshfs. (`#1409`_) - The Tahoe licenses now give explicit permission to combine Tahoe-LAFS with code distributed under the following additional open-source licenses (any version of each): * Academic Free License * Apple Public Source License * BitTorrent Open Source License * Lucent Public License * Jabber Open Source License * Common Development and Distribution License * Microsoft Public License * Microsoft Reciprocal License * Sun Industry Standards Source License * Open Software License Compatibility and Dependencies ------------------------------ - To resolve an incompatibility between Nevow and zope.interface (versions 3.6.3 and 3.6.4), Tahoe-LAFS now requires an earlier or later version of zope.interface. (`#1435`_) - The Twisted dependency has been raised to version 10.1 to ensure we no longer require pywin32 on Windows, the new drop-upload feature has the required support from Twisted on Linux, and that it is never necessary to patch Twisted in order to use the FTP frontend. (`#1274`_, `#1429`_, `#1438`_) - An explicit dependency on pyOpenSSL has been added, replacing the indirect dependency via the "secure_connections" option of foolscap. (`#1383`_) Minor Changes ------------- - A ``man`` page has been added (`#1420`_). All other docs are in ReST format. - The ``tahoe_files`` munin plugin reported an incorrect count of the number of share files. (`#1391`_) - Minor documentation updates: #627, #1104, #1225, #1297, #1342, #1404 - Other minor changes: #636, #1355, #1363, #1366, #1388, #1392, #1412, #1344, #1347, #1359, #1389, #1441, #1442, #1446, #1474, #1503 .. _`#393`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/393 .. _`#1079`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1079 .. 
_`#1104`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1104 .. _`#1268`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1268 .. _`#1274`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1274 .. _`#1304`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1304 .. _`#1383`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1383 .. _`#1384`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1384 .. _`#1385`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1385 .. _`#1391`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1391 .. _`#1395`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1395 .. _`#1409`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1409 .. _`#1420`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1420 .. _`#1425`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1425 .. _`#1429`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1429 .. _`#1435`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1435 .. _`#1438`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1438 .. _`#1454`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1454 .. _`#1507`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1507 Release 1.8.3 (2011-09-13) '''''''''''''''''''''''''' Security-related Bugfix ----------------------- - Fix flaw that would allow a person who knows a storage index of a file to delete shares of that file. (`#1528`_) - Remove corner cases in mutable file bounds management which could expose extra lease info or old share data (from prior versions of the mutable file) if someone with write authority to that mutable file exercised these corner cases in a way that no actual Tahoe-LAFS client does. (Probably not exploitable.) (`#1528`_) .. _`#1528`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1528 Release 1.8.2 (2011-01-30) '''''''''''''''''''''''''' Compatibility and Dependencies ------------------------------ - Tahoe is now compatible with Twisted-10.2 (released last month), as well as with earlier versions. The previous Tahoe-1.8.1 release failed to run against Twisted-10.2, raising an AttributeError on StreamServerEndpointService (`#1286`_) - Tahoe now depends upon the "mock" testing library, and the foolscap dependency was raised to 0.6.1 . It no longer requires pywin32 (which was used only on windows). Future developers should note that reactor.spawnProcess and derivatives may no longer be used inside Tahoe code. Other Changes ------------- - the default reserved_space value for new storage nodes is 1 GB (`#1208`_) - documentation is now in reStructuredText (.rst) format - "tahoe cp" should now handle non-ASCII filenames - the unmaintained Mac/Windows GUI applications have been removed (`#1282`_) - tahoe processes should appear in top and ps as "tahoe", not "python", on some unix platforms. (`#174`_) - "tahoe debug trial" can be used to run the test suite (`#1296`_) - the SFTP frontend now reports unknown sizes as "0" instead of "?", to improve compatibility with clients like FileZilla (`#1337`_) - "tahoe --version" should now report correct values in situations where 1.8.1 might have been wrong (`#1287`_) .. _`#1208`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1208 .. _`#1282`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1282 .. _`#1286`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1286 .. _`#1287`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1287 .. _`#1296`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1296 .. 
_`#1337`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1337 Release 1.8.1 (2010-10-28) '''''''''''''''''''''''''' Bugfixes and Improvements ------------------------- - Allow the repairer to improve the health of a file by uploading some shares, even if it cannot achieve the configured happiness threshold. This fixes a regression introduced between v1.7.1 and v1.8.0. (`#1212`_) - Fix a memory leak in the ResponseCache which is used during mutable file/directory operations. (`#1045`_) - Fix a regression and add a performance improvement in the downloader. This issue caused repair to fail in some special cases. (`#1223`_) - Fix a bug that caused 'tahoe cp' to fail for a grid-to-grid copy involving a non-ASCII filename. (`#1224`_) - Fix a rarely-encountered bug involving printing large strings to the console on Windows. (`#1232`_) - Perform ~ expansion in the --exclude-from filename argument to 'tahoe backup'. (`#1241`_) - The CLI's 'tahoe mv' and 'tahoe ln' commands previously would try to use an HTTP proxy if the HTTP_PROXY environment variable was set. These now always connect directly to the WAPI, thus avoiding giving caps to the HTTP proxy (and also avoiding failures in the case that the proxy is failing or requires authentication). (`#1253`_) - The CLI now correctly reports failure in the case that 'tahoe mv' fails to unlink the file from its old location. (`#1255`_) - 'tahoe start' now gives a more positive indication that the node has started. (`#71`_) - The arguments seen by 'ps' or other tools for node processes are now more useful (in particular, they include the path of the 'tahoe' script, rather than an obscure tool named 'twistd'). (`#174`_) Removed Features ---------------- - The tahoe start/stop/restart and node creation commands no longer accept the -m or --multiple option, for consistency between platforms. (`#1262`_) Packaging --------- - We now host binary packages so that users on certain operating systems can install without having a compiler. - Use a newer version of a dependency if needed, even if an older version is installed. This would previously cause a VersionConflict error. (`#1190`_) - Use a precompiled binary of a dependency if one with a sufficiently high version number is available, instead of attempting to compile the dependency from source, even if the source version has a higher version number. (`#1233`_) Documentation ------------- - All current documentation in .txt format has been converted to .rst format. (`#1225`_) - Added docs/backdoors.rst declaring that we won't add backdoors to Tahoe-LAFS, or add anything to facilitate government access to data. (`#1216`_) .. _`#71`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/71 .. _`#174`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/174 .. _`#1212`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1212 .. _`#1045`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1045 .. _`#1190`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1190 .. _`#1216`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1216 .. _`#1223`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1223 .. _`#1224`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1224 .. _`#1225`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1225 .. _`#1232`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1232 .. _`#1233`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1233 .. _`#1241`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1241 .. _`#1253`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1253 .. _`#1255`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1255 .. 
_`#1262`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1262 Release 1.8.0 (2010-09-23) '''''''''''''''''''''''''' New Features ------------ - A completely new downloader which improves performance and robustness of immutable-file downloads. It uses the fastest K servers to download the data in K-way parallel. It automatically fails over to alternate servers if servers fail in mid-download. It allows seeking to arbitrary locations in the file (the previous downloader which would only read the entire file sequentially from beginning to end). It minimizes unnecessary round trips and unnecessary bytes transferred to improve performance. It sends requests to fewer servers to reduce the load on servers (the previous one would send a small request to every server for every download) (`#287`_, `#288`_, `#448`_, `#798`_, `#800`_, `#990`_, `#1170`_, `#1191`_) - Non-ASCII command-line arguments and non-ASCII outputs now work on Windows. In addition, the command-line tool now works on 64-bit Windows. (`#1074`_) Bugfixes and Improvements ------------------------- - Document and clean up the command-line options for specifying the node's base directory. (`#188`_, `#706`_, `#715`_, `#772`_, `#1108`_) - The default node directory for Windows is ".tahoe" in the user's home directory, the same as on other platforms. (`#890`_) - Fix a case in which full cap URIs could be logged. (`#685`_, `#1155`_) - Fix bug in WUI in Python 2.5 when the system clock is set back to 1969. Now you can use Tahoe-LAFS with Python 2.5 and set your system clock to 1969 and still use the WUI. (`#1055`_) - Many improvements in code organization, tests, logging, documentation, and packaging. (`#983`_, `#1074`_, `#1108`_, `#1127`_, `#1129`_, `#1131`_, `#1166`_, `#1175`_) Dependency Updates ------------------ - on x86 and x86-64 platforms, pycryptopp >= 0.5.20 - pycrypto 2.2 is excluded due to a bug .. _`#188`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/188 .. _`#288`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/288 .. _`#448`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/448 .. _`#685`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/685 .. _`#706`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/706 .. _`#715`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/715 .. _`#772`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/772 .. _`#798`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/798 .. _`#800`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/800 .. _`#890`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/890 .. _`#983`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/983 .. _`#990`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/990 .. _`#1055`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1055 .. _`#1074`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1074 .. _`#1108`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1108 .. _`#1155`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1155 .. _`#1170`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1170 .. _`#1191`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1191 .. _`#1127`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1127 .. _`#1129`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1129 .. _`#1131`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1131 .. _`#1166`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1166 .. 
_`#1175`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1175 Release 1.7.1 (2010-07-18) '''''''''''''''''''''''''' Bugfixes and Improvements ------------------------- - Fix bug in which uploader could fail with AssertionFailure or report that it had achieved servers-of-happiness when it hadn't. (`#1118`_) - Fix bug in which servers could get into a state where they would refuse to accept shares of a certain file (`#1117`_) - Add init scripts for managing the gateway server on Debian/Ubuntu (`#961`_) - Fix bug where server version number was always 0 on the welcome page (`#1067`_) - Add new command-line command "tahoe unlink" as a synonym for "tahoe rm" (`#776`_) - The FTP frontend now encrypts its temporary files, protecting their contents from an attacker who is able to read the disk. (`#1083`_) - Fix IP address detection on FreeBSD 7, 8, and 9 (`#1098`_) - Fix minor layout issue in the Web User Interface with Internet Explorer (`#1097`_) - Fix rarely-encountered incompatibility between Twisted logging utility and the new unicode support added in v1.7.0 (`#1099`_) - Forward-compatibility improvements for non-ASCII caps (`#1051`_) Code improvements ----------------- - Simplify and tidy-up directories, unicode support, test code (`#923`_, `#967`_, `#1072`_) .. _`#776`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/776 .. _`#923`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/923 .. _`#961`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/961 .. _`#967`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/967 .. _`#1051`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1051 .. _`#1067`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1067 .. _`#1072`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1072 .. _`#1083`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1083 .. _`#1097`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1097 .. _`#1098`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1098 .. _`#1099`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1099 .. _`#1117`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1117 .. _`#1118`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1118 Release 1.7.0 (2010-06-18) '''''''''''''''''''''''''' New Features ------------ - SFTP support (`#1037`_) Your Tahoe-LAFS gateway now acts like a full-fledged SFTP server. It has been tested with sshfs to provide a virtual filesystem in Linux. Many users have asked for this feature. We hope that it serves them well! See the `FTP-and-SFTP.rst`_ document to get started. - support for non-ASCII character encodings (`#534`_) Tahoe-LAFS now correctly handles filenames containing non-ASCII characters on all supported platforms: - when reading files in from the local filesystem (such as when you run "tahoe backup" to back up your local files to a Tahoe-LAFS grid); - when writing files out to the local filesystem (such as when you run "tahoe cp -r" to recursively copy files out of a Tahoe-LAFS grid); - when displaying filenames to the terminal (such as when you run "tahoe ls"), subject to limitations of the terminal and locale; - when parsing command-line arguments, except on Windows. - Servers of Happiness (`#778`_) Tahoe-LAFS now measures during immutable file upload to see how well distributed it is across multiple servers. It aborts the upload if the pieces of the file are not sufficiently well-distributed. This behavior is controlled by a configuration parameter called "servers of happiness". 
With the default settings for its erasure coding, Tahoe-LAFS generates 10 shares for each file, such that any 3 of those shares are sufficient to recover the file. The default value of "servers of happiness" is 7, which means that Tahoe-LAFS will guarantee that there are at least 7 servers holding some of the shares, such that any 3 of those servers can completely recover your file. The new upload code also distributes the shares better than the previous version in some cases and takes better advantage of pre-existing shares (when a file has already been previously uploaded). See the `architecture.rst`_ document [3] for details. Bugfixes and Improvements ------------------------- - Premature abort of upload if some shares were already present and some servers fail. (`#608`_) - python ./setup.py install -- can't create or remove files in install directory. (`#803`_) - Network failure => internal TypeError. (`#902`_) - Install of Tahoe on CentOS 5.4. (`#933`_) - CLI option --node-url now supports https url. (`#1028`_) - HTML/CSS template files were not correctly installed under Windows. (`#1033`_) - MetadataSetter does not enforce restriction on setting "tahoe" subkeys. (`#1034`_) - ImportError: No module named setuptools_darcs.setuptools_darcs. (`#1054`_) - Renamed Title in xhtml files. (`#1062`_) - Increase Python version dependency to 2.4.4, to avoid a critical CPython security bug. (`#1066`_) - Typo correction for the munin plugin tahoe_storagespace. (`#968`_) - Fix warnings found by pylint. (`#973`_) - Changing format of some documentation files. (`#1027`_) - the misc/ directory was tidied up. (`#1068`_) - The 'ctime' and 'mtime' metadata fields are no longer written except by "tahoe backup". (`#924`_) - Unicode filenames in Tahoe-LAFS directories are normalized so that names that differ only in how accents are encoded are treated as the same. (`#1076`_) - Various small improvements to documentation. (`#937`_, `#911`_, `#1024`_, `#1082`_) Removals -------- - The 'tahoe debug consolidate' subcommand (for converting old allmydata Windows client backups to a newer format) has been removed. Dependency Updates ------------------ - the Python version dependency is raised to 2.4.4 in some cases (2.4.3 for Redhat-based Linux distributions, 2.4.2 for UCS-2 builds) (`#1066`_) - pycrypto >= 2.0.1 - pyasn1 >= 0.0.8a - mock (only required by unit tests) .. _`#534`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/534 .. _`#608`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/608 .. _`#778`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/778 .. _`#803`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/803 .. _`#902`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/902 .. _`#911`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/911 .. _`#924`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/924 .. _`#937`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/937 .. _`#933`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/933 .. _`#968`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/968 .. _`#973`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/973 .. _`#1024`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1024 .. _`#1027`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1027 .. _`#1028`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1028 .. _`#1033`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1033 .. _`#1034`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1034 .. _`#1037`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1037 .. _`#1054`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1054 ..
_`#1062`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1062 .. _`#1066`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1066 .. _`#1068`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1068 .. _`#1076`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1076 .. _`#1082`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1082 .. _architecture.rst: docs/architecture.rst .. _FTP-and-SFTP.rst: docs/frontends/FTP-and-SFTP.rst Release 1.6.1 (2010-02-27) '''''''''''''''''''''''''' Bugfixes -------- - Correct handling of Small Immutable Directories Immutable directories can now be deep-checked and listed in the web UI in all cases. (In v1.6.0, some operations, such as deep-check, on a directory graph that included very small immutable directories, would result in an exception causing the whole operation to abort.) (`#948`_) Usability Improvements ---------------------- - Improved user interface messages and error reporting. (`#681`_, `#837`_, `#939`_) - The timeouts for operation handles have been greatly increased, so that you can view the results of an operation up to 4 days after it has completed. After viewing them for the first time, the results are retained for a further day. (`#577`_) Release 1.6.0 (2010-02-01) '''''''''''''''''''''''''' New Features ------------ - Immutable Directories Tahoe-LAFS can now create and handle immutable directories. (`#607`_, `#833`_, `#931`_) These are read just like normal directories, but are "deep-immutable", meaning that all their children (and everything reachable from those children) must be immutable objects (i.e. immutable or literal files, and other immutable directories). These directories must be created in a single webapi call that provides all of the children at once. (Since they cannot be changed after creation, the usual create/add/add sequence cannot be used.) They have URIs that start with "URI:DIR2-CHK:" or "URI:DIR2-LIT:", and are described on the human-facing web interface (aka the "WUI") with a "DIR-IMM" abbreviation (as opposed to "DIR" for the usual read-write directories and "DIR-RO" for read-only directories). Tahoe-LAFS releases before 1.6.0 cannot read the contents of an immutable directory. 1.5.0 will tolerate their presence in a directory listing (and display it as "unknown"). 1.4.1 and earlier cannot tolerate them: a DIR-IMM child in any directory will prevent the listing of that directory. Immutable directories are repairable, just like normal immutable files. The webapi "POST t=mkdir-immutable" call is used to create immutable directories. See `webapi.rst`_ for details. - "tahoe backup" now creates immutable directories, backupdb has dircache The "tahoe backup" command has been enhanced to create immutable directories (in previous releases, it created read-only mutable directories) (`#828`_). This is significantly faster, since it does not need to create an RSA keypair for each new directory. Also "DIR-IMM" immutable directories are repairable, unlike "DIR-RO" read-only mutable directories at present. (A future Tahoe-LAFS release should also be able to repair DIR-RO.) In addition, the backupdb (used by "tahoe backup" to remember what it has already copied) has been enhanced to store information about existing immutable directories. This allows it to re-use directories that have moved but still contain identical contents, or that have been deleted and later replaced. (The 1.5.0 "tahoe backup" command could only re-use directories that were in the same place as they were in the immediately previous backup.) 
With this change, the backup process no longer needs to read the previous snapshot out of the Tahoe-LAFS grid, reducing the network load considerably. (`#606`_) A "null backup" (in which nothing has changed since the previous backup) will require only two Tahoe-side operations: one to add an Archives/$TIMESTAMP entry, and a second to update the Latest/ link. On the local disk side, it will readdir() all your local directories and stat() all your local files. If you've been using "tahoe backup" for a while, you will notice that your first use of it after upgrading to 1.6.0 may take a long time: it must create proper immutable versions of all the old read-only mutable directories. This process won't take as long as the initial backup (where all the file contents had to be uploaded too): it will require time proportional to the number and size of your directories. After this initial pass, all subsequent passes should take a tiny fraction of the time. As noted above, Tahoe-LAFS versions earlier than 1.5.0 cannot list a directory containing an immutable subdirectory. Tahoe-LAFS versions earlier than 1.6.0 cannot read the contents of an immutable directory. The "tahoe backup" command has been improved to skip over unreadable objects (like device files, named pipes, and files with permissions that prevent the command from reading their contents), instead of throwing an exception and terminating the backup process. It also skips over symlinks, because these cannot be represented faithfully in the Tahoe-side filesystem. A warning message will be emitted each time something is skipped. (`#729`_, `#850`_, `#641`_) - "create-node" command added, "create-client" now implies --no-storage The basic idea behind Tahoe-LAFS's client+server and client-only processes is that you are creating a general-purpose Tahoe-LAFS "node" process, which has several components that can be activated. Storage service is one of these optional components, as are the Helper, the FTP server, and the SFTP server. Web gateway functionality is nominally on this list, but it is always active; a future release will make it optional. There are three special-purpose servers that can't currently be run as a component in a node: introducer, key-generator, and stats-gatherer. So now "tahoe create-node" will create a Tahoe-LAFS node process, and after creation you can edit its tahoe.cfg to enable or disable the desired services. It is a more general-purpose replacement for "tahoe create-client". The default configuration has storage service enabled. For convenience, the "--no-storage" argument makes a tahoe.cfg file that disables storage service. (`#760`_) "tahoe create-client" has been changed to create a Tahoe-LAFS node without a storage service. It is equivalent to "tahoe create-node --no-storage". This helps to reduce the confusion surrounding the use of a command with "client" in its name to create a storage *server*. Use "tahoe create-client" to create a purely client-side node. If you want to offer storage to the grid, use "tahoe create-node" instead. In the future, other services will be added to the node, and they will be controlled through options in tahoe.cfg . The most important of these services may get additional --enable-XYZ or --disable-XYZ arguments to "tahoe create-node". - Performance Improvements Download of immutable files begins as soon as the downloader has located the K necessary shares (`#928`_, `#287`_).
In both the previous and current releases, a downloader will first issue queries to all storage servers on the grid to locate shares before it begins downloading the shares. In previous releases of Tahoe-LAFS, download would not begin until all storage servers on the grid had replied to the query, at which point K shares would be chosen for download from among the shares that were located. In this release, download begins as soon as any K shares are located. This means that downloads start sooner, which is particularly important if there is a server on the grid that is extremely slow or even hung in such a way that it will never respond. In previous releases such a server would have a negative impact on all downloads from that grid. In this release, such a server will have no impact on downloads, as long as K shares can be found on other, quicker, servers. This also means that downloads now use the "best-alacrity" servers that they talk to, as measured by how quickly the servers reply to the initial query. This might cause downloads to go faster, especially on grids with heterogeneous servers or geographical dispersion. Minor Changes ------------- - The webapi acquired a new "t=mkdir-with-children" command, to create and populate a directory in a single call. This is significantly faster than using separate "t=mkdir" and "t=set-children" operations (it uses one gateway-to-grid roundtrip, instead of three or four). (`#533`_) - The t=set-children (note the hyphen) operation is now documented in webapi.rst, and is the new preferred spelling of the old t=set_children (with an underscore). The underscore version remains for backwards compatibility. (`#381`_, `#927`_) - The tracebacks produced by errors in CLI tools should now be in plain text, instead of HTML (which is unreadable outside of a browser). (`#646`_) - The [storage]reserved_space configuration knob (which causes the storage server to refuse shares when available disk space drops below a threshold) should work on Windows now, not just UNIX. (`#637`_) - "tahoe cp" should now exit with status "1" if it cannot figure out a suitable target filename, such as when you copy from a bare filecap. (`#761`_) - "tahoe get" no longer creates a zero-length file upon error. (`#121`_) - "tahoe ls" can now list single files. (`#457`_) - "tahoe deep-check --repair" should tolerate repair failures now, instead of halting traversal. (`#874`_, `#786`_) - "tahoe create-alias" no longer corrupts the aliases file if it had previously been edited to have no trailing newline. (`#741`_) - Many small packaging improvements were made to facilitate the "tahoe-lafs" package being included in Ubuntu. Several mac/win32 binary libraries were removed, some figleaf code-coverage files were removed, a bundled copy of darcsver-1.2.1 was removed, and additional licensing text was added. - Several DeprecationWarnings for python2.6 were silenced. (`#859`_) - The checker --add-lease option would sometimes fail for shares stored on old (Tahoe v1.2.0) servers. (`#875`_) - The documentation for installing on Windows (docs/quickstart.rst) has been improved. (`#773`_) For other changes not mentioned here, see . To include the tickets mentioned above, go to . .. _`#121`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/121 .. _`#287`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/287 .. _`#381`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/381 .. _`#457`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/457 .. _`#533`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/533 .. 
_`#577`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/577 .. _`#606`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/606 .. _`#607`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/607 .. _`#637`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/637 .. _`#641`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/641 .. _`#646`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/646 .. _`#681`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/681 .. _`#729`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/729 .. _`#741`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/741 .. _`#760`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/760 .. _`#761`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/761 .. _`#773`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/773 .. _`#786`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/786 .. _`#828`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/828 .. _`#833`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/833 .. _`#859`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/859 .. _`#874`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/874 .. _`#875`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/875 .. _`#931`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/931 .. _`#837`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/837 .. _`#850`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/850 .. _`#927`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/927 .. _`#928`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/928 .. _`#939`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/939 .. _`#948`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/948 .. _webapi.rst: docs/frontends/webapi.rst Release 1.5.0 (2009-08-01) '''''''''''''''''''''''''' Improvements ------------ - Uploads of immutable files now use pipelined writes, improving upload speed slightly (10%) over high-latency connections. (`#392`_) - Processing large directories has been sped up, by removing an O(N^2) algorithm from the dirnode decoding path and retaining unmodified encrypted entries. (`#750`_, `#752`_) - The human-facing web interface (aka the "WUI") received a significant CSS makeover by Kevin Reid, making it much prettier and easier to read. The WUI "check" and "deep-check" forms now include a "Renew Lease" checkbox, mirroring the CLI --add-lease option, so leases can be added or renewed from the web interface. - The CLI "tahoe mv" command now refuses to overwrite directories. (`#705`_) - The CLI "tahoe webopen" command, when run without arguments, will now bring up the "Welcome Page" (node status and mkdir/upload forms). - The 3.5MB limit on mutable files was removed, so it should be possible to upload arbitrarily-sized mutable files. Note, however, that the data format and algorithm remain the same, so using mutable files still requires bandwidth, computation, and RAM in proportion to the size of the mutable file. (`#694`_) - This version of Tahoe-LAFS will tolerate directory entries that contain filecap formats which it does not recognize: files and directories from the future. This should improve the user experience (for 1.5.0 users) when we add new cap formats in the future. Previous versions would fail badly, preventing the user from seeing or editing anything else in those directories. These unrecognized objects can be renamed and deleted, but obviously not read or written. Also they cannot generally be copied. (`#683`_) Bugfixes -------- - deep-check-and-repair now tolerates read-only directories, such as the ones produced by the "tahoe backup" CLI command. Read-only directories and mutable files are checked, but not repaired.
Previous versions threw an exception when attempting the repair and failed to process the remaining contents. We cannot yet repair these read-only objects, but at least this version allows the rest of the check+repair to proceed. (`#625`_) - A bug in 1.4.1 which caused a server to be listed multiple times (and frequently broke all connections to that server) was fixed. (`#653`_) - The plaintext-hashing code was removed from the Helper interface, removing the Helper's ability to mount a partial-information-guessing attack. (`#722`_) Platform/packaging changes -------------------------- - Tahoe-LAFS now runs on NetBSD, OpenBSD, ArchLinux, and NixOS, and on an embedded system based on an ARM CPU running at 266 MHz. - Unit test timeouts have been raised to allow the tests to complete on extremely slow platforms like embedded ARM-based NAS boxes, which may take several hours to run the test suite. An ARM-specific data-corrupting bug in an older version of Crypto++ (5.5.2) was identified: ARM-users are encouraged to use recent Crypto++/pycryptopp which avoids this problem. - Tahoe-LAFS now requires a SQLite library, either the sqlite3 that comes built-in with python2.5/2.6, or the add-on pysqlite2 if you're using python2.4. In the previous release, this was only needed for the "tahoe backup" command: now it is mandatory. - Several minor documentation updates were made. - To help get Tahoe-LAFS into Linux distributions like Fedora and Debian, packaging improvements are being made in both Tahoe-LAFS and related libraries like pycryptopp and zfec. - The Crypto++ library included in the pycryptopp package has been upgraded to version 5.6.0 of Crypto++, which includes a more efficient implementation of SHA-256 in assembly for x86 or amd64 architectures. dependency updates ------------------ - foolscap-0.4.1 - no python-2.4.0 or 2.4.1 (2.4.2 is good) (they contained a bug in base64.b32decode) - avoid python-2.6 on windows with mingw: compiler issues - python2.4 requires pysqlite2 (2.5,2.6 does not) - no python-3.x - pycryptopp-0.5.15 .. _#392: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/392 .. _#625: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/625 .. _#653: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/653 .. _#683: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/683 .. _#694: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/694 .. _#705: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/705 .. _#722: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/722 .. _#750: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/750 .. _#752: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/752 Release 1.4.1 (2009-04-13) '''''''''''''''''''''''''' Garbage Collection ------------------ - The big feature for this release is the implementation of garbage collection, allowing Tahoe storage servers to delete shares for old deleted files. When enabled, this uses a "mark and sweep" process: clients are responsible for updating the leases on their shares (generally by running "tahoe deep-check --add-lease"), and servers are allowed to delete any share which does not have an up-to-date lease. The process is described in detail in `garbage-collection.rst`_. The server must be configured to enable garbage-collection, by adding directives to the [storage] section that define an age limit for shares. The default configuration will not delete any shares. Both servers and clients should be upgraded to this release to make the garbage-collection as pleasant as possible. 
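As a rough illustration of the kind of [storage] directives involved (the exact option names and formats are specified in `garbage-collection.rst`_, so treat this as an assumed sketch rather than a definitive recipe), a server that expires shares whose leases are more than three months old might be configured along these lines::

    [storage]
    enabled = true
    # lease-expiration sketch; confirm the directive names against garbage-collection.rst
    expire.enabled = true
    expire.mode = age
    expire.override_lease_duration = 3 months

On the client side, the matching "mark" step is simply a periodic run of "tahoe deep-check --add-lease" over the rootcaps you care about, for example from a cron job.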
1.2.0 servers have code to perform the update-lease operation but it suffers from a fatal bug, while 1.3.0 servers have update-lease but will return an exception for unknown storage indices, causing clients to emit an Incident for each exception, slowing the add-lease process down to a crawl. 1.1.0 servers did not have the add-lease operation at all. Security/Usability Problems Fixed --------------------------------- - A super-linear algorithm in the Merkle Tree code was fixed, which previously caused e.g. download of a 10GB file to take several hours before the first byte of plaintext could be produced. The new "alacrity" is about 2 minutes. A future release should reduce this to a few seconds by fixing ticket `#442`_. - The previous version permitted a small timing attack (due to our use of strcmp) against the write-enabler and lease-renewal/cancel secrets. An attacker who could measure response-time variations of approximately 3ns against a very noisy background time of about 15ms might be able to guess these secrets. We do not believe this attack was actually feasible. This release closes the attack by first hashing the two strings to be compared with a random secret. webapi changes -------------- - In most cases, HTML tracebacks will only be sent if an "Accept: text/html" header was provided with the HTTP request. This will generally cause browsers to get an HTMLized traceback but send regular text/plain tracebacks to non-browsers (like the CLI clients). More errors have been mapped to useful HTTP error codes. - The streaming webapi operations (deep-check and manifest) now have a way to indicate errors (an output line that starts with "ERROR" instead of being legal JSON). See `webapi.rst`_ for details. - The storage server now has its own status page (at /storage), linked from the Welcome page. This page shows progress and results of the two new share-crawlers: one which merely counts shares (to give an estimate of how many files/directories are being stored in the grid), the other examines leases and reports how much space would be freed if GC were enabled. The page also shows how much disk space is present, used, reserved, and available for the Tahoe server, and whether the server is currently running in "read-write" mode or "read-only" mode. - When a directory node cannot be read (perhaps because of insufficient shares), a minimal webapi page is created so that the "more-info" links (including a Check/Repair operation) will still be accessible. - A new "reliability" page was added, with the beginnings of work on a statistical loss model. You can tell this page how many servers you are using and their independent failure probabilities, and it will tell you the likelihood that an arbitrary file will survive each repair period. The "numpy" package must be installed to access this page. A partial paper, written by Shawn Willden, has been added to docs/proposed/lossmodel.lyx . CLI changes ----------- - "tahoe check" and "tahoe deep-check" now accept an "--add-lease" argument, to update a lease on all shares. This is the "mark" side of garbage collection. - In many cases, CLI error messages have been improved: the ugly HTMLized traceback has been replaced by a normal python traceback. - "tahoe deep-check" and "tahoe manifest" now have better error reporting. "tahoe cp" is now non-verbose by default. - "tahoe backup" now accepts several "--exclude" arguments, to ignore certain files (like editor temporary files and version-control metadata) during backup.
- On Windows, the CLI now accepts local paths like "c:\dir\file.txt", which previously were interpreted as Tahoe paths using a "c:" alias. - The "tahoe restart" command now uses "--force" by default (meaning it will start a node even if it didn't look like there was one already running). - The "tahoe debug consolidate" command was added. This takes a series of independent timestamped snapshot directories (such as those created by the allmydata.com Windows backup program, or a series of "tahoe cp -r" commands) and creates new snapshots that use shared read-only directories whenever possible (like the output of "tahoe backup"). In the most common case (when the snapshots are fairly similar), the result will use significantly fewer directories than the original, allowing "deep-check" and similar tools to run much faster. In some cases, the speedup can be an order of magnitude or more. This tool is still somewhat experimental, and only needs to be run on large backups produced by something other than "tahoe backup", so it was placed under the "debug" category. - "tahoe cp -r --caps-only tahoe:dir localdir" is a diagnostic tool which, instead of copying the full contents of files into the local directory, merely copies their filecaps. This can be used to verify the results of a "consolidation" operation. other fixes ----------- - The codebase no longer raises RuntimeError as a kind of assert(). Specific exception classes were created for each previous instance of RuntimeError. - Many unit tests were changed to use a non-network test harness, speeding them up considerably. - Deep-traversal operations (manifest and deep-check) now walk individual directories in alphabetical order. Occasional turn breaks are inserted to prevent a stack overflow when traversing directories with hundreds of entries. - The experimental SFTP server had its path-handling logic changed slightly, to accommodate more SFTP clients, although there are still issues (`#645`_). .. _#442: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/442 .. _#645: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/645 .. _garbage-collection.rst: docs/garbage-collection.rst Release 1.3.0 (2009-02-13) '''''''''''''''''''''''''' Checker/Verifier/Repairer ------------------------- - The primary focus of this release has been writing a checker / verifier / repairer for files and directories. "Checking" is the act of asking storage servers whether they have a share for the given file or directory: if there are not enough shares available, the file or directory will be unrecoverable. "Verifying" is the act of downloading and cryptographically asserting that the server's share is undamaged: it requires more work (bandwidth and CPU) than checking, but can catch problems that simple checking cannot. "Repair" is the act of replacing missing or damaged shares with new ones. - This release includes a full checker, a partial verifier, and a partial repairer. The repairer is able to handle missing shares: new shares are generated and uploaded to make up for the missing ones. This is currently the best application of the repairer: to replace shares that were lost because of server departure or permanent drive failure. - The repairer in this release is somewhat able to handle corrupted shares. The limitations are: - Immutable verifier is incomplete: not all shares are used, and not all fields of those shares are verified. Therefore the immutable verifier has only a moderate chance of detecting corrupted shares.
- The mutable verifier is mostly complete: all shares are examined, and most fields of the shares are validated. - The storage server protocol offers no way for the repairer to replace or delete immutable shares. If corruption is detected, the repairer will upload replacement shares to other servers, but the corrupted shares will be left in place. - read-only directories and read-only mutable files must be repaired by someone who holds the write-cap: the read-cap is insufficient. Moreover, the deep-check-and-repair operation will halt with an error if it attempts to repair one of these read-only objects. - Some forms of corruption can cause both download and repair operations to fail. A future release will fix this, since download should be tolerant of any corruption as long as there are at least 'k' valid shares, and repair should be able to fix any file that is downloadable. - If the downloader, verifier, or repairer detects share corruption, the servers which provided the bad shares will be notified (via a file placed in the BASEDIR/storage/corruption-advisories directory) so their operators can manually delete the corrupted shares and investigate the problem. In addition, the "incident gatherer" mechanism will automatically report share corruption to an incident gatherer service, if one is configured. Note that corrupted shares indicate hardware failures, serious software bugs, or malice on the part of the storage server operator, so a corrupted share should be considered highly unusual. - By periodically checking/repairing all files and directories, objects in the Tahoe filesystem remain resistant to recoverability failures due to missing and/or broken servers. - This release includes a wapi mechanism to initiate checks on individual files and directories (with or without verification, and with or without automatic repair). A related mechanism is used to initiate a "deep-check" on a directory: recursively traversing the directory and its children, checking (and/or verifying/repairing) everything underneath. Both mechanisms can be run with an "output=JSON" argument, to obtain machine-readable check/repair status results. These results include a copy of the filesystem statistics from the "deep-stats" operation (including total number of files, size histogram, etc). If repair is possible, a "Repair" button will appear on the results page. - The client web interface now features some extra buttons to initiate check and deep-check operations. When these operations finish, they display a results page that summarizes any problems that were encountered. All long-running deep-traversal operations, including deep-check, use a start-and-poll mechanism, to avoid depending upon a single long-lived HTTP connection. `webapi.rst`_ has details. Efficient Backup ---------------- - The "tahoe backup" command is new in this release, which creates efficient versioned backups of a local directory. Given a local pathname and a target Tahoe directory, this will create a read-only snapshot of the local directory in $target/Archives/$timestamp. It will also create $target/Latest, which is a reference to the latest such snapshot. Each time you run "tahoe backup" with the same source and target, a new $timestamp snapshot will be added. These snapshots will share directories that have not changed since the last backup, to speed up the process and minimize storage requirements. 
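As a concrete illustration (the local path and the "backups:" alias below are purely illustrative), a nightly cron job might run::

    tahoe backup ~/Documents backups:Documents

so that backups:Documents/Archives/ accumulates one timestamped read-only snapshot per run, while backups:Documents/Latest always refers to the most recent one.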
In addition, a small database is used to keep track of which local files have been uploaded already, to avoid uploading them a second time. This drastically reduces the work needed to do a "null backup" (when nothing has changed locally), making "tahoe backup" suitable to run from a daily cronjob. Note that the "tahoe backup" CLI command must be used in conjunction with a 1.3.0-or-newer Tahoe client node; there was a bug in the 1.2.0 webapi implementation that would prevent the last step (create $target/Latest) from working. Large Files ----------- - The 12GiB (approximate) immutable-file-size limitation is lifted. This release knows how to handle so-called "v2 immutable shares", which permit immutable files of up to about 18 EiB (about 3*10^14). These v2 shares are created if the file to be uploaded is too large to fit into v1 shares. v1 shares are created if the file is small enough to fit into them, so that files created with tahoe-1.3.0 can still be read by earlier versions if they are not too large. Note that storage servers also had to be changed to support larger files, and this release is the first release in which they are able to do that. Clients will detect which servers are capable of supporting large files on upload and will not attempt to upload shares of a large file to a server which doesn't support it. FTP/SFTP Server --------------- - Tahoe now includes experimental FTP and SFTP servers. When configured with a suitable method to translate username+password into a root directory cap, they provide simple access to the virtual filesystem. Remember that FTP is completely unencrypted: passwords, filenames, and file contents are all sent over the wire in cleartext, so FTP should only be used on a local (127.0.0.1) connection. This feature is still in development: there are no unit tests yet, and behavior with respect to Unicode filenames is uncertain. Please see `FTP-and-SFTP.rst`_ for configuration details. (`#512`_, `#531`_) CLI Changes ----------- - This release adds the 'tahoe create-alias' command, which is a combination of 'tahoe mkdir' and 'tahoe add-alias'. This also allows you to start using a new tahoe directory without exposing its URI in the argv list, which is publicly visible (through the process table) on most unix systems. Thanks to Kevin Reid for bringing this issue to our attention. - The single-argument form of "tahoe put" was changed to create an unlinked file. I.e. "tahoe put bar.txt" will take the contents of a local "bar.txt" file, upload them to the grid, and print the resulting read-cap; the file will not be attached to any directories. This seemed a bit more useful than the previous behavior (copy stdin, upload to the grid, attach the resulting file into your default tahoe: alias in a child named 'bar.txt'). - "tahoe put" was also fixed to handle mutable files correctly: "tahoe put bar.txt URI:SSK:..." will read the contents of the local bar.txt and use them to replace the contents of the given mutable file. - The "tahoe webopen" command was modified to accept aliases. This means "tahoe webopen tahoe:" will cause your web browser to open to a "wui" page that gives access to the directory associated with the default "tahoe:" alias. It should also accept leading slashes, like "tahoe webopen tahoe:/stuff".
- Many esoteric debugging commands were moved down into a "debug" subcommand: - tahoe debug dump-cap - tahoe debug dump-share - tahoe debug find-shares - tahoe debug catalog-shares - tahoe debug corrupt-share The last command ("tahoe debug corrupt-share") flips a random bit of the given local sharefile. This is used to test the file verifying/repairing code, and obviously should not be used on user data. The CLI might not correctly handle arguments which contain non-ASCII characters in Tahoe v1.3 (although depending on your platform it might, especially if your platform can be configured to pass such characters on the command-line in UTF-8 encoding). See https://tahoe-lafs.org/trac/tahoe-lafs/ticket/565 for details. Web changes ----------- - The "default webapi port", used when creating a new client node (and in the getting-started documentation), was changed from 8123 to 3456, to reduce confusion when Tahoe is accessed through a Firefox browser on which the "Torbutton" extension has been installed. Port 8123 is occasionally used as a Tor control port, so Torbutton adds 8123 to Firefox's list of "banned ports" to avoid CSRF attacks against Tor. Once 8123 is banned, it is difficult to diagnose why you can no longer reach a Tahoe node, so the Tahoe default was changed. Note that 3456 is reserved by IANA for the "vat" protocol, but there are arguably more Torbutton+Tahoe users than vat users these days. Note that this will only affect newly-created client nodes. Pre-existing client nodes, created by earlier versions of tahoe, may still be listening on 8123. - All deep-traversal operations (start-manifest, start-deep-size, start-deep-stats, start-deep-check) now use a start-and-poll approach, instead of using a single (fragile) long-running synchronous HTTP connection. All these "start-" operations use POST instead of GET. The old "GET manifest", "GET deep-size", and "POST deep-check" operations have been removed. - The new "POST start-manifest" operation, when it finally completes, results in a table of (path,cap), instead of the list of verifycaps produced by the old "GET manifest". The table is available in several formats: use output=html, output=text, or output=json to choose one. The JSON output also includes stats, and a list of verifycaps and storage-index strings. The "return_to=" and "when_done=" arguments have been removed from the t=check and deep-check operations. - The top-level status page (/status) now has a machine-readable form, via "/status/?t=json". This includes information about the currently-active uploads and downloads, which may be useful for frontends that wish to display progress information. There is no easy way to correlate the activities displayed here with recent wapi requests, however. - Any files in BASEDIR/public_html/ (configurable) will be served in response to requests in the /static/ portion of the URL space. This will simplify the deployment of javascript-based frontends that can still access wapi calls by conforming to the (regrettable) "same-origin policy". - The welcome page now has a "Report Incident" button, which is tied into the "Incident Gatherer" machinery. If the node is attached to an incident gatherer (via log_gatherer.furl), then pushing this button will cause an Incident to be signalled: this means recent log events are aggregated and sent in a bundle to the gatherer.
The user can push this button after something strange takes place (and they can provide a short message to go along with it), and the relevant data will be delivered to a centralized incident-gatherer for later processing by operations staff. - The "HEAD" method should now work correctly, in addition to the usual "GET", "PUT", and "POST" methods. "HEAD" is supposed to return exactly the same headers as "GET" would, but without any of the actual response body data. For mutable files, this now does a brief mapupdate (to figure out the size of the file that would be returned), without actually retrieving the file's contents. - The "GET" operation on files can now support the HTTP "Range:" header, allowing requests for partial content. This allows certain media players to correctly stream audio and movies out of a Tahoe grid. The current implementation uses a disk-based cache in BASEDIR/private/cache/download , which holds the plaintext of the files being downloaded. Future implementations might not use this cache. GET for immutable files now returns an ETag header. - Each file and directory now has a "Show More Info" web page, which contains much of the information that was crammed into the directory page before. This includes readonly URIs, storage index strings, object type, buttons to control checking/verifying/repairing, and deep-check/deep-stats buttons (for directories). For mutable files, the "replace contents" upload form has been moved here too. As a result, the directory page is now much simpler and cleaner, and several potentially-misleading links (like t=uri) are now gone. - Slashes are discouraged in Tahoe file/directory names, since they cause problems when accessing the filesystem through the wapi. However, there are a couple of accidental ways to generate such names. This release tries to make it easier to correct such mistakes by escaping slashes in several places, allowing slashes in the t=info and t=delete commands, and in the source (but not the target) of a t=rename command. Packaging --------- - Tahoe's dependencies have been extended to require the "[secure_connections]" feature from Foolscap, which will cause pyOpenSSL to be required and/or installed. If OpenSSL and its development headers are already installed on your system, this can occur automatically. Tahoe now uses pollreactor (instead of the default selectreactor) to work around a bug between pyOpenSSL and the most recent release of Twisted (8.1.0). This bug only affects unit tests (hang during shutdown), and should not impact regular use. - The Tahoe source code tarballs now come in two different forms: regular and "sumo". The regular tarball contains just Tahoe, nothing else. When building from the regular tarball, the build process will download any unmet dependencies from the internet (starting with the index at PyPI) so it can build and install them. The "sumo" tarball contains copies of all the libraries that Tahoe requires (foolscap, twisted, zfec, etc), so using the "sumo" tarball should not require any internet access during the build process. This can be useful if you want to build Tahoe while on an airplane, a desert island, or other bandwidth-limited environments. - Similarly, tahoe-lafs.org now hosts a "tahoe-deps" tarball which contains the latest versions of all these dependencies. 
This tarball, located at https://tahoe-lafs.org/source/tahoe/deps/tahoe-deps.tar.gz, can be unpacked in the tahoe source tree (or in its parent directory), and the build process should satisfy its downloading needs from it instead of reaching out to PyPI. This can be useful if you want to build Tahoe from a darcs checkout while on that airplane or desert island. - Because of the previous two changes ("sumo" tarballs and the "tahoe-deps" bundle), most of the files have been removed from misc/dependencies/ . This brings the regular Tahoe tarball down to 2MB (compressed), and the darcs checkout (without history) to about 7.6MB. A full darcs checkout will still be fairly large (because of the historical patches which included the dependent libraries), but a 'lazy' one should now be small. - The default "make" target is now an alias for "setup.py build", which itself is an alias for "setup.py develop --prefix support", with some extra work before and after (see setup.cfg). Most of the complicated platform-dependent code in the Makefile was rewritten in Python and moved into setup.py, simplifying things considerably. - Likewise, the "make test" target now delegates most of its work to "setup.py test", which takes care of getting PYTHONPATH configured to access the tahoe code (and dependencies) that gets put in support/lib/ by the build_tahoe step. This should allow unit tests to be run even when trial (which is part of Twisted) wasn't already installed (in this case, trial gets installed to support/bin because Twisted is a dependency of Tahoe). - Tahoe is now compatible with the recently-released Python 2.6 , although it is recommended to use Tahoe on Python 2.5, on which it has received more thorough testing and deployment. - Tahoe is now compatible with simplejson-2.0.x . The previous release assumed that simplejson.loads always returned unicode strings, which is no longer the case in 2.0.x . Grid Management Tools --------------------- - Several tools have been added or updated in the misc/ directory, mostly munin plugins that can be used to monitor a storage grid. - The misc/spacetime/ directory contains a "disk watcher" daemon (startable with 'tahoe start'), which can be configured with a set of HTTP URLs (pointing at the wapi '/statistics' page of a bunch of storage servers), and will periodically fetch disk-used/disk-available information from all the servers. It keeps this information in an Axiom database (a sqlite-based library available from divmod.org). The daemon computes time-averaged rates of disk usage, as well as a prediction of how much time is left before the grid is completely full. - The misc/munin/ directory contains a new set of munin plugins (tahoe_diskleft, tahoe_diskusage, tahoe_doomsday) which talk to the disk-watcher and provide graphs of its calculations. - To support the disk-watcher, the Tahoe statistics component (visible through the wapi at the /statistics/ URL) now includes disk-used and disk-available information. Both are derived through an equivalent of the unix 'df' command (i.e. they ask the kernel for the number of free blocks on the partition that encloses the BASEDIR/storage directory). In the future, the disk-available number will be further influenced by the local storage policy: if that policy says that the server should refuse new shares when less than 5GB is left on the partition, then "disk-available" will report zero even though the kernel sees 5GB remaining. 
- The 'tahoe_overhead' munin plugin interacts with an allmydata.com-specific server which reports the total of the 'deep-size' reports for all active user accounts and compares this with the disk-watcher data, to report on overhead percentages. This provides information on how much space could be recovered once Tahoe implements some form of garbage collection. Configuration Changes: single INI-format tahoe.cfg file ------------------------------------------------------- - The Tahoe node is now configured with a single INI-format file, named "tahoe.cfg", in the node's base directory. Most of the previous multiple-separate-files are still read for backwards compatibility (the embedded SSH debug server and the advertised_ip_addresses files are the exceptions), but new directives will only be added to tahoe.cfg . The "tahoe create-client" command will create a tahoe.cfg for you, with sample values commented out. (ticket `#518`_) - tahoe.cfg now has controls for the foolscap "keepalive" and "disconnect" timeouts (`#521`_). - tahoe.cfg now has controls for the encoding parameters: "shares.needed" and "shares.total" in the "[client]" section. The default parameters are still 3-of-10. - The inefficient storage 'sizelimit' control (which established an upper bound on the amount of space that a storage server is allowed to consume) has been replaced by a lightweight 'reserved_space' control (which establishes a lower bound on the amount of remaining space). The storage server will reject all writes that would cause the remaining disk space (as measured by a '/bin/df' equivalent) to drop below this value. The "[storage]reserved_space=" tahoe.cfg parameter controls this setting. (note that this only affects immutable shares: it is an outstanding bug that reserved_space does not prevent the allocation of new mutable shares, nor does it prevent the growth of existing mutable shares). Other Changes ------------- - Clients now declare which versions of the protocols they support. This is part of a new backwards-compatibility system: https://tahoe-lafs.org/trac/tahoe-lafs/wiki/Versioning . - The version strings for human inspection (as displayed on the Welcome web page, and included in logs) now include a platform identifier (frequently including a linux distribution name, processor architecture, etc). - Several bugs have been fixed, including one that would cause an exception (in the logs) if a wapi download operation was cancelled (by closing the TCP connection, or pushing the "stop" button in a web browser). - Tahoe now uses Foolscap "Incidents", writing an "incident report" file to logs/incidents/ each time something weird occurs. These reports are available to an "incident gatherer" through the flogtool command. For more details, please see the Foolscap logging documentation. An incident-classifying plugin function is provided in misc/incident-gatherer/classify_tahoe.py . - If clients detect corruption in shares, they now automatically report it to the server holding that share, if it is new enough to accept the report. These reports are written to files in BASEDIR/storage/corruption-advisories . - The 'nickname' setting is now defined to be a UTF-8 -encoded string, allowing non-ASCII nicknames. - The 'tahoe start' command will now accept a --syslog argument and pass it through to twistd, making it easier to launch non-Tahoe nodes (like the cpu-watcher) and have them log to syslogd instead of a local file. This is useful when running a Tahoe node out of a USB flash drive.
- The Mac GUI in src/allmydata/gui/ has been improved. .. _#512: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/512 .. _#518: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/518 .. _#521: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/521 .. _#531: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/531 Release 1.2.0 (2008-07-21) '''''''''''''''''''''''''' Security -------- - This release makes the immutable-file "ciphertext hash tree" mandatory. Previous releases allowed the uploader to decide whether their file would have an integrity check on the ciphertext or not. A malicious uploader could use this to create a readcap that would download as one file or a different one, depending upon which shares the client fetched first, with no errors raised. There are other integrity checks on the shares themselves, preventing a storage server or other party from violating the integrity properties of the read-cap: this failure was only exploitable by the uploader who gives you a carefully constructed read-cap. If you download the file with Tahoe 1.2.0 or later, you will not be vulnerable to this problem. `#491`_ This change does not introduce a compatibility issue, because all existing versions of Tahoe will emit the ciphertext hash tree in their shares. Dependencies ------------ - Tahoe now requires Foolscap-0.2.9 . It also requires pycryptopp 0.5 or newer, since earlier versions had a bug that interacted with specific compiler versions that could sometimes result in incorrect encryption behavior. Both packages are included in the Tahoe source tarball in misc/dependencies/ , and should be built automatically when necessary. Web API ------- - Web API directory pages should now contain properly-slash-terminated links to other directories. They have also stopped using absolute links in forms and pages (which interfered with the use of a front-end load-balancing proxy). - The behavior of the "Check This File" button changed, in conjunction with larger internal changes to file checking/verification. The button triggers an immediate check as before, but the outcome is shown on its own page, and does not get stored anywhere. As a result, the web directory page no longer shows historical checker results. - A new "Deep-Check" button has been added, which allows a user to initiate a recursive check of the given directory and all files and directories reachable from it. This can cause quite a bit of work, and has no intermediate progress information or feedback about the process. In addition, the results of the deep-check are extremely limited. A later release will improve this behavior. - The web server's behavior with respect to non-ASCII (unicode) filenames in the "GET save=true" operation has been improved. To achieve maximum compatibility with variously buggy web browsers, the server does not try to figure out the character set of the inbound filename. It just echoes the same bytes back to the browser in the Content-Disposition header. This seems to make both IE7 and Firefox work correctly. Checker/Verifier/Repairer ------------------------- - Tahoe is slowly acquiring convenient tools to check up on file health, examine existing shares for errors, and repair files that are not fully healthy. This release adds a mutable checker/verifier/repairer, although testing is very limited, and there are no web interfaces to trigger repair yet. 
The "Check" button next to each file or directory on the wapi page will perform a file check, and the "deep check" button on each directory will recursively check all files and directories reachable from there (which may take a very long time). Future releases will improve access to this functionality. Operations/Packaging -------------------- - A "check-grid" script has been added, along with a Makefile target. This is intended (with the help of a pre-configured node directory) to check upon the health of a Tahoe grid, uploading and downloading a few files. This can be used as a monitoring tool for a deployed grid, to be run periodically and to signal an error if it ever fails. It also helps with compatibility testing, to verify that the latest Tahoe code is still able to handle files created by an older version. - The munin plugins from misc/munin/ are now copied into any generated debian packages, and are made executable (and uncompressed) so they can be symlinked directly from /etc/munin/plugins/ . - Ubuntu "Hardy" was added as a supported debian platform, with a Makefile target to produce hardy .deb packages. Some notes have been added to `debian.rst`_ about building Tahoe on a debian/ubuntu system. - Storage servers now measure operation rates and latency-per-operation, and provides results through the /statistics web page as well as the stats gatherer. Munin plugins have been added to match. Other ----- - Tahoe nodes now use Foolscap "incident logging" to record unusual events to their NODEDIR/logs/incidents/ directory. These incident files can be examined by Foolscap logging tools, or delivered to an external log-gatherer for further analysis. Note that Tahoe now requires Foolscap-0.2.9, since 0.2.8 had a bug that complained about "OSError: File exists" when trying to create the incidents/ directory for a second time. - If no servers are available when retrieving a mutable file (like a directory), the node now reports an error instead of hanging forever. Earlier releases would not only hang (causing the wapi directory listing to get stuck half-way through), but the internal dirnode serialization would cause all subsequent attempts to retrieve or modify the same directory to hang as well. `#463`_ - A minor internal exception (reported in logs/twistd.log, in the "stopProducing" method) was fixed, which complained about "self._paused_at not defined" whenever a file download was stopped from the web browser end. .. _#463: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/463 .. _#491: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/491 .. _debian.rst: docs/debian.rst Release 1.1.0 (2008-06-11) '''''''''''''''''''''''''' CLI: new "alias" model ---------------------- - The new CLI code uses an scp/rsync -like interface, in which directories in the Tahoe storage grid are referenced by a colon-suffixed alias. The new commands look like: - tahoe cp local.txt tahoe:virtual.txt - tahoe ls work:subdir - More functionality is available through the CLI: creating unlinked files and directories, recursive copy in or out of the storage grid, hardlinks, and retrieving the raw read- or write- caps through the 'ls' command. Please read `CLI.rst`_ for complete details. 
wapi: new pages, new commands ----------------------------- - Several new pages were added to the web API: - /helper_status : to describe what a Helper is doing - /statistics : reports node uptime, CPU usage, other stats - /file : for easy file-download URLs, see `#221`_ - /cap == /uri : future compatibility - The localdir=/localfile= and t=download operations were removed. These required special configuration to enable anyways, but this feature was a security problem, and was mostly obviated by the new "cp -r" command. - Several new options to the GET command were added: - t=deep-size : add up the size of all immutable files reachable from the directory - t=deep-stats : return a JSON-encoded description of number of files, size distribution, total size, etc - POST is now preferred over PUT for most operations which cause side-effects. - Most wapi calls now accept overwrite=, and default to overwrite=true - "POST /uri/DIRCAP/parent/child?t=mkdir" is now the preferred API to create multiple directories at once, rather than ...?t=mkdir-p . - PUT to a mutable file ("PUT /uri/MUTABLEFILECAP", "PUT /uri/DIRCAP/child") will modify the file in-place. - more munin graphs in misc/munin/ - tahoe-introstats - tahoe-rootdir-space - tahoe_estimate_files - mutable files published/retrieved - tahoe_cpu_watcher - tahoe_spacetime New Dependencies ---------------- - zfec 1.1.0 - foolscap 0.2.8 - pycryptopp 0.5 - setuptools (now required at runtime) New Mutable-File Code --------------------- - The mutable-file handling code (mostly used for directories) has been completely rewritten. The new scheme has a better API (with a modify() method) and is less likely to lose data when several uncoordinated writers change a file at the same time. - In addition, a single Tahoe process will coordinate its own writes. If you make two concurrent directory-modifying wapi calls to a single tahoe node, it will internally make one of them wait for the other to complete. This prevents auto-collision (`#391`_). - The new mutable-file code also detects errors during publish better. Earlier releases might believe that a mutable file was published when in fact it failed. other features -------------- - The node now monitors its own CPU usage, as a percentage, measured every 60 seconds. 1/5/15 minute moving averages are available on the /statistics web page and via the stats-gathering interface. - Clients now accelerate reconnection to all servers after being offline (`#374`_). When a client is offline for a long time, it scales back reconnection attempts to approximately once per hour, so it may take a while to make the first attempt, but once any attempt succeeds, the other server connections will be retried immediately. - A new "offloaded KeyGenerator" facility can be configured, to move RSA key generation out from, say, a wapi node, into a separate process. RSA keys can take several seconds to create, and so a wapi node which is being used for directory creation will be unavailable for anything else during this time. The Key Generator process will pre-compute a small pool of keys, to speed things up further. This also takes better advantage of multi-core CPUs, or SMP hosts. - The node will only use a potentially-slow "du -s" command at startup (to measure how much space has been used) if the "sizelimit" parameter has been configured (to limit how much space is used). Large storage servers should turn off sizelimit until a later release improves the space-management code, since "du -s" on a terabyte filesystem can take hours. 
- The Introducer now allows new announcements to replace old ones, to avoid buildups of obsolete announcements. - Immutable files are limited to about 12GiB (when using the default 3-of-10 encoding), because larger files would be corrupted by the four-byte share-size field on the storage servers (`#439`_). A later release will remove this limit. Earlier releases would allow >12GiB uploads, but the resulting file would be unretrievable. - The docs/ directory has been rearranged, with old docs put in docs/historical/ and not-yet-implemented ones in docs/proposed/ . - The Mac OS-X FUSE plugin has a significant bug fix: earlier versions would corrupt writes that used seek() instead of writing the file in linear order. The rsync tool is known to perform writes in this order. This has been fixed. .. _#221: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/221 .. _#374: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/374 .. _#391: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/391 .. _#439: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/439 .. _CLI.rst: docs/CLI.rst tahoe_lafs-1.20.0/Tahoe.home0000644000000000000000000000011013615410400012521 0ustar00This file exists so the preamble in bin/tahoe can find its source tree. tahoe_lafs-1.20.0/relnotes.txt0000644000000000000000000001431413615410400013216 0ustar00ANNOUNCING Tahoe, the Least-Authority File Store, v1.20.0 The Tahoe-LAFS team is pleased to announce version 1.20.0 of Tahoe-LAFS, an extremely reliable decentralized storage system. Get it with "pip install tahoe-lafs", or download a tarball here: https://tahoe-lafs.org/downloads Tahoe-LAFS is the first distributed storage system to offer "provider-independent security" — meaning that not even the operators of your storage servers can read or alter your data without your consent. Here is the one-page explanation of its unique security and fault-tolerance properties: https://tahoe-lafs.readthedocs.org/en/latest/about.html The previous stable release of Tahoe-LAFS was v1.19.0, released on January 18, 2024. Major new features and changes in this release: Declarative build system based on "hatch". The ability to create mutable directories with a given private-key. Pulled in fixes from CBOR, stopped using the C version of CBOR and fixed incompatibilities with attrs and cryptography libraries. Besides all this there have been dozens of other bug-fixes and improvements. Enjoy! Please see ``NEWS.rst`` [1] for a complete list of changes. WHAT IS IT GOOD FOR? With Tahoe-LAFS, you distribute your data across multiple servers. Even if some of the servers fail or are taken over by an attacker, the entire file store continues to function correctly, preserving your privacy and security. You can easily share specific files and directories with other people. In addition to the core storage system itself, volunteers have built other projects on top of Tahoe-LAFS and have integrated Tahoe-LAFS with existing systems, including Windows, JavaScript, iPhone, Android, Hadoop, Flume, Django, Puppet, bzr, mercurial, perforce, duplicity, TiddlyWiki, and more. See the Related Projects page on the wiki [3]. We believe that strong cryptography, Free and Open Source Software, erasure coding, and principled engineering practices make Tahoe-LAFS safer than RAID, removable drive, tape, on-line backup or cloud storage. This software is developed under test-driven development, and there are no known bugs or security flaws which would compromise confidentiality or data integrity under recommended use.
(For all important issues that we are currently aware of please see the known_issues.rst file [2].) COMPATIBILITY This release should be compatible with the version 1 series of Tahoe-LAFS. Clients from this release can write files and directories in the format used by clients of all versions back to v1.0 (which was released March 25, 2008). Clients from this release can read files and directories produced by clients of all versions since v1.0. Network connections are limited by the Introducer protocol in use. If the Introducer is running v1.10 or v1.11, then servers from this release can serve clients of all versions back to v1.0 . If it is running v1.12 or higher, then they can only serve clients back to v1.10. Clients from this release can use servers back to v1.10, but not older servers. Except for the new optional MDMF format, we have not made any intentional compatibility changes. However we do not yet have the test infrastructure to continuously verify that all new versions are interoperable with previous versions. We intend to build such an infrastructure in the future. This is the twenty-second release in the version 1 series. This series of Tahoe-LAFS will be actively supported and maintained for the foreseeable future, and future versions of Tahoe-LAFS will retain the ability to read and write files compatible with this series. LICENCE You may use this package under the GNU General Public License, version 2 or, at your option, any later version. See the file "COPYING.GPL" [4] for the terms of the GNU General Public License, version 2. You may use this package under the Transitive Grace Period Public Licence, version 1 or, at your option, any later version. (The Transitive Grace Period Public Licence has requirements similar to the GPL except that it allows you to delay for up to twelve months after you redistribute a derived work before releasing the source code of your derived work.) See the file "COPYING.TGPPL.rst" [5] for the terms of the Transitive Grace Period Public Licence, version 1. (You may choose to use this package under the terms of either licence, at your option.) INSTALLATION Tahoe-LAFS works on Linux, Mac OS X, Windows, Solaris, *BSD, and probably most other systems. Start with "docs/INSTALL.rst" [6]. HACKING AND COMMUNITY Please join us on the mailing list [7]. Patches are gratefully accepted -- the Roadmap page [8] shows the next improvements that we plan to make and CREDITS [9] lists the names of people who've contributed to the project. The Dev page [10] contains resources for hackers. SPONSORSHIP A special thanks goes out to Least Authority Enterprises [12], which employs several Tahoe-LAFS developers, for their continued support. HACK TAHOE-LAFS! If you can find a security flaw in Tahoe-LAFS which is serious enough that we feel compelled to warn our users and issue a fix, then we will award you with a customized t-shirt with your exploit printed on it and add you to the "Hack Tahoe-LAFS Hall Of Fame" [13]. ACKNOWLEDGEMENTS This is the twenty-first release of Tahoe-LAFS to be created solely as a labor of love by volunteers. Thank you very much to the team of "hackers in the public interest" who make Tahoe-LAFS possible. 
meejah on behalf of the Tahoe-LAFS team December 13, 2024 Planet Earth [1] https://github.com/tahoe-lafs/tahoe-lafs/blob/tahoe-lafs-1.20.0/NEWS.rst [2] https://github.com/tahoe-lafs/tahoe-lafs/blob/master/docs/known_issues.rst [3] https://tahoe-lafs.org/trac/tahoe-lafs/wiki/RelatedProjects [4] https://github.com/tahoe-lafs/tahoe-lafs/blob/tahoe-lafs-1.20.0/COPYING.GPL [5] https://github.com/tahoe-lafs/tahoe-lafs/blob/tahoe-lafs-1.20.0/COPYING.TGPPL.rst [6] https://tahoe-lafs.readthedocs.org/en/tahoe-lafs-1.20.0/INSTALL.html [7] https://lists.tahoe-lafs.org/mailman/listinfo/tahoe-dev [8] https://tahoe-lafs.org/trac/tahoe-lafs/roadmap [9] https://github.com/tahoe-lafs/tahoe-lafs/blob/master/CREDITS [10] https://tahoe-lafs.org/trac/tahoe-lafs/wiki/Dev [12] https://leastauthority.com/ [13] https://tahoe-lafs.org/hacktahoelafs/ [14] https://github.com/warner/magic-wormhole tahoe_lafs-1.20.0/tox.ini0000644000000000000000000001715213615410400012140 0ustar00# Tox (http://tox.testrun.org/) is a tool for running tests # in multiple virtualenvs. This configuration file will run the # test suite on all supported python versions. To use it, "pip install tox" # and then run "tox" from this directory. # Map Python versions in GitHub Actions to tox environments to run, for use by # the tox-gh-actions package. [gh-actions] python = 3.9: py39-coverage 3.10: py310-coverage 3.11: py311-coverage 3.12: py312-coverage pypy-3.9: pypy39 [pytest] twisted = 1 [tox] envlist = typechecks,codechecks,py{39,310,311,312}-{coverage},pypy39,integration minversion = 4 [testenv] # Install code the real way, for maximum realism. usedevelop = False passenv = TAHOE_LAFS_*,PIP_*,SUBUNITREPORTER_*,USERPROFILE,HOMEDRIVE,HOMEPATH,COLUMNS deps = # We pull in certify *here* to avoid bug #2913. Basically if a # `setup_requires=...` causes a package to be installed (with setuptools) # then it'll fail on certain platforms (travis's OX-X 10.12, Slackware # 14.2) because PyPI's TLS requirements (TLS >= 1.2) are incompatible with # the old TLS clients available to those systems. Installing it ahead of # time (with pip) avoids this problem. # # We don't pin an exact version of it because it contains CA certificates # which necessarily change over time. Pinning this is guaranteed to cause # things to break eventually as old certificates expire and as new ones # are used in the wild that aren't present in whatever version we pin. # Hopefully there won't be functionality regressions in new releases of # this package that cause us the kind of suffering we're trying to avoid # with the above pins. certifi extras = # Get general testing environment dependencies so we can run the tests # how we like. testenv # And get all of the test suite's actual direct Python dependencies. test setenv = # Define TEST_SUITE in the environment as an aid to constructing the # correct test command below. TEST_SUITE = allmydata COLUMNS = 80 commands = # As an aid to debugging, dump all of the Python packages and their # versions that are installed in the test environment. This is # particularly useful to get from CI runs - though hopefully the # version pinning we do limits the variability of this output pip freeze tahoe --version python -c "import sys; print('sys.stdout.encoding:', sys.stdout.encoding)" # Run tests with -b to catch bugs like `"%s" % (some_bytes,)`. -b makes # Python emit BytesWarnings, and warnings configuration in # src/allmydata/tests/__init__.py turns allmydata's BytesWarnings into # exceptions. 
!coverage: python -b -m twisted.trial {env:TAHOE_LAFS_TRIAL_ARGS:--rterrors} {posargs:{env:TEST_SUITE}} # measuring coverage is somewhat slower than not measuring coverage # so only do it on request. coverage: python -b -m coverage run -m twisted.trial {env:TAHOE_LAFS_TRIAL_ARGS:--rterrors --reporter=timing} {posargs:{env:TEST_SUITE}} coverage: coverage combine coverage: coverage xml [testenv:integration] usedevelop = False basepython = python3 platform = mylinux: linux mymacos: darwin mywindows: win32 setenv = COVERAGE_PROCESS_START=.coveragerc commands = # NOTE: 'run with "py.test --keep-tempdir -s -v integration/" to debug failures' py.test --timeout=1800 --coverage -s -v {posargs:integration} coverage combine [testenv:codechecks] basepython = python3 skip_install = true deps = # Pin a specific version so we get consistent outcomes; update this # occasionally: ruff == 0.1.6 towncrier # On macOS, git inside of towncrier needs $HOME. passenv = HOME setenv = # If no positional arguments are given, try to run the checks on the # entire codebase, including various pieces of supporting code. DEFAULT_FILES=src integration benchmarks static misc setup.py commands = ruff check {posargs:{env:DEFAULT_FILES}} python misc/coding_tools/check-umids.py {posargs:{env:DEFAULT_FILES}} python misc/coding_tools/check-debugging.py {posargs:{env:DEFAULT_FILES}} # If towncrier.check fails, you forgot to add a towncrier news # fragment explaining the change in this branch. Create one at # `newsfragments/.` with some text for the news # file. See towncrier.toml for legal values. python -m towncrier.check --config towncrier.toml [testenv:typechecks] basepython = python3 deps = mypy==1.8.0 mypy-zope types-mock types-six types-PyYAML types-setuptools types-pyOpenSSL foolscap # Upgrade when new releases come out: Twisted==23.10.0 commands = # Different versions of Python have a different standard library, and we # want to be compatible with all the variations. For speed's sake we only do # the earliest and latest versions. mypy --python-version=3.9 src mypy --python-version=3.12 src [testenv:draftnews] passenv = TAHOE_LAFS_*,PIP_*,SUBUNITREPORTER_*,USERPROFILE,HOMEDRIVE,HOMEPATH,COLUMNS deps = # see comment in [testenv] about "certifi" certifi towncrier==23.11.0 commands = python -m towncrier --draft --config towncrier.toml [testenv:news] # On macOS, git invoked from Tox needs $HOME. passenv = TAHOE_LAFS_*,PIP_*,SUBUNITREPORTER_*,USERPROFILE,HOMEDRIVE,HOMEPATH,COLUMNS allowlist_externals = git deps = # see comment in [testenv] about "certifi" certifi towncrier==23.11.0 commands = python -m towncrier --yes --config towncrier.toml # commit the changes git commit -m "update NEWS.txt for release" [testenv:deprecations] commands = python misc/build_helpers/run-deprecations.py --package allmydata --warnings={env:TAHOE_LAFS_WARNINGS_LOG:_trial_temp/deprecation-warnings.log} trial {env:TAHOE_LAFS_TRIAL_ARGS:--rterrors} {posargs:allmydata} [testenv:upcoming-deprecations] deps = # Take the base deps as well! {[testenv]deps} git+https://github.com/warner/foolscap commands = flogtool --version python misc/build_helpers/run-deprecations.py --package allmydata --warnings={env:TAHOE_LAFS_WARNINGS_LOG:_trial_temp/deprecation-warnings.log} trial {env:TAHOE_LAFS_TRIAL_ARGS:--rterrors} {posargs:allmydata} # Use 'tox -e docs' to check formatting and cross-references in docs .rst # files. The published docs are built by code run over at readthedocs.org, # which does not use this target (but does something similar). 
# # If you have "sphinx" installed in your virtualenv, you can just do "make -C # docs html", or "cd docs; make html". # # You can also open docs/_build/html/index.html to see the rendered docs in # your web browser. [testenv:docs] deps = -r docs/requirements.txt # normal install is not needed for docs, and slows things down skip_install = True commands = sphinx-build -W -b html -d {toxinidir}/docs/_build/doctrees {toxinidir}/docs {toxinidir}/docs/_build/html [testenv:pyinstaller] extras = deps = {[testenv]deps} packaging pyinstaller pefile ; platform_system == "Windows" # Setting PYTHONHASHSEED to a known value assists with reproducible builds. # See https://pyinstaller.readthedocs.io/en/stable/advanced-topics.html#creating-a-reproducible-build setenv=PYTHONHASHSEED=1 commands= pip freeze pyinstaller -y --clean pyinstaller.spec [testenv:tarballs] basepython = python3 deps = commands = python setup.py update_version python setup.py sdist --formats=gztar bdist_wheel --universal tahoe_lafs-1.20.0/docs/CODE_OF_CONDUCT.md0000644000000000000000000000510413615410400014346 0ustar00# Contributor Code of Conduct As contributors and maintainers of this project, and in the interest of fostering an open and welcoming community, we pledge to respect all people who contribute through reporting issues, posting feature requests, updating documentation, submitting pull requests or patches, and other activities. We are committed to making participation in this project a harassment-free experience for everyone, regardless of level of experience, gender, gender identity and expression, sexual orientation, disability, personal appearance, body size, race, ethnicity, age, religion, or nationality. Examples of unacceptable behavior by participants include: * The use of sexualized language or imagery * Personal attacks * Trolling or insulting/derogatory comments * Public or private harassment * Publishing other's private information, such as physical or electronic addresses, without explicit permission * Other unethical or unprofessional conduct Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. By adopting this Code of Conduct, project maintainers commit themselves to fairly and consistently applying these principles to every aspect of managing this project. Project maintainers who do not follow or enforce the Code of Conduct may be permanently removed from the project team. This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting a project maintainer (see below). All complaints will be reviewed and investigated and will result in a response that is deemed necessary and appropriate to the circumstances. Maintainers are obligated to maintain confidentiality with regard to the reporter of an incident. 
The following community members have made themselves available for conduct issues: - Jean-Paul Calderone (jean-paul at leastauthority dot com) - meejah (meejah at meejah dot ca) - May-Lee Sia(she/her) (tahoe dot lafs dot community at gmail dot com) This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.3.0, available at [http://contributor-covenant.org/version/1/3/0/][version] [homepage]: http://contributor-covenant.org [version]: http://contributor-covenant.org/version/1/3/0/ tahoe_lafs-1.20.0/docs/Makefile0000644000000000000000000001712013615410400013210 0ustar00# Makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build PAPER = BUILDDIR = _build # User-friendly check for sphinx-build ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) $(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) endif # Internal variables. PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . # the i18n builder cannot share the environment and doctrees with the others I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . .PHONY: help help: @echo "Please use \`make ' where is one of" @echo " html to make standalone HTML files" @echo " dirhtml to make HTML files named index.html in directories" @echo " singlehtml to make a single large HTML file" @echo " pickle to make pickle files" @echo " json to make JSON files" @echo " htmlhelp to make HTML files and a HTML help project" @echo " qthelp to make HTML files and a qthelp project" @echo " applehelp to make an Apple Help Book" @echo " devhelp to make HTML files and a Devhelp project" @echo " epub to make an epub" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @echo " latexpdf to make LaTeX files and run them through pdflatex" @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" @echo " text to make text files" @echo " man to make manual pages" @echo " texinfo to make Texinfo files" @echo " info to make Texinfo files and run them through makeinfo" @echo " gettext to make PO message catalogs" @echo " changes to make an overview of all changed/added/deprecated items" @echo " xml to make Docutils-native XML files" @echo " pseudoxml to make pseudoxml-XML files for display purposes" @echo " linkcheck to check all external links for integrity" @echo " doctest to run all doctests embedded in the documentation (if enabled)" @echo " coverage to run coverage check of the documentation (if enabled)" .PHONY: clean clean: rm -rf $(BUILDDIR)/* .PHONY: html html: $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." .PHONY: dirhtml dirhtml: $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." .PHONY: singlehtml singlehtml: $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml @echo @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." 
.PHONY: pickle pickle: $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle @echo @echo "Build finished; now you can process the pickle files." .PHONY: json json: $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json @echo @echo "Build finished; now you can process the JSON files." .PHONY: htmlhelp htmlhelp: $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp @echo @echo "Build finished; now you can run HTML Help Workshop with the" \ ".hhp project file in $(BUILDDIR)/htmlhelp." .PHONY: qthelp qthelp: $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp @echo @echo "Build finished; now you can run "qcollectiongenerator" with the" \ ".qhcp project file in $(BUILDDIR)/qthelp, like this:" @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/Tahoe-LAFS.qhcp" @echo "To view the help file:" @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/Tahoe-LAFS.qhc" .PHONY: applehelp applehelp: $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp @echo @echo "Build finished. The help book is in $(BUILDDIR)/applehelp." @echo "N.B. You won't be able to view it unless you put it in" \ "~/Library/Documentation/Help or install it in your application" \ "bundle." .PHONY: devhelp devhelp: $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp @echo @echo "Build finished." @echo "To view the help file:" @echo "# mkdir -p $$HOME/.local/share/devhelp/Tahoe-LAFS" @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/Tahoe-LAFS" @echo "# devhelp" .PHONY: epub epub: $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub @echo @echo "Build finished. The epub file is in $(BUILDDIR)/epub." .PHONY: latex latex: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." @echo "Run \`make' in that directory to run these through (pdf)latex" \ "(use \`make latexpdf' here to do that automatically)." .PHONY: latexpdf latexpdf: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through pdflatex..." $(MAKE) -C $(BUILDDIR)/latex all-pdf @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." .PHONY: latexpdfja latexpdfja: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through platex and dvipdfmx..." $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." .PHONY: text text: $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text @echo @echo "Build finished. The text files are in $(BUILDDIR)/text." .PHONY: man man: $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man @echo @echo "Build finished. The manual pages are in $(BUILDDIR)/man." .PHONY: texinfo texinfo: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." @echo "Run \`make' in that directory to run these through makeinfo" \ "(use \`make info' here to do that automatically)." .PHONY: info info: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo "Running Texinfo files through makeinfo..." make -C $(BUILDDIR)/texinfo info @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." .PHONY: gettext gettext: $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale @echo @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." .PHONY: changes changes: $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes @echo @echo "The overview file is in $(BUILDDIR)/changes." 
.PHONY: linkcheck linkcheck: $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in $(BUILDDIR)/linkcheck/output.txt." .PHONY: doctest doctest: $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest @echo "Testing of doctests in the sources finished, look at the " \ "results in $(BUILDDIR)/doctest/output.txt." .PHONY: coverage coverage: $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage @echo "Testing of coverage in the sources finished, look at the " \ "results in $(BUILDDIR)/coverage/python.txt." .PHONY: xml xml: $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml @echo @echo "Build finished. The XML files are in $(BUILDDIR)/xml." .PHONY: pseudoxml pseudoxml: $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml @echo @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." .PHONY: livehtml livehtml: sphinx-autobuild -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html tahoe_lafs-1.20.0/docs/Makefile-old0000644000000000000000000000060113615410400013760 0ustar00 SOURCES = subtree1.svg lease-tradeoffs.svg PNGS = $(patsubst %.svg,%.png,$(SOURCES)) EPSS = $(patsubst %.svg,%.eps,$(SOURCES)) .PHONY: images-png images-eps all: $(PNGS) $(EPSS) images-png: $(PNGS) images-eps: $(EPSS) %.png: %.svg inkscape -b white -d 90 -D --export-png $@ $< %.eps: %.svg inkscape --export-eps $@ $< %.html: %.rst rst2html.py $< $@ clean: rm -f *.png *.eps tahoe_lafs-1.20.0/docs/README.txt0000644000000000000000000000633313615410400013252 0ustar00If you are reading Tahoe-LAFS documentation ------------------------------------------- If you are reading Tahoe-LAFS documentation at a code hosting site or from a checked-out source tree, the preferred place to view the docs is http://tahoe-lafs.readthedocs.io/en/latest/. Code-hosting sites do not render cross-document links or images correctly. If you are writing Tahoe-LAFS documentation ------------------------------------------- To edit Tahoe-LAFS docs, you will need a checked-out source tree. You can edit the `.rst` files in this directory using a text editor, and then generate HTML output using Sphinx, a program that can produce its output in HTML and other formats. Files with `.rst` extension use reStructuredText markup format, which is the format Sphinx natively handles. To learn more about Sphinx, and for a friendly primer on reStructuredText, please see Sphinx project's documentation, available at: https://www.sphinx-doc.org/ If you have `tox` installed, you can run `tox -e docs` and then open the resulting docs/_build/html/index.html in your web browser. Note that Sphinx can also process Python docstrings to generate API documentation. Tahoe-LAFS currently does not use Sphinx for this purpose. Organizing Tahoe-LAFS documentation ----------------------------------- Tahoe-LAFS documentation has been a mishmash of many things that are useful to many people, with little organization, and, as a result, confusing and hard-to-approach. We are working on improving this. It is reasonable to expect that documentation files in "docs" directory will serve different and possibly overlapping groups of readers, so the top-level sections are organized based on the likely needs of those almost-distinct groups. We have: (a) New and experienced users of Tahoe-LAFS, who mainly need an operating manual to the software. Notes under the section titled "Getting Started with Tahoe-LAFS" will be the most useful to them. 
(b) Project contributors, both new and experienced. This group includes developers, issue reporters, and documentation writers. It will help this group to have the project's processes and guidelines written down. The section titled "Contributing to Tahoe-LAFS" is meant to be useful for this group. (c) Those who want to know various implementation details about the project. This group might include people who are mainly curious and those who want change things. We could expect an overlap between members of group (a) who want to know more and members of group (b). The sections titled "Tahoe-LAFS in Depth" and "Specifications" could be of interest to them. (d) There's also the broader community. This includes people with a general interest in Tahoe-LAFS project, and people from groups both (a) and (b). They will find "Notes of Community Interest" useful. When you add new content or remove old content to Tahoe-LAFS docs, it would be helpful to organize your changes with the above-stated groups of readers in mind. This directory also contains old notes that are mainly of historical interest, under the section titled "Notes of Historical Interest". Those could be removed someday, after sufficient consideration. tahoe_lafs-1.20.0/docs/about-tahoe.rst0000644000000000000000000001524213615410400014515 0ustar00.. -*- coding: utf-8-with-signature -*- ********************** Welcome to Tahoe-LAFS! ********************** What is Tahoe-LAFS? =================== Welcome to Tahoe-LAFS_, the first decentralized storage system with *provider-independent security*. Tahoe-LAFS is a system that helps you to store files. You run a client program on your computer, which talks to one or more storage servers on other computers. When you tell your client to store a file, it will encrypt that file, encode it into multiple pieces, then spread those pieces out among multiple servers. The pieces are all encrypted and protected against modifications. Later, when you ask your client to retrieve the file, it will find the necessary pieces, make sure they haven't been corrupted, reassemble them, and decrypt the result. The client creates more pieces (or "shares") than it will eventually need, so even if some of the servers fail, you can still get your data back. Corrupt shares are detected and ignored, so the system can tolerate server-side hard-drive errors. All files are encrypted (with a unique key) before uploading, so even a malicious server operator cannot read your data. The only thing you ask of the servers is that they can (usually) provide the shares when you ask for them: you aren't relying upon them for confidentiality, integrity, or absolute availability. .. _Tahoe-LAFS: https://tahoe-lafs.org What is "provider-independent security"? ======================================== Every seller of cloud storage services will tell you that their service is "secure". But what they mean by that is something fundamentally different from what we mean. What they mean by "secure" is that after you've given them the power to read and modify your data, they try really hard not to let this power be abused. This turns out to be difficult! Bugs, misconfigurations, or operator error can accidentally expose your data to another customer or to the public, or can corrupt your data. Criminals routinely gain illicit access to corporate servers. Even more insidious is the fact that the employees themselves sometimes violate customer privacy out of carelessness, avarice, or mere curiosity. 
The most conscientious of these service providers spend considerable effort and expense trying to mitigate these risks. What we mean by "security" is something different. *The service provider never has the ability to read or modify your data in the first place: never.* If you use Tahoe-LAFS, then all of the threats described above are non-issues to you. Not only is it easy and inexpensive for the service provider to maintain the security of your data, but in fact they couldn't violate its security if they tried. This is what we call *provider-independent security*. This guarantee is integrated naturally into the Tahoe-LAFS storage system and doesn't require you to perform a manual pre-encryption step or cumbersome key management. (After all, having to do cumbersome manual operations when storing or accessing your data would nullify one of the primary benefits of using cloud storage in the first place: convenience.) Here's how it works: .. image:: network-and-reliance-topology.svg A "storage grid" is made up of a number of storage servers. A storage server has direct attached storage (typically one or more hard disks). A "gateway" communicates with storage nodes, and uses them to provide access to the grid over protocols such as HTTP(S) and SFTP. Note that you can find "client" used to refer to gateway nodes (which act as a client to storage servers), and also to processes or programs connecting to a gateway node and performing operations on the grid -- for example, a CLI command, Web browser, or SFTP client. Users do not rely on storage servers to provide *confidentiality* nor *integrity* for their data -- instead all of the data is encrypted and integrity-checked by the gateway, so that the servers can neither read nor modify the contents of the files. Users do rely on storage servers for *availability*. The ciphertext is erasure-coded into ``N`` shares distributed across at least ``H`` distinct storage servers (the default value for ``N`` is 10 and for ``H`` is 7) so that it can be recovered from any ``K`` of these servers (the default value of ``K`` is 3). Therefore only the failure of ``H-K+1`` (with the defaults, 5) servers can make the data unavailable. In the typical deployment mode each user runs her own gateway on her own machine. This way she relies on her own machine for the confidentiality and integrity of the data. An alternate deployment mode is that the gateway runs on a remote machine and the user connects to it over HTTPS or SFTP. This means that the operator of the gateway can view and modify the user's data (the user *relies on* the gateway for confidentiality and integrity), but the advantage is that the user can access the Tahoe-LAFS grid with a client that doesn't have the gateway software installed, such as an Internet kiosk or cell phone. Access Control ============== There are two kinds of files: immutable and mutable. When you upload a file to the storage grid you can choose which kind of file it will be in the grid. Immutable files can't be modified once they have been uploaded. A mutable file can be modified by someone with read-write access to it. A user can have read-write access to a mutable file or read-only access to it, or no access to it at all. A user who has read-write access to a mutable file or directory can give another user read-write access to that file or directory, or they can give read-only access to that file or directory. A user who has read-only access to a file or directory can give another user read-only access to it. 
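As a concrete illustration of the difference (a sketch: the capability strings are truncated placeholders and ``report.txt`` is just an example file), uploading data as an immutable file yields a capability that identifies exactly those bytes, while uploading it as a mutable file yields a write capability::

    $ tahoe put report.txt
    URI:CHK:...
    $ tahoe put --mutable report.txt
    URI:SSK:...

The ``URI:CHK:`` prefix marks an immutable file capability; ``URI:SSK:`` marks a mutable write capability, which can be diminished to a read-only capability (``URI:SSK-RO:``) when you want to grant read-only access.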
When linking a file or directory into a parent directory, you can use a read-write link or a read-only link. If you use a read-write link, then anyone who has read-write access to the parent directory can gain read-write access to the child, and anyone who has read-only access to the parent directory can gain read-only access to the child. If you use a read-only link, then anyone who has either read-write or read-only access to the parent directory can gain read-only access to the child. For more technical detail, please see the `the doc page`_ on the Wiki. .. _the doc page: https://tahoe-lafs.org/trac/tahoe-lafs/wiki/Doc Get Started =========== To use Tahoe-LAFS, please see :doc:`Installing Tahoe-LAFS <../Installation/install-tahoe>`. License ======= Tahoe-LAFS is an open-source project; please see the `top-level README`_ for details. .. this is really ../README.rst, but it's not included in the Sphinx build so we can't link to it normally .. _top-level README: https://github.com/tahoe-lafs/tahoe-lafs/blob/master/README.rst tahoe_lafs-1.20.0/docs/accepting-donations.rst0000644000000000000000000000531513615410400016236 0ustar00======================== Storage Server Donations ======================== The following is a configuration convention which allows users to anonymously support the operators of storage servers. Donations are made using `Zcash shielded transactions`_ to limit the amount of personal information incidentally conveyed. Sending Donations ================= To support a storage server following this convention, you need several things: * a Zcash wallet capable of sending shielded transactions (at least until Zcash 1.1.1 this requires a Zcash full node) * a shielded address with sufficient balance * a running Tahoe-LAFS client node which knows about the recipient storage server For additional protection, you may also wish to operate your Zcash wallet and full node using Tor. Find Zcash Shielded Address --------------------------- To find an address at which a storage server operator wishes to receive donations, launch the Tahoe-LAFS web UI:: $ tahoe webopen Inspect the page for the storage server area. This will have a heading like *Connected to N of M known storage servers*. Each storage server in this section will have a nickname. A storage server with a nickname beginning with ``zcash:`` is signaling it accepts Zcash donations. Copy the full address following the ``zcash:`` prefix and save it for the next step. This is the donation address. Donation addresses beginning with ``z`` are shielded. It is recommended that all donations be sent from and to shielded addresses. Send the Donation ----------------- First, select a donation amount. Next, use a Zcash wallet to send the selected amount to the donation address. Using the Zcash cli wallet, this can be done with commands like:: $ DONATION_ADDRESS="..." $ AMOUNT="..." $ YOUR_ADDRESS="..." $ zcash-cli z_sendmany $YOUR_ADDRESS "[{\"address\": \"$DONATION_ADDRESS\", \"amount\": $AMOUNT}]" Remember that you must also have funds to pay the transaction fee (which defaults to 0.0001 ZEC in mid-2018). Receiving Donations =================== To receive donations from users following this convention, you need the following: * a Zcash shielded address Configuring Tahoe-LAFS ---------------------- The Zcash shielded address is placed in the storage server's ``nickname`` field. Edit ``tahoe.cfg`` and edit the ``nickname`` field in the ``node`` section like so:: [node] nickname = zcash:zcABCDEF.... Then restart the storage server. 
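To double-check that the new nickname is actually being announced, the same inspection that donors perform can be scripted. This is only a sketch: it assumes a client node whose web API listens on the default port 3456, and that the welcome page's machine-readable view (``/?t=json``) includes each connected server's announced nickname::

    $ curl -s 'http://127.0.0.1:3456/?t=json' | grep -o 'zcash:[^"]*'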
Further Reading =============== To acquaint yourself with the security and privacy properties of Zcash, refer to the `Zcash documentation`_. .. _Zcash shielded transactions: https://z.cash/support/security/privacy-security-recommendations.html#transaction .. _Zcash documentation: http://zcash.readthedocs.io/en/latest/ tahoe_lafs-1.20.0/docs/anonymity-configuration.rst0000644000000000000000000004360013615410400017200 0ustar00.. -*- coding: utf-8-with-signature; fill-column: 77 -*- ====================================================== Using Tahoe-LAFS with an anonymizing network: Tor, I2P ====================================================== #. `Overview`_ #. `Use cases`_ #. `Software Dependencies`_ #. `Tor`_ #. `I2P`_ #. `Connection configuration`_ #. `Anonymity configuration`_ #. `Client anonymity`_ #. `Server anonymity, manual configuration`_ #. `Server anonymity, automatic configuration`_ #. `Performance and security issues`_ Overview ======== Tor is an anonymizing network used to help hide the identity of internet clients and servers. Please see the Tor Project's website for more information: https://www.torproject.org/ I2P is a decentralized anonymizing network that focuses on end-to-end anonymity between clients and servers. Please see the I2P website for more information: https://geti2p.net/ Use cases ========= There are three potential use-cases for Tahoe-LAFS on the client side: 1. User wishes to always use an anonymizing network (Tor, I2P) to protect their anonymity when connecting to Tahoe-LAFS storage grids (whether or not the storage servers are anonymous). 2. User does not care to protect their anonymity but they wish to connect to Tahoe-LAFS storage servers which are accessible only via Tor Hidden Services or I2P. * Tor is only used if a server connection hint uses ``tor:``. These hints generally have a ``.onion`` address. * I2P is only used if a server connection hint uses ``i2p:``. These hints generally have a ``.i2p`` address. 3. User does not care to protect their anonymity or to connect to anonymous storage servers. This document is not useful to you... so stop reading. For Tahoe-LAFS storage servers there are three use-cases: 1. The operator wishes to protect their anonymity by making their Tahoe server accessible only over I2P, via Tor Hidden Services, or both. 2. The operator does not *require* anonymity for the storage server, but they want it to be available over both publicly routed TCP/IP and through an anonymizing network (I2P, Tor Hidden Services). One possible reason to do this is because being reachable through an anonymizing network is a convenient way to bypass NAT or firewall that prevents publicly routed TCP/IP connections to your server (for clients capable of connecting to such servers). Another is that making your storage server reachable through an anonymizing network can provide better protection for your clients who themselves use that anonymizing network to protect their anonymity. 3. Storage server operator does not care to protect their own anonymity nor to help the clients protect theirs. Stop reading this document and run your Tahoe-LAFS storage server using publicly routed TCP/IP. See this Tor Project page for more information about Tor Hidden Services: https://www.torproject.org/docs/hidden-services.html.en See this I2P Project page for more information about I2P: https://geti2p.net/en/about/intro Software Dependencies ===================== Tor --- Clients who wish to connect to Tor-based servers must install the following. 
* Tor (tor) must be installed. See here: https://www.torproject.org/docs/installguide.html.en . On Debian/Ubuntu, use ``apt-get install tor``. You can also install and run the Tor Browser Bundle. * Tahoe-LAFS must be installed with the ``[tor]`` "extra" enabled. This will install ``txtorcon`` :: pip install tahoe-lafs[tor] Manually-configured Tor-based servers must install Tor, but do not need ``txtorcon`` or the ``[tor]`` extra. Automatic configuration, when implemented, will need these, just like clients. I2P --- Clients who wish to connect to I2P-based servers must install the following. As with Tor, manually-configured I2P-based servers need the I2P daemon, but no special Tahoe-side supporting libraries. * I2P must be installed. See here: https://geti2p.net/en/download * The SAM API must be enabled. * Start I2P. * Visit http://127.0.0.1:7657/configclients in your browser. * Under "Client Configuration", check the "Run at Startup?" box for "SAM application bridge". * Click "Save Client Configuration". * Click the "Start" control for "SAM application bridge", or restart I2P. * Tahoe-LAFS must be installed with the ``[i2p]`` extra enabled, to get ``txi2p`` :: pip install tahoe-lafs[i2p] Both Tor and I2P ---------------- Clients who wish to connect to both Tor- and I2P-based servers must install all of the above. In particular, Tahoe-LAFS must be installed with both extras enabled:: pip install tahoe-lafs[tor,i2p] Connection configuration ======================== See :ref:`Connection Management` for a description of the ``[tor]`` and ``[i2p]`` sections of ``tahoe.cfg``. These control how the Tahoe client will connect to a Tor/I2P daemon, and thus make connections to Tor/I2P -based servers. The ``[tor]`` and ``[i2p]`` sections only need to be modified to use unusual configurations, or to enable automatic server setup. The default configuration will attempt to contact a local Tor/I2P daemon listening on the usual ports (9050/9150 for Tor, 7656 for I2P). As long as there is a daemon running on the local host, and the necessary support libraries were installed, clients will be able to use Tor-based servers without any special configuration. However note that this default configuration does not improve the client's anonymity: normal TCP connections will still be made to any server that offers a regular address (it fulfills the second client use case above, not the third). To protect their anonymity, users must configure the ``[connections]`` section as follows:: [connections] tcp = tor With this in place, the client will use Tor (instead of an IP-address -revealing direct connection) to reach TCP-based servers. Anonymity configuration ======================= Tahoe-LAFS provides a configuration "safety flag" for explicitly stating whether or not IP-address privacy is required for a node:: [node] reveal-IP-address = (boolean, optional) When ``reveal-IP-address = False``, Tahoe-LAFS will refuse to start if any of the configuration options in ``tahoe.cfg`` would reveal the node's network location: * ``[connections] tcp = tor`` is required: otherwise the client would make direct connections to the Introducer, or any TCP-based servers it learns from the Introducer, revealing its IP address to those servers and a network eavesdropper. With this in place, Tahoe-LAFS will only make outgoing connections through a supported anonymizing network. * ``tub.location`` must either be disabled, or contain safe values. 
This value is advertised to other nodes via the Introducer: it is how a server advertises it's location so clients can connect to it. In private mode, it is an error to include a ``tcp:`` hint in ``tub.location``. Private mode rejects the default value of ``tub.location`` (when the key is missing entirely), which is ``AUTO``, which uses ``ifconfig`` to guess the node's external IP address, which would reveal it to the server and other clients. This option is **critical** to preserving the client's anonymity (client use-case 3 from `Use cases`_, above). It is also necessary to preserve a server's anonymity (server use-case 3). This flag can be set (to False) by providing the ``--hide-ip`` argument to the ``create-node``, ``create-client``, or ``create-introducer`` commands. Note that the default value of ``reveal-IP-address`` is True, because unfortunately hiding the node's IP address requires additional software to be installed (as described above), and reduces performance. Client anonymity ---------------- To configure a client node for anonymity, ``tahoe.cfg`` **must** contain the following configuration flags:: [node] reveal-IP-address = False tub.port = disabled tub.location = disabled Once the Tahoe-LAFS node has been restarted, it can be used anonymously (client use-case 3). Server anonymity, manual configuration -------------------------------------- To configure a server node to listen on an anonymizing network, we must first configure Tor to run an "Onion Service", and route inbound connections to the local Tahoe port. Then we configure Tahoe to advertise the ``.onion`` address to clients. We also configure Tahoe to not make direct TCP connections. * Decide on a local listening port number, named PORT. This can be any unused port from about 1024 up to 65535 (depending upon the host's kernel/network config). We will tell Tahoe to listen on this port, and we'll tell Tor to route inbound connections to it. * Decide on an external port number, named VIRTPORT. This will be used in the advertised location, and revealed to clients. It can be any number from 1 to 65535. It can be the same as PORT, if you like. * Decide on a "hidden service directory", usually in ``/var/lib/tor/NAME``. We'll be asking Tor to save the onion-service state here, and Tor will write the ``.onion`` address here after it is generated. Then, do the following: * Create the Tahoe server node (with ``tahoe create-node``), but do **not** launch it yet. * Edit the Tor config file (typically in ``/etc/tor/torrc``). We need to add a section to define the hidden service. If our PORT is 2000, VIRTPORT is 3000, and we're using ``/var/lib/tor/tahoe`` as the hidden service directory, the section should look like:: HiddenServiceDir /var/lib/tor/tahoe HiddenServicePort 3000 127.0.0.1:2000 * Restart Tor, with ``systemctl restart tor``. Wait a few seconds. * Read the ``hostname`` file in the hidden service directory (e.g. ``/var/lib/tor/tahoe/hostname``). This will be a ``.onion`` address, like ``u33m4y7klhz3b.onion``. Call this ONION. * Edit ``tahoe.cfg`` to set ``tub.port`` to use ``tcp:PORT:interface=127.0.0.1``, and ``tub.location`` to use ``tor:ONION.onion:VIRTPORT``. 
Using the examples above, this would be:: [node] reveal-IP-address = false tub.port = tcp:2000:interface=127.0.0.1 tub.location = tor:u33m4y7klhz3b.onion:3000 [connections] tcp = tor * Launch the Tahoe server with ``tahoe run $NODEDIR`` The ``tub.port`` section will cause the Tahoe server to listen on PORT, but bind the listening socket to the loopback interface, which is not reachable from the outside world (but *is* reachable by the local Tor daemon). Then the ``tcp = tor`` section causes Tahoe to use Tor when connecting to the Introducer, hiding it's IP address. The node will then announce itself to all clients using ``tub.location``, so clients will know that they must use Tor to reach this server (and not revealing it's IP address through the announcement). When clients connect to the onion address, their packets will flow through the anonymizing network and eventually land on the local Tor daemon, which will then make a connection to PORT on localhost, which is where Tahoe is listening for connections. Follow a similar process to build a Tahoe server that listens on I2P. The same process can be used to listen on both Tor and I2P (``tub.location = tor:ONION.onion:VIRTPORT,i2p:ADDR.i2p``). It can also listen on both Tor and plain TCP (use-case 2), with ``tub.port = tcp:PORT``, ``tub.location = tcp:HOST:PORT,tor:ONION.onion:VIRTPORT``, and ``anonymous = false`` (and omit the ``tcp = tor`` setting, as the address is already being broadcast through the location announcement). Server anonymity, automatic configuration ----------------------------------------- To configure a server node to listen on an anonymizing network, create the node with the ``--listen=tor`` option. This requires a Tor configuration that either launches a new Tor daemon, or has access to the Tor control port (and enough authority to create a new onion service). On Debian/Ubuntu systems, do ``apt install tor``, add yourself to the control group with ``adduser YOURUSERNAME debian-tor``, and then logout and log back in: if the ``groups`` command includes ``debian-tor`` in the output, you should have permission to use the unix-domain control port at ``/var/run/tor/control``. This option will set ``reveal-IP-address = False`` and ``[connections] tcp = tor``. It will allocate the necessary ports, instruct Tor to create the onion service (saving the private key somewhere inside NODEDIR/private/), obtain the ``.onion`` address, and populate ``tub.port`` and ``tub.location`` correctly. Performance and security issues =============================== If you are running a server which does not itself need to be anonymous, should you make it reachable via an anonymizing network or not? Or should you make it reachable *both* via an anonymizing network and as a publicly traceable TCP/IP server? There are several trade-offs effected by this decision. NAT/Firewall penetration ------------------------ Making a server be reachable via Tor or I2P makes it reachable (by Tor/I2P-capable clients) even if there are NATs or firewalls preventing direct TCP/IP connections to the server. Anonymity --------- Making a Tahoe-LAFS server accessible *only* via Tor or I2P can be used to guarantee that the Tahoe-LAFS clients use Tor or I2P to connect (specifically, the server should only advertise Tor/I2P addresses in the ``tub.location`` config key). This prevents misconfigured clients from accidentally de-anonymizing themselves by connecting to your server through the traceable Internet. 
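For example, using the same placeholder names as the manual-configuration section above, the two announcement styles differ only in ``tub.location`` (an illustrative contrast, not a complete ``tahoe.cfg``)::

    # Tor-only: clients can reach this server only through Tor
    tub.location = tor:ONION.onion:VIRTPORT

    # Tor plus public TCP (use-case 2 above): reachable either way
    tub.location = tcp:HOST:PORT,tor:ONION.onion:VIRTPORT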
Clearly, a server which is available as both a Tor/I2P service *and* a regular TCP address is not itself anonymous: the .onion address and the real IP address of the server are easily linkable. Also, interaction, through Tor, with a Tor Hidden Service may be more protected from network traffic analysis than interaction, through Tor, with a publicly traceable TCP/IP server. **XXX is there a document maintained by Tor developers which substantiates or refutes this belief? If so we need to link to it. If not, then maybe we should explain more here why we think this?** Linkability ----------- As of 1.12.0, the node uses a single persistent Tub key for outbound connections to the Introducer, and inbound connections to the Storage Server (and Helper). For clients, a new Tub key is created for each storage server we learn about, and these keys are *not* persisted (so they will change each time the client reboots). Clients traversing directories (from rootcap to subdirectory to filecap) are likely to request the same storage-indices (SIs) in the same order each time. A client connected to multiple servers will ask them all for the same SI at about the same time. And two clients which are sharing files or directories will visit the same SIs (at various times). As a result, the following things are linkable, even with ``reveal-IP-address = false``: * Storage servers can link recognize multiple connections from the same not-yet-rebooted client. (Note that the upcoming Accounting feature may cause clients to present a persistent client-side public key when connecting, which will be a much stronger linkage). * Storage servers can probably deduce which client is accessing data, by looking at the SIs being requested. Multiple servers can collude to determine that the same client is talking to all of them, even though the TubIDs are different for each connection. * Storage servers can deduce when two different clients are sharing data. * The Introducer could deliver different server information to each subscribed client, to partition clients into distinct sets according to which server connections they eventually make. For client+server nodes, it can also correlate the server announcement with the deduced client identity. Performance ----------- A client connecting to a publicly traceable Tahoe-LAFS server through Tor incurs substantially higher latency and sometimes worse throughput than the same client connecting to the same server over a normal traceable TCP/IP connection. When the server is on a Tor Hidden Service, it incurs even more latency, and possibly even worse throughput. Connecting to Tahoe-LAFS servers which are I2P servers incurs higher latency and worse throughput too. Positive and negative effects on other Tor users ------------------------------------------------ Sending your Tahoe-LAFS traffic over Tor adds cover traffic for other Tor users who are also transmitting bulk data. So that is good for them -- increasing their anonymity. However, it makes the performance of other Tor users' interactive sessions -- e.g. ssh sessions -- much worse. This is because Tor doesn't currently have any prioritization or quality-of-service features, so someone else's ssh keystrokes may have to wait in line while your bulk file contents get transmitted. The added delay might make other people's interactive sessions unusable. 
Both of these effects are doubled if you upload or download files to a Tor Hidden Service, as compared to if you upload or download files over Tor to a publicly traceable TCP/IP server. Positive and negative effects on other I2P users ------------------------------------------------ Sending your Tahoe-LAFS traffic over I2P adds cover traffic for other I2P users who are also transmitting data. So that is good for them -- increasing their anonymity. It will not directly impair the performance of other I2P users' interactive sessions, because the I2P network has several congestion control and quality-of-service features, such as prioritizing smaller packets. However, if many users are sending Tahoe-LAFS traffic over I2P, and do not have their I2P routers configured to participate in much traffic, then the I2P network as a whole will suffer degradation. Each Tahoe-LAFS router using I2P has their own anonymizing tunnels that their data is sent through. On average, one Tahoe-LAFS node requires 12 other I2P routers to participate in their tunnels. It is therefore important that your I2P router is sharing bandwidth with other routers, so that you can give back as you use I2P. This will never impair the performance of your Tahoe-LAFS node, because your I2P router will always prioritize your own traffic. tahoe_lafs-1.20.0/docs/architecture.rst0000644000000000000000000007434413615410400014777 0ustar00.. -*- coding: utf-8-with-signature -*- ======================= Tahoe-LAFS Architecture ======================= 1. `Overview`_ 2. `The Key-Value Store`_ 3. `File Encoding`_ 4. `Capabilities`_ 5. `Server Selection`_ 6. `Swarming Download, Trickling Upload`_ 7. `The File Store Layer`_ 8. `Leases, Refreshing, Garbage Collection`_ 9. `File Repairer`_ 10. `Security`_ 11. `Reliability`_ Overview ======== (See the `docs/specifications directory`_ for more details.) There are three layers: the key-value store, the file store, and the application. The lowest layer is the key-value store. The keys are "capabilities" -- short ASCII strings -- and the values are sequences of data bytes. This data is encrypted and distributed across a number of nodes, such that it will survive the loss of most of the nodes. There are no hard limits on the size of the values, but there may be performance issues with extremely large values (just due to the limitation of network bandwidth). In practice, values as small as a few bytes and as large as tens of gigabytes are in common use. The middle layer is the decentralized file store: a directed graph in which the intermediate nodes are directories and the leaf nodes are files. The leaf nodes contain only the data -- they contain no metadata other than the length in bytes. The edges leading to leaf nodes have metadata attached to them about the file they point to. Therefore, the same file may be associated with different metadata if it is referred to through different edges. The top layer consists of the applications using the file store. Allmydata.com used it for a backup service: the application periodically copies files from the local disk onto the decentralized file store. We later provide read-only access to those files, allowing users to recover them. There are several other applications built on top of the Tahoe-LAFS file store (see the RelatedProjects_ page of the wiki for a list). .. _docs/specifications directory: https://github.com/tahoe-lafs/tahoe-lafs/tree/master/docs/specifications .. 
_RelatedProjects: https://tahoe-lafs.org/trac/tahoe-lafs/wiki/RelatedProjects

The Key-Value Store
===================

The key-value store is implemented by a grid of Tahoe-LAFS storage servers -- user-space processes. Tahoe-LAFS storage clients communicate with the storage servers over TCP.

There are two supported protocols:

* Foolscap, the only supported protocol in releases before v1.19.

* HTTPS, new in v1.19.

By default HTTPS is enabled. When HTTPS is enabled on the server, the server transparently listens for both Foolscap and HTTPS on the same port. When it is disabled, the server only supports Foolscap. Clients can use either; by default they will use HTTPS when possible, falling back to Foolscap, but when configured appropriately they will only use Foolscap. At this time the only limitation of HTTPS is that I2P is not supported, so any usage of I2P only uses Foolscap.

Storage servers hold data in the form of "shares". Shares are encoded pieces of files. There are a configurable number of shares for each file, 10 by default. Normally, each share is stored on a separate server, but in some cases a single server can hold multiple shares of a file.

Nodes learn about each other through an "introducer". Each server connects to the introducer at startup and announces its presence. Each client connects to the introducer at startup, and receives a list of all servers from it. Each client then connects to every server, creating a "bi-clique" topology. In the current release, nodes behind NAT boxes will connect to all nodes that they can open connections to, but they cannot open connections to other nodes behind NAT boxes. Therefore, the more nodes behind NAT boxes, the less the topology resembles the intended bi-clique topology.

The introducer is a Single Point of Failure ("SPoF"), in that clients who never connect to the introducer will be unable to connect to any storage servers, but once a client has been introduced to everybody, it does not need the introducer again until it is restarted. The danger of a SPoF is further reduced in two ways. First, the introducer is defined by a hostname and a private key, which are easy to move to a new host in case the original one suffers an unrecoverable hardware problem. Second, even if the private key is lost, clients can be reconfigured to use a new introducer.

For future releases, we have plans to decentralize introduction, allowing any server to tell a new client about all the others.

File Encoding
=============

When a client stores a file on the grid, it first encrypts the file. It then breaks the encrypted file into small segments, in order to reduce the memory footprint, and to decrease the lag between initiating a download and receiving the first part of the file; for example the lag between hitting "play" and a movie actually starting.

The client then erasure-codes each segment, producing blocks of which only a subset are needed to reconstruct the segment (3 out of 10, with the default settings). It sends one block from each segment to a given server. The set of blocks on a given server constitutes a "share". Therefore a subset of the shares (3 out of 10, by default) are needed to reconstruct the file.

A hash of the encryption key is used to form the "storage index", which is used for both server selection (described below) and to index shares within the Storage Servers on the selected nodes.

The client computes secure hashes of the ciphertext and of the shares.
It uses `Merkle Trees`_ so that it is possible to verify the correctness of a subset of the data without requiring all of the data. For example, this allows you to verify the correctness of the first segment of a movie file and then begin playing the movie file in your movie viewer before the entire movie file has been downloaded.

These hashes are stored in a small datastructure named the Capability Extension Block which is stored on the storage servers alongside each share.

The capability contains the encryption key, the hash of the Capability Extension Block, and any encoding parameters necessary to perform the eventual decoding process. For convenience, it also contains the size of the file being stored.

To download, the client that wishes to turn a capability into a sequence of bytes will obtain the blocks from storage servers, use erasure-decoding to turn them into segments of ciphertext, use the decryption key to convert that into plaintext, then emit the plaintext bytes to the output target.

.. _`Merkle Trees`: http://systems.cs.colorado.edu/grunwald/Classes/Fall2003-InformationStorage/Papers/merkle-tree.pdf

Capabilities
============

Capabilities to immutable files represent a specific set of bytes. Think of it like a hash function: you feed in a bunch of bytes, and you get out a capability, which is deterministically derived from the input data: changing even one bit of the input data will result in a completely different capability.

Read-only capabilities to mutable files represent the ability to get a set of bytes representing some version of the file, most likely the latest version. Each read-only capability is unique. In fact, each mutable file has a unique public/private key pair created when the mutable file is created, and the read-only capability to that file includes a secure hash of the public key.

Read-write capabilities to mutable files represent the ability to read the file (just like a read-only capability) and also to write a new version of the file, overwriting any extant version. Read-write capabilities are unique -- each one includes the secure hash of the private key associated with that mutable file.

The capability provides both "location" and "identification": you can use it to retrieve a set of bytes, and then you can use it to validate ("identify") that these potential bytes are indeed the ones that you were looking for.

The "key-value store" layer doesn't include human-meaningful names. Capabilities sit on the "global+secure" edge of `Zooko's Triangle`_. They are self-authenticating, meaning that nobody can trick you into accepting a file that doesn't match the capability you used to refer to that file. The file store layer (described below) adds human-meaningful names atop the key-value layer.

.. _`Zooko's Triangle`: https://en.wikipedia.org/wiki/Zooko%27s_triangle

Server Selection
================

When a file is uploaded, the encoded shares are sent to some servers. But to which ones? The "server selection" algorithm is used to make this choice.

The storage index is used to consistently-permute the set of all server nodes (by sorting them by ``HASH(storage_index+nodeid)``). Each file gets a different permutation, which (on average) will evenly distribute shares among the grid and avoid hotspots.

Each server has announced its available space when it connected to the introducer, and we use that available space information to remove any servers that cannot hold an encoded share for our file.
Then we ask some of the servers thus removed if they are already holding any encoded shares for our file; we use this information later. (We ask any servers which are in the first 2*``N`` elements of the permuted list.) We then use the permuted list of servers to ask each server, in turn, if it will hold a share for us (a share that was not reported as being already present when we talked to the full servers earlier, and that we have not already planned to upload to a different server). We plan to send a share to a server by sending an 'allocate_buckets() query' to the server with the number of that share. Some will say yes they can hold that share, others (those who have become full since they announced their available space) will say no; when a server refuses our request, we take that share to the next server on the list. In the response to allocate_buckets() the server will also inform us of any shares of that file that it already has. We keep going until we run out of shares that need to be stored. At the end of the process, we'll have a table that maps each share number to a server, and then we can begin the encode and push phase, using the table to decide where each share should be sent. Most of the time, this will result in one share per server, which gives us maximum reliability. If there are fewer writable servers than there are unstored shares, we'll be forced to loop around, eventually giving multiple shares to a single server. If we have to loop through the node list a second time, we accelerate the query process, by asking each node to hold multiple shares on the second pass. In most cases, this means we'll never send more than two queries to any given node. If a server is unreachable, or has an error, or refuses to accept any of our shares, we remove it from the permuted list, so we won't query it again for this file. If a server already has shares for the file we're uploading, we add that information to the share-to-server table. This lets us do less work for files which have been uploaded once before, while making sure we still wind up with as many shares as we desire. Before a file upload is called successful, it has to pass an upload health check. For immutable files, we check to see that a condition called 'servers-of-happiness' is satisfied. When satisfied, 'servers-of-happiness' assures us that enough pieces of the file are distributed across enough servers on the grid to ensure that the availability of the file will not be affected if a few of those servers later fail. For mutable files and directories, we check to see that all of the encoded shares generated during the upload process were successfully placed on the grid. This is a weaker check than 'servers-of-happiness'; it does not consider any information about how the encoded shares are placed on the grid, and cannot detect situations in which all or a majority of the encoded shares generated during the upload process reside on only one storage server. We hope to extend 'servers-of-happiness' to mutable files in a future release of Tahoe-LAFS. If, at the end of the upload process, the appropriate upload health check fails, the upload is considered a failure. The current defaults use ``k`` = 3, ``servers_of_happiness`` = 7, and ``N`` = 10. ``N`` = 10 means that we'll try to place 10 shares. ``k`` = 3 means that we need any three shares to recover the file. 
``servers_of_happiness`` = 7 means that we'll consider an immutable file upload to be successful if we can place shares on enough servers that there are 7 different servers, the correct functioning of any ``k`` of which guarantees the availability of the immutable file.

``N`` = 10 and ``k`` = 3 means there is a 3.3x expansion factor. On a small grid, you should set ``N`` about equal to the number of storage servers in your grid; on a large grid, you might set it to something smaller to avoid the overhead of contacting every server to place a file. In either case, you should then set ``k`` such that ``N``/``k`` reflects your desired availability goals. The best value for ``servers_of_happiness`` will depend on how you use Tahoe-LAFS. In a friendnet with a variable number of servers, it might make sense to set it to the smallest number of servers that you expect to have online and accepting shares at any given time. In a stable environment without much server churn, it may make sense to set ``servers_of_happiness`` = ``N``.

When downloading a file, the current version just asks all known servers for any shares they might have. Once it has received enough responses that it knows where to find the needed ``k`` shares, it downloads at least the first segment from those servers. This means that it tends to download shares from the fastest servers. If some servers had more than one share, it will continue sending "Do You Have Block" requests to other servers, so that it can download subsequent segments from distinct servers (sorted by their DYHB round-trip times), if possible.

*future work*

A future release will use the server selection algorithm to reduce the number of queries that must be sent out. Other peer-node selection algorithms are possible. One earlier version (known as "Tahoe 3") used the permutation to place the nodes around a large ring, distributed the shares evenly around the same ring, then walked clockwise from 0 with a basket. Each time it encountered a share, it put it in the basket; each time it encountered a server, it gave it as many shares from the basket as that server would accept. This reduced the number of queries (usually to 1) for small grids (where ``N`` is larger than the number of nodes), but resulted in extremely non-uniform share distribution, which significantly hurt reliability (sometimes the permutation resulted in most of the shares being dumped on a single node).

Another algorithm (known as "denver airport" [#naming]_) uses the permuted hash to decide on an approximate target for each share, then sends lease requests via Chord routing. The request includes the contact information of the uploading node, and asks that the node which eventually accepts the lease should contact the uploader directly. The shares are then transferred over direct connections rather than through multiple Chord hops. Download uses the same approach. This allows nodes to avoid maintaining a large number of long-term connections, at the expense of complexity and latency.

.. [#naming] all of these names are derived from the location where they were concocted, in this case in a car ride from Boulder to DEN. To be precise, "Tahoe 1" was an unworkable scheme in which everyone who holds shares for a given file would form a sort of cabal which kept track of all the others, "Tahoe 2" is the first-100-nodes in the permuted hash described in this document, and "Tahoe 3" (or perhaps "Potrero hill 1") was the abandoned ring-with-many-hands approach.
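To make the permuted-list ("Tahoe 2") selection described in this section concrete, here is a minimal sketch -- not the actual Tahoe-LAFS implementation -- which assumes SHA-256 as the hash and server objects that expose a ``nodeid`` byte string::

    import hashlib

    def permuted_servers(storage_index, servers):
        """Order servers by HASH(storage_index + nodeid), as described above."""
        return sorted(
            servers,
            key=lambda s: hashlib.sha256(storage_index + s.nodeid).digest(),
        )

    # Every node that knows the same storage index and the same server list
    # computes the same ordering, so shares for a given file tend to be
    # placed on (and later looked up from) the same servers first.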
Swarming Download, Trickling Upload
===================================

Because the shares being downloaded are distributed across a large number of nodes, the download process will pull from many of them at the same time. The current encoding parameters require 3 shares to be retrieved for each segment, which means that up to 3 nodes will be used simultaneously. For larger networks, 8-of-22 encoding could be used, meaning 8 nodes can be used simultaneously. This allows the download process to use the sum of the available nodes' upload bandwidths, resulting in downloads that take full advantage of the common 8x disparity between download and upload bandwidth on modern ADSL lines.

On the other hand, uploads are hampered by the need to upload encoded shares that are larger than the original data (3.3x larger with the current default encoding parameters), through the slow end of the asymmetric connection. This means that on a typical 8x ADSL line, uploading a file will take about 32 times longer than downloading it again later.

Smaller expansion ratios can reduce this upload penalty, at the expense of reliability (see `Reliability`_, below). By using an "upload helper", this penalty is eliminated: the client does a 1x upload of encrypted data to the helper, then the helper performs encoding and pushes the shares to the storage servers. This is an improvement if the helper has significantly higher upload bandwidth than the client, so it makes the most sense for a commercially-run grid for which all of the storage servers are in a colo facility with high interconnect bandwidth. In this case, the helper is placed in the same facility, so the helper-to-storage-server bandwidth is huge.

See :doc:`helper` for details about the upload helper.

The File Store Layer
====================

The "file store" layer is responsible for mapping human-meaningful pathnames (directories and filenames) to pieces of data. The actual bytes inside these files are referenced by capability, but the file store layer is where the directory names, file names, and metadata are kept.

The file store layer is a graph of directories. Each directory contains a table of named children. These children are either other directories or files. All children are referenced by their capability.

A directory has two forms of capability: read-write caps and read-only caps. The table of children inside the directory has a read-write and read-only capability for each child. If you have a read-only capability for a given directory, you will not be able to access the read-write capability of its children. This results in "transitively read-only" directory access.

By having two different capabilities, you can choose which you want to share with someone else. If you create a new directory and share the read-write capability for it with a friend, then you will both be able to modify its contents. If instead you give them the read-only capability, then they will *not* be able to modify the contents. Any capability that you receive can be linked in to any directory that you can modify, so very powerful shared+published directory structures can be built from these components.

This structure enables individual users to have their own personal space, with links to spaces that are shared with specific other users, and other spaces that are globally visible.
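To illustrate the "transitively read-only" behaviour described above, here is a deliberately simplified model -- not Tahoe-LAFS's actual directory encoding, in which children are stored inside a mutable file -- where each child entry carries a read-write cap, a read-only cap, and metadata::

    class Directory:
        """Toy model: a directory maps child names to capability pairs."""

        def __init__(self):
            self.children = {}  # name -> (rw_cap, ro_cap, metadata)

        def add_child(self, name, rw_cap, ro_cap, metadata=None):
            self.children[name] = (rw_cap, ro_cap, metadata or {})

        def as_read_only(self):
            # A holder of only the read-only cap never sees the children's
            # read-write caps, so read-only access is transitive.
            ro = Directory()
            for name, (_rw_cap, ro_cap, metadata) in self.children.items():
                ro.add_child(name, rw_cap=None, ro_cap=ro_cap, metadata=metadata)
            return ro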
Leases, Refreshing, Garbage Collection ====================================== When a file or directory in the file store is no longer referenced, the space that its shares occupied on each storage server can be freed, making room for other shares. Tahoe-LAFS uses a garbage collection ("GC") mechanism to implement this space-reclamation process. Each share has one or more "leases", which are managed by clients who want the file/directory to be retained. The storage server accepts each share for a pre-defined period of time, and is allowed to delete the share if all of the leases are cancelled or allowed to expire. Garbage collection is not enabled by default: storage servers will not delete shares without being explicitly configured to do so. When GC is enabled, clients are responsible for renewing their leases on a periodic basis at least frequently enough to prevent any of the leases from expiring before the next renewal pass. See :doc:`garbage-collection` for further information, and for how to configure garbage collection. File Repairer ============= Shares may go away because the storage server hosting them has suffered a failure: either temporary downtime (affecting availability of the file), or a permanent data loss (affecting the preservation of the file). Hard drives crash, power supplies explode, coffee spills, and asteroids strike. The goal of a robust distributed file store is to survive these setbacks. To work against this slow, continual loss of shares, a File Checker is used to periodically count the number of shares still available for any given file. A more extensive form of checking known as the File Verifier can download the ciphertext of the target file and perform integrity checks (using strong hashes) to make sure the data is still intact. When the file is found to have decayed below some threshold, the File Repairer can be used to regenerate and re-upload the missing shares. These processes are conceptually distinct (the repairer is only run if the checker/verifier decides it is necessary), but in practice they will be closely related, and may run in the same process. The repairer process does not get the full capability of the file to be maintained: it merely gets the "repairer capability" subset, which does not include the decryption key. The File Verifier uses that data to find out which nodes ought to hold shares for this file, and to see if those nodes are still around and willing to provide the data. If the file is not healthy enough, the File Repairer is invoked to download the ciphertext, regenerate any missing shares, and upload them to new nodes. The goal of the File Repairer is to finish up with a full set of ``N`` shares. There are a number of engineering issues to be resolved here. The bandwidth, disk IO, and CPU time consumed by the verification/repair process must be balanced against the robustness that it provides to the grid. The nodes involved in repair will have very different access patterns than normal nodes, such that these processes may need to be run on hosts with more memory or network connectivity than usual. The frequency of repair will directly affect the resources consumed. In some cases, verification of multiple files can be performed at the same time, and repair of files can be delegated off to other nodes. *future work* Currently there are two modes of checking on the health of your file: "Checker" simply asks storage servers which shares they have and does nothing to try to verify that they aren't lying. 
"Verifier" downloads and cryptographically verifies every bit of every share of the file from every server, which costs a lot of network and CPU. A future improvement would be to make a random-sampling verifier which downloads and cryptographically verifies only a few randomly-chosen blocks from each server. This would require much less network and CPU but it could make it extremely unlikely that any sort of corruption -- even malicious corruption intended to evade detection -- would evade detection. This would be an instance of a cryptographic notion called "Proof of Retrievability". Note that to implement this requires no change to the server or to the cryptographic data structure -- with the current data structure and the current protocol it is up to the client which blocks they choose to download, so this would be solely a change in client behavior. Security ======== The design goal for this project is that an attacker may be able to deny service (i.e. prevent you from recovering a file that was uploaded earlier) but can accomplish none of the following three attacks: 1) violate confidentiality: the attacker gets to view data to which you have not granted them access 2) violate integrity: the attacker convinces you that the wrong data is actually the data you were intending to retrieve 3) violate unforgeability: the attacker gets to modify a mutable file or directory (either the pathnames or the file contents) to which you have not given them write permission Integrity (the promise that the downloaded data will match the uploaded data) is provided by the hashes embedded in the capability (for immutable files) or the digital signature (for mutable files). Confidentiality (the promise that the data is only readable by people with the capability) is provided by the encryption key embedded in the capability (for both immutable and mutable files). Data availability (the hope that data which has been uploaded in the past will be downloadable in the future) is provided by the grid, which distributes failures in a way that reduces the correlation between individual node failure and overall file recovery failure, and by the erasure-coding technique used to generate shares. Many of these security properties depend upon the usual cryptographic assumptions: the resistance of AES and RSA to attack, the resistance of SHA-256 to collision attacks and pre-image attacks, and upon the proximity of 2^-128 and 2^-256 to zero. A break in AES would allow a confidentiality violation, a collision break in SHA-256 would allow a consistency violation, and a break in RSA would allow a mutability violation. There is no attempt made to provide anonymity, neither of the origin of a piece of data nor the identity of the subsequent downloaders. In general, anyone who already knows the contents of a file will be in a strong position to determine who else is uploading or downloading it. Also, it is quite easy for a sufficiently large coalition of nodes to correlate the set of nodes who are all uploading or downloading the same file, even if the attacker does not know the contents of the file in question. Also note that the file size and (when convergence is being used) a keyed hash of the plaintext are not protected. Many people can determine the size of the file you are accessing, and if they already know the contents of a given file, they will be able to determine that you are uploading or downloading the same one. The capability-based security model is used throughout this project. 
Directory operations are expressed in terms of distinct read- and write- capabilities. Knowing the read-capability of a file is equivalent to the ability to read the corresponding data. The capability to validate the correctness of a file is strictly weaker than the read-capability (possession of read-capability automatically grants you possession of validate-capability, but not vice versa). These capabilities may be expressly delegated (irrevocably) by simply transferring the relevant secrets. The application layer can provide whatever access model is desired, built on top of this capability access model. Reliability =========== File encoding and peer-node selection parameters can be adjusted to achieve different goals. Each choice results in a number of properties; there are many tradeoffs. First, some terms: the erasure-coding algorithm is described as ``k``-out-of-``N`` (for this release, the default values are ``k`` = 3 and ``N`` = 10). Each grid will have some number of nodes; this number will rise and fall over time as nodes join, drop out, come back, and leave forever. Files are of various sizes, some are popular, others are unpopular. Nodes have various capacities, variable upload/download bandwidths, and network latency. Most of the mathematical models that look at node failure assume some average (and independent) probability 'P' of a given node being available: this can be high (servers tend to be online and available >90% of the time) or low (laptops tend to be turned on for an hour then disappear for several days). Files are encoded in segments of a given maximum size, which affects memory usage. The ratio of ``N``/``k`` is the "expansion factor". Higher expansion factors improve reliability very quickly (the binomial distribution curve is very sharp), but consumes much more grid capacity. When P=50%, the absolute value of ``k`` affects the granularity of the binomial curve (1-out-of-2 is much worse than 50-out-of-100), but high values asymptotically approach a constant (i.e. 500-of-1000 is not much better than 50-of-100). When P is high and the expansion factor is held at a constant, higher values of ``k`` and ``N`` give much better reliability (for P=99%, 50-out-of-100 is much much better than 5-of-10, roughly 10^50 times better), because there are more shares that can be lost without losing the file. Likewise, the total number of nodes in the network affects the same granularity: having only one node means a single point of failure, no matter how many copies of the file you make. Independent nodes (with uncorrelated failures) are necessary to hit the mathematical ideals: if you have 100 nodes but they are all in the same office building, then a single power failure will take out all of them at once. Pseudospoofing, also called a "Sybil Attack", is where a single attacker convinces you that they are actually multiple servers, so that you think you are using a large number of independent nodes, but in fact you have a single point of failure (where the attacker turns off all their machines at once). Large grids, with lots of truly independent nodes, will enable the use of lower expansion factors to achieve the same reliability, but will increase overhead because each node needs to know something about every other, and the rate at which nodes come and go will be higher (requiring network maintenance traffic). Also, the File Repairer work will increase with larger grids, although then the job can be distributed out to more nodes. 
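The effect of these parameters can be estimated with the binomial distribution mentioned above. The following sketch (an illustration only, not code from Tahoe-LAFS) computes the probability that at least ``k`` of ``N`` shares sit on currently reachable servers, assuming each server is independently available with probability P::

    from math import comb

    def share_availability(k, n, p):
        """P(at least k of n shares are reachable), servers independent."""
        return sum(comb(n, i) * (p ** i) * ((1 - p) ** (n - i))
                   for i in range(k, n + 1))

    print(share_availability(3, 10, 0.90))  # default 3-of-10, P=90%: ~0.9999996
    print(share_availability(3, 10, 0.50))  # same encoding, P=50%: ~0.945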
Higher values of ``N`` increase overhead: more shares means more Merkle hashes that must be included with the data, and more nodes to contact to retrieve the shares. Smaller segment sizes reduce memory usage (since each segment must be held in memory while erasure coding runs) and improves "alacrity" (since downloading can validate a smaller piece of data faster, delivering it to the target sooner), but also increase overhead (because more blocks means more Merkle hashes to validate them). In general, small private grids should work well, but the participants will have to decide between storage overhead and reliability. Large stable grids will be able to reduce the expansion factor down to a bare minimum while still retaining high reliability, but large unstable grids (where nodes are coming and going very quickly) may require more repair/verification bandwidth than actual upload/download traffic. tahoe_lafs-1.20.0/docs/aspiration-contract.txt0000644000000000000000000000330313615410400016273 0ustar00In December 2018, the Tahoe-LAFS project engaged Aspiration[1], a US 501(c)3 nonprofit technology organization, as a "fiscal sponsor"[2]. A portion of the project's Bitcoin will be given to Aspiration, from which they can pay developers and contractors to work on the Tahoe codebase. Aspiration will handle the payroll, taxes, accounting, project management, and oversight, and is compensated by an 8% management fee. This provides the tax-withholding structure to use our project's BTC for significant development. We're using 25% of our ~369 BTC for this initial stage of the project, which will give us about $300K-$350K of development work, spread out over the 2019 calendar year. While it would have been nice to make this happen a year ago (given the recent decline of the BTC price), we think this is a reasonable value, and we're excited to finally get to use this surprise windfall to improve the codebase. Our initial set of projects to fund, drafted by Liz Steininger of Least Authority and approved by Zooko and Brian, looks like this: * porting Tahoe and dependent libraries to Python 3 * improving grid operation/management tools * community outreach, UI/UX improvements, documentation * adding new community-requested features, improving garbage collection * possibly run another summit If this goes well (and especially if the BTC price recovers), we'll probably do more next year. As usual, the transfer amounts and addresses will be logged in "donations.rst" and "expenses.rst" in the docs/ directory. Many thanks to Gunner and Josh Black of Aspiration, and Liz Steininger of Least Authority, for making this possible. [1]: https://aspirationtech.org/ [2]: https://aspirationtech.org/services/openprojects tahoe_lafs-1.20.0/docs/backdoors.rst0000644000000000000000000000423113615410400014250 0ustar00.. -*- coding: utf-8-with-signature -*- Statement on Backdoors ====================== October 5, 2010 The New York Times has `recently reported`_ that the current U.S. administration is proposing a bill that would apparently, if passed, require communication systems to facilitate government wiretapping and access to encrypted data. (login required; username/password pairs available at `bugmenot`_). .. _recently reported: https://www.nytimes.com/2010/09/27/us/27wiretap.html .. _bugmenot: http://www.bugmenot.com/view/nytimes.com Commentary by the `Electronic Frontier Foundation`_, `Peter Suderman / Reason`_, `Julian Sanchez / Cato Institute`_. .. 
_Electronic Frontier Foundation: https://www.eff.org/deeplinks/2010/09/government-seeks .. _Peter Suderman / Reason: http://reason.com/blog/2010/09/27/obama-administration-frustrate .. _Julian Sanchez / Cato Institute: http://www.cato-at-liberty.org/designing-an-insecure-internet/ The core Tahoe developers promise never to change Tahoe-LAFS to facilitate government access to data stored or transmitted by it. Even if it were desirable to facilitate such access -- which it is not -- we believe it would not be technically feasible to do so without severely compromising Tahoe-LAFS' security against other attackers. There have been many examples in which backdoors intended for use by government have introduced vulnerabilities exploitable by other parties (a notable example being the Greek cellphone eavesdropping scandal in 2004/5). RFCs `1984`_ and `2804`_ elaborate on the security case against such backdoors. .. _1984: https://tools.ietf.org/html/rfc1984 .. _2804: https://tools.ietf.org/html/rfc2804 Note that since Tahoe-LAFS is open-source software, forks by people other than the current core developers are possible. In that event, we would try to persuade any such forks to adopt a similar policy. The following Tahoe-LAFS developers agree with this statement: David-Sarah Hopwood [Daira Hopwood] Zooko Wilcox-O'Hearn Brian Warner Kevan Carstensen Frédéric Marti Jack Lloyd François Deppierraz Yu Xue Marc Tooley Peter Secor Shawn Willden Terrell Russell Jean-Paul Calderone meejah Sajith Sasidharan tahoe_lafs-1.20.0/docs/backupdb.rst0000644000000000000000000002026213615410400014056 0ustar00.. -*- coding: utf-8-with-signature -*- ================== The Tahoe BackupDB ================== 1. `Overview`_ 2. `Schema`_ 3. `Upload Operation`_ 4. `Directory Operations`_ Overview ======== To speed up backup operations, Tahoe maintains a small database known as the "backupdb". This is used to avoid re-uploading files which have already been uploaded recently. This database lives in ``~/.tahoe/private/backupdb.sqlite``, and is a SQLite single-file database. It is used by the "``tahoe backup``" command. In the future, it may optionally be used by other commands such as "``tahoe cp``". The purpose of this database is twofold: to manage the file-to-cap translation (the "upload" step) and the directory-to-cap translation (the "mkdir-immutable" step). The overall goal of optimizing backup is to reduce the work required when the source disk has not changed (much) since the last backup. In the ideal case, running "``tahoe backup``" twice in a row, with no intervening changes to the disk, will not require any network traffic. Minimal changes to the source disk should result in minimal traffic. This database is optional. If it is deleted, the worst effect is that a subsequent backup operation may use more effort (network bandwidth, CPU cycles, and disk IO) than it would have without the backupdb. The database uses sqlite3, which is included as part of the standard Python library with Python 2.5 and later. For Python 2.4, Tahoe will try to install the "pysqlite" package at build-time, but this will succeed only if sqlite3 with development headers is already installed. On Debian and Debian derivatives you can install the "python-pysqlite2" package (which, despite the name, actually provides sqlite3 rather than sqlite2). On old distributions such as Debian etch (4.0 "oldstable") or Ubuntu Edgy (6.10) the "python-pysqlite2" package won't work, but the "sqlite3-dev" package will. 
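If you want to inspect the backupdb by hand, it can be opened with the standard-library ``sqlite3`` module. This is only an illustration of the idea -- the query below assumes the schema shown in the next section, and nothing in "``tahoe backup``" requires you to do this::

    import os.path
    import sqlite3

    db = sqlite3.connect(os.path.expanduser("~/.tahoe/private/backupdb.sqlite"))
    # Look up the filecap recorded for a previously backed-up local file.
    row = db.execute(
        "SELECT caps.filecap FROM local_files JOIN caps USING (fileid)"
        " WHERE local_files.path = ?",
        ("/home/warner/.emacs",),
    ).fetchone()
    print(row[0] if row else "not recorded in the backupdb")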
Schema ====== The database contains the following tables:: CREATE TABLE version ( version integer # contains one row, set to 1 ); CREATE TABLE local_files ( path varchar(1024), PRIMARY KEY -- index, this is an absolute UTF-8-encoded local filename size integer, -- os.stat(fn)[stat.ST_SIZE] mtime number, -- os.stat(fn)[stat.ST_MTIME] ctime number, -- os.stat(fn)[stat.ST_CTIME] fileid integer ); CREATE TABLE caps ( fileid integer PRIMARY KEY AUTOINCREMENT, filecap varchar(256) UNIQUE -- URI:CHK:... ); CREATE TABLE last_upload ( fileid INTEGER PRIMARY KEY, last_uploaded TIMESTAMP, last_checked TIMESTAMP ); CREATE TABLE directories ( dirhash varchar(256) PRIMARY KEY, dircap varchar(256), last_uploaded TIMESTAMP, last_checked TIMESTAMP ); Upload Operation ================ The upload process starts with a pathname (like ``~/.emacs``) and wants to end up with a file-cap (like ``URI:CHK:...``). The first step is to convert the path to an absolute form (``/home/warner/.emacs``) and do a lookup in the local_files table. If the path is not present in this table, the file must be uploaded. The upload process is: 1. record the file's size, ctime (which is the directory-entry change time or file creation time depending on OS) and modification time 2. upload the file into the grid, obtaining an immutable file read-cap 3. add an entry to the 'caps' table, with the read-cap, to get a fileid 4. add an entry to the 'last_upload' table, with the current time 5. add an entry to the 'local_files' table, with the fileid, the path, and the local file's size/ctime/mtime If the path *is* present in 'local_files', the easy-to-compute identifying information is compared: file size and ctime/mtime. If these differ, the file must be uploaded. The row is removed from the local_files table, and the upload process above is followed. If the path is present but ctime or mtime differs, the file may have changed. If the size differs, then the file has certainly changed. At this point, a future version of the "backup" command might hash the file and look for a match in an as-yet-defined table, in the hopes that the file has simply been moved from somewhere else on the disk. This enhancement requires changes to the Tahoe upload API before it can be significantly more efficient than simply handing the file to Tahoe and relying upon the normal convergence to notice the similarity. If ctime, mtime, or size is different, the client will upload the file, as above. If these identifiers are the same, the client will assume that the file is unchanged (unless the ``--ignore-timestamps`` option is provided, in which case the client always re-uploads the file), and it may be allowed to skip the upload. For safety, however, we require the client periodically perform a filecheck on these probably-already-uploaded files, and re-upload anything that doesn't look healthy. The client looks the fileid up in the 'last_checked' table, to see how long it has been since the file was last checked. A "random early check" algorithm should be used, in which a check is performed with a probability that increases with the age of the previous results. E.g. files that were last checked within a month are not checked, files that were checked 5 weeks ago are re-checked with 25% probability, 6 weeks with 50%, more than 8 weeks are always checked. This reduces the "thundering herd" of filechecks-on-everything that would otherwise result when a backup operation is run one month after the original backup. 
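One way to implement that "random early check" schedule (the real backupdb code may differ in detail) is a probability that rises linearly with the age of the last check::

    import random

    def should_recheck(weeks_since_last_check):
        """Never within 4 weeks, always after 8 weeks, 25%/50%/75% in between."""
        if weeks_since_last_check <= 4:
            return False
        if weeks_since_last_check >= 8:
            return True
        return random.random() < (weeks_since_last_check - 4) * 0.25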
If a filecheck reveals the file is not healthy, it is re-uploaded. If the filecheck shows the file is healthy, or if the filecheck was skipped, the client gets to skip the upload, and uses the previous filecap (from the 'caps' table) to add to the parent directory. If a new file is uploaded, a new entry is put in the 'caps' and 'last_upload' table, and an entry is made in the 'local_files' table to reflect the mapping from local disk pathname to uploaded filecap. If an old file is re-uploaded, the 'last_upload' entry is updated with the new timestamps. If an old file is checked and found healthy, the 'last_upload' entry is updated. Relying upon timestamps is a compromise between efficiency and safety: a file which is modified without changing the timestamp or size will be treated as unmodified, and the "``tahoe backup``" command will not copy the new contents into the grid. The ``--no-timestamps`` option can be used to disable this optimization, forcing every byte of the file to be hashed and encoded. Directory Operations ==================== Once the contents of a directory are known (a filecap for each file, and a dircap for each directory), the backup process must find or create a tahoe directory node with the same contents. The contents are hashed, and the hash is queried in the 'directories' table. If found, the last-checked timestamp is used to perform the same random-early-check algorithm described for files above, but no new upload is performed. Since "``tahoe backup``" creates immutable directories, it is perfectly safe to re-use a directory from a previous backup. If not found, the web-API "mkdir-immutable" operation is used to create a new directory, and an entry is stored in the table. The comparison operation ignores timestamps and metadata, and pays attention solely to the file names and contents. By using a directory-contents hash, the "``tahoe backup``" command is able to re-use directories from other places in the backed up data, or from old backups. This means that renaming a directory and moving a subdirectory to a new parent both count as "minor changes" and will result in minimal Tahoe operations and subsequent network traffic (new directories will be created for the modified directory and all of its ancestors). It also means that you can perform a backup ("#1"), delete a file or directory, perform a backup ("#2"), restore it, and then the next backup ("#3") will re-use the directories from backup #1. The best case is a null backup, in which nothing has changed. This will result in minimal network bandwidth: one directory read and two modifies. The ``Archives/`` directory must be read to locate the latest backup, and must be modified to add a new snapshot, and the ``Latest/`` directory will be updated to point to that same snapshot. tahoe_lafs-1.20.0/docs/cautions.rst0000644000000000000000000000620613615410400014132 0ustar00.. -*- coding: utf-8-with-signature -*- ======================================================= Things To Be Careful About As We Venture Boldly Forth ======================================================= See also :doc:`known_issues`. Timing Attacks ============== Asymmetric-key cryptography operations are particularly sensitive to side-channel attacks. Unless the library is carefully hardened against timing attacks, it is dangerous to allow an attacker to measure how long signature and pubkey-derivation operations take. With enough samples, the attacker can deduce the private signing key from these measurements. 
(Note that verification operations are only sensitive if the verifying key is secret, which is not the case for anything in Tahoe). We currently use private-key operations in mutable-file writes, and anticipate using them in signed-introducer announcements and accounting setup. Mutable-file writes can reveal timing information to the attacker because the signature operation takes place in the middle of a read-modify-write cycle. Modifying a directory requires downloading the old contents of the mutable file, modifying the contents, signing the new contents, then uploading the new contents. By observing the elapsed time between the receipt of the last packet for the download, and the emission of the first packet of the upload, the attacker will learn information about how long the signature took. The attacker might ensure that they run one of the servers, and delay responding to the download request so that their packet is the last one needed by the client. They might also manage to be the first server to which a new upload packet is sent. This attack gives the adversary timing information about one signature operation per mutable-file write. Note that the UCWE automatic-retry response (used by default in directory modification code) can cause multiple mutable-file read-modify-write cycles per user-triggered operation, giving the adversary a slightly higher multiplier. The signed-introducer announcement involves a signature made as the client node is booting, before the first connection is established to the Introducer. This might reveal timing information if any information is revealed about the client's exact boot time: the signature operation starts a fixed number of cycles after node startup, and the first packet to the Introducer is sent a fixed number of cycles after the signature is made. An adversary who can compare the node boot time against the transmission time of the first packet will learn information about the signature operation, one measurement per reboot. We currently do not provide boot-time information in Introducer messages or other client-to-server data. In general, we are not worried about these leakages, because timing-channel attacks typically require thousands or millions of measurements to detect the (presumably) small timing variations exposed by our asymmetric crypto operations, which would require thousands of mutable-file writes or thousands of reboots to be of use to the adversary. However, future authors should take care to not make changes that could provide additional information to attackers. tahoe_lafs-1.20.0/docs/check_running.py0000644000000000000000000000264213615410400014742 0ustar00 import psutil import filelock def can_spawn_tahoe(pidfile): """ Determine if we can spawn a Tahoe-LAFS for the given pidfile. That pidfile may be deleted if it is stale. :param pathlib.Path pidfile: the file to check, that is the Path to "running.process" in a Tahoe-LAFS configuration directory :returns bool: True if we can spawn `tahoe run` here """ lockpath = pidfile.parent / (pidfile.name + ".lock") with filelock.FileLock(lockpath): try: with pidfile.open("r") as f: pid, create_time = f.read().strip().split(" ", 1) except FileNotFoundError: return True # somewhat interesting: we have a pidfile pid = int(pid) create_time = float(create_time) try: proc = psutil.Process(pid) # most interesting case: there _is_ a process running at the # recorded PID -- but did it just happen to get that PID, or # is it the very same one that wrote the file? 
if create_time == proc.create_time(): # _not_ stale! another intance is still running against # this configuration return False except psutil.NoSuchProcess: pass # the file is stale pidfile.unlink() return True from pathlib import Path print("can spawn?", can_spawn_tahoe(Path("running.process"))) tahoe_lafs-1.20.0/docs/conf.py0000644000000000000000000002205713615410400013054 0ustar00# -*- coding: utf-8 -*- # # Tahoe-LAFS documentation build configuration file, created by # sphinx-quickstart on Sat Mar 26 11:20:25 2016. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = ['recommonmark', 'sphinx_rtd_theme'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # source_suffix = ['.rst', '.md'] source_suffix = ['.rst', '.md'] # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Tahoe-LAFS' copyright = u'2016, The Tahoe-LAFS Developers' author = u'The Tahoe-LAFS Developers' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = u'1.x' # The full version, including alpha/beta/rc tags. release = u'1.x' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = "en" # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all # documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. 
#modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. #keep_warnings = False # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'sphinx_rtd_theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (relative to this directory) to use as a favicon of # the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. #html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Language to be used for generating the HTML full-text search index. # Sphinx supports the following languages: # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' #html_search_language = 'en' # A dictionary with options for the search language support, empty by default. 
# Now only 'ja' uses this config value #html_search_options = {'type': 'default'} # The name of a javascript file (relative to the configuration directory) that # implements a search results scorer. If empty, the default will be used. #html_search_scorer = 'scorer.js' # Output file base name for HTML help builder. htmlhelp_basename = 'Tahoe-LAFSdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', # Latex figure (float) alignment #'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, 'Tahoe-LAFS.tex', u'Tahoe-LAFS Documentation', u'The Tahoe-LAFS Developers', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, 'tahoe-lafs', u'Tahoe-LAFS Documentation', [author], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, 'Tahoe-LAFS', u'Tahoe-LAFS Documentation', author, 'Tahoe-LAFS', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. #texinfo_no_detailmenu = False tahoe_lafs-1.20.0/docs/configuration.rst0000644000000000000000000014525113615410400015160 0ustar00.. -*- coding: utf-8-with-signature -*- ============================= Configuring a Tahoe-LAFS node ============================= #. `Node Types`_ #. `Overall Node Configuration`_ #. `Connection Management`_ #. `Client Configuration`_ #. `Storage Server Configuration`_ #. `Storage Server Plugin Configuration`_ #. `Frontend Configuration`_ #. `Running A Helper`_ #. `Running An Introducer`_ #. `Other Files in BASEDIR`_ #. `Static Server Definitions`_ #. `Other files`_ #. `Example`_ A Tahoe-LAFS node is configured by writing to files in its base directory. These files are read by the node when it starts, so each time you change them, you need to restart the node. The node also writes state to its base directory, so it will create files on its own. 
This document contains a complete list of the config files that are examined by the client node, as well as the state files that you'll observe in its base directory. The main file is named "``tahoe.cfg``", and is an "`.INI`_"-style configuration file (parsed by the Python stdlib `ConfigParser`_ module: "``[name]``" section markers, lines with "``key.subkey: value``", `RFC822-style`_ continuations). There are also other files containing information that does not easily fit into this format. The "``tahoe create-node``" or "``tahoe create-client``" command will create an initial ``tahoe.cfg`` file for you. After creation, the node will never modify the ``tahoe.cfg`` file: all persistent state is put in other files. The item descriptions below use the following types: ``boolean`` one of (True, yes, on, 1, False, off, no, 0), case-insensitive ``strports string`` a Twisted listening-port specification string, like "``tcp:80``" or "``tcp:3456:interface=127.0.0.1``". For a full description of the format, see `the Twisted strports documentation`_. Please note, if interface= is not specified, Tahoe-LAFS will attempt to bind the port specified on all interfaces. ``endpoint specification string`` a Twisted Endpoint specification string, like "``tcp:80``" or "``tcp:3456:interface=127.0.0.1``". These are replacing strports strings. For a full description of the format, see `the Twisted Endpoints documentation`_. Please note, if interface= is not specified, Tahoe-LAFS will attempt to bind the port specified on all interfaces. Also note that ``tub.port`` only works with TCP endpoints right now. ``FURL string`` a Foolscap endpoint identifier, like ``pb://soklj4y7eok5c3xkmjeqpw@192.168.69.247:44801/eqpwqtzm`` .. _.INI: https://en.wikipedia.org/wiki/INI_file .. _ConfigParser: https://docs.python.org/2/library/configparser.html .. _RFC822-style: https://www.ietf.org/rfc/rfc0822 .. _the Twisted strports documentation: https://twistedmatrix.com/documents/current/api/twisted.application.strports.html .. _the Twisted Endpoints documentation: http://twistedmatrix.com/documents/current/core/howto/endpoints.html#endpoint-types-included-with-twisted Node Types ========== A node can be a client/server or an introducer. Client/server nodes provide one or more of the following services: * web-API service * SFTP service * helper service * storage service. A client/server that provides storage service (i.e. storing shares for clients) is called a "storage server". If it provides any of the other services, it is a "storage client" (a node can be both a storage server and a storage client). A client/server node that provides web-API service is called a "gateway". Overall Node Configuration ========================== This section controls the network behavior of the node overall: which ports and IP addresses are used, when connections are timed out, etc. This configuration applies to all node types and is independent of the services that the node is offering. If your node is behind a firewall or NAT device and you want other clients to connect to it, you'll need to open a port in the firewall or NAT, and specify that port number in the tub.port option. If behind a NAT, you *may* need to set the ``tub.location`` option described below. ``[node]`` ``nickname = (UTF-8 string, optional)`` This value will be displayed in management tools as this node's "nickname". If not provided, the nickname will be set to "". This string shall be a UTF-8 encoded Unicode string. 
``web.port = (strports string, optional)`` This controls where the node's web server should listen, providing node status and, if the node is a client/server, providing web-API service as defined in :doc:`frontends/webapi`. This value is a Twisted "strports" specification such as "``3456``" or "``tcp:3456:interface=127.0.0.1``". The "``tahoe create-node``" or "``tahoe create-client``" commands set the ``web.port`` to "``tcp:3456:interface=127.0.0.1``" by default; this is overridable by the ``--webport`` option. You can make it use SSL by writing "``ssl:3456:privateKey=mykey.pem:certKey=cert.pem``" instead. If this is not provided, the node will not run a web server. ``web.static = (string, optional)`` This controls where the ``/static`` portion of the URL space is served. The value is a directory name (``~username`` is allowed, and non-absolute names are interpreted relative to the node's basedir), which can contain HTML and other files. This can be used to serve a Javascript-based frontend to the Tahoe-LAFS node, or other services. The default value is "``public_html``", which will serve ``BASEDIR/public_html`` . With the default settings, ``http://127.0.0.1:3456/static/foo.html`` will serve the contents of ``BASEDIR/public_html/foo.html`` . ``tub.port = (endpoint specification strings or "disabled", optional)`` This controls which port the node uses to accept Foolscap connections from other nodes. It is parsed as a comma-separated list of Twisted "server endpoint descriptor" strings, each of which is a value like ``tcp:12345`` or ``tcp:23456:interface=127.0.0.1``. To listen on multiple ports at once (e.g. both TCP-on-IPv4 and TCP-on-IPv6), use something like ``tcp6:interface=2600\:3c01\:f03c\:91ff\:fe93\:d272:3456,tcp:interface=8.8.8.8:3456``. Lists of endpoint descriptor strings like ``tcp:12345,tcp6:12345`` are known not to work, because they fail with an ``Address already in use.`` error. If any descriptor begins with ``listen:tor``, or ``listen:i2p``, the corresponding tor/i2p Provider object will construct additional endpoints for the Tub to listen on. This allows the ``[tor]`` or ``[i2p]`` sections in ``tahoe.cfg`` to customize the endpoint; e.g. to add I2CP control options. If you use ``listen:i2p``, you should not also have an ``i2p:..`` endpoint in ``tub.port``, as that would result in multiple I2P-based listeners. If ``tub.port`` is the string ``disabled``, the node will not listen at all, and thus cannot accept connections from other nodes. If ``[storage] enabled = true``, or ``[helper] enabled = true``, or the node is an Introducer, then it is an error to have ``tub.port`` be empty. If ``tub.port`` is disabled, then ``tub.location`` must also be disabled, and vice versa. For backwards compatibility, if this contains a simple integer, it will be used as a TCP port number, like ``tcp:%d`` (which will accept connections on all interfaces). However ``tub.port`` cannot be ``0`` or ``tcp:0`` (older versions accepted this, but the node is no longer willing to ask Twisted to allocate port numbers in this way). If ``tub.port`` is present, it may not be empty. If the ``tub.port`` config key is not provided (e.g. ``tub.port`` appears nowhere in the ``[node]`` section, or is commented out), the node will look in ``BASEDIR/client.port`` (or ``BASEDIR/introducer.port``, for introducers) for the descriptor that was used last time. If neither ``tub.port`` nor the port file is available, the node will ask the kernel to allocate any available port (the moral equivalent of ``tcp:0``).
The allocated port number will be written into a descriptor string in ``BASEDIR/client.port`` (or ``introducer.port``), so that subsequent runs will re-use the same port. ``tub.location = (hint string or "disabled", optional)`` In addition to running as a client, each Tahoe-LAFS node can also run as a server, listening for connections from other Tahoe-LAFS clients. The node announces its location by publishing a "FURL" (a string with some connection hints) to the Introducer. The string it publishes can be found in ``BASEDIR/private/storage.furl`` . The ``tub.location`` configuration controls what location is published in this announcement. If your node is meant to run as a server, you should fill this in, using a hostname or IP address that is reachable from your intended clients. If ``tub.port`` is set to ``disabled``, then ``tub.location`` must also be ``disabled``. If you don't provide ``tub.location``, the node will try to figure out a useful one by itself, by using tools like "``ifconfig``" to determine the set of IP addresses on which it can be reached from nodes both near and far. It will also include the TCP port number on which it is listening (either the one specified by ``tub.port``, or whichever port was assigned by the kernel when ``tub.port`` is left unspecified). However this automatic address-detection is discouraged, and will probably be removed from a future release. It will include the ``127.0.0.1`` "localhost" address (which is only useful to clients running on the same computer), and RFC1918 private-network addresses like ``10.*.*.*`` and ``192.168.*.*`` (which are only useful to clients on the local LAN). In general, the automatically-detected IP addresses will only be useful if the node has a public IP address, such as a VPS or colo-hosted server. You will certainly need to set ``tub.location`` if your node lives behind a firewall that is doing inbound port forwarding, or if you are using other proxies such that the local IP address or port number is not the same one that remote clients should use to connect. You might also want to control this when using a Tor proxy to avoid revealing your actual IP address through the Introducer announcement. If ``tub.location`` is specified, by default it entirely replaces the automatically determined set of IP addresses. To include the automatically determined addresses as well as the specified ones, include the uppercase string "``AUTO``" in the list. The value is a comma-separated string of method:host:port location hints, like this:: tcp:123.45.67.89:8098,tcp:tahoe.example.com:8098,tcp:127.0.0.1:8098 A few examples: * Don't listen at all (client-only mode):: tub.port = disabled tub.location = disabled * Use a DNS name so you can change the IP address more easily:: tub.port = tcp:8098 tub.location = tcp:tahoe.example.com:8098 * Run a node behind a firewall (which has an external IP address) that has been configured to forward external port 7912 to our internal node's port 8098:: tub.port = tcp:8098 tub.location = tcp:external-firewall.example.com:7912 * Emulate default behavior, assuming your host has public IP address of 123.45.67.89, and the kernel-allocated port number was 8098:: tub.port = tcp:8098 tub.location = tcp:123.45.67.89:8098,tcp:127.0.0.1:8098 * Use a DNS name but also include the default set of addresses:: tub.port = tcp:8098 tub.location = tcp:tahoe.example.com:8098,AUTO * Run a node behind a Tor proxy (perhaps via ``torsocks``), in client-only mode (i.e. 
we can make outbound connections, but other nodes will not be able to connect to us). The literal '``unreachable.example.org``' will not resolve, but will serve as a reminder to human observers that this node cannot be reached. "Don't call us.. we'll call you":: tub.port = tcp:8098 tub.location = tcp:unreachable.example.org:0 * Run a node behind a Tor proxy, and make the server available as a Tor "hidden service". (This assumes that other clients are running their node with ``torsocks``, such that they are prepared to connect to a ``.onion`` address.) The hidden service must first be configured in Tor, by giving it a local port number and then obtaining a ``.onion`` name, using something in the ``torrc`` file like:: HiddenServiceDir /var/lib/tor/hidden_services/tahoe HiddenServicePort 29212 127.0.0.1:8098 once Tor is restarted, the ``.onion`` hostname will be in ``/var/lib/tor/hidden_services/tahoe/hostname``. Then set up your ``tahoe.cfg`` like:: tub.port = tcp:8098 tub.location = tor:ualhejtq2p7ohfbb.onion:29212 ``log_gatherer.furl = (FURL, optional)`` If provided, this contains a single FURL string that is used to contact a "log gatherer", which will be granted access to the logport. This can be used to gather operational logs in a single place. Note that in previous releases of Tahoe-LAFS, if an old-style ``BASEDIR/log_gatherer.furl`` file existed it would also be used in addition to this value, allowing multiple log gatherers to be used at once. As of Tahoe-LAFS v1.9.0, an old-style file is ignored and a warning will be emitted if one is detected. This means that as of Tahoe-LAFS v1.9.0 you can have at most one log gatherer per node. See ticket `#1423`_ about lifting this restriction and letting you have multiple log gatherers. .. _`#1423`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1423 ``timeout.keepalive = (integer in seconds, optional)`` ``timeout.disconnect = (integer in seconds, optional)`` If ``timeout.keepalive`` is provided, it is treated as an integral number of seconds, and sets the Foolscap "keepalive timer" to that value. For each connection to another node, if nothing has been heard for a while, we will attempt to provoke the other end into saying something. The duration of silence that passes before sending the PING will be between KT and 2*KT. This is mainly intended to keep NAT boxes from expiring idle TCP sessions, but also gives TCP's long-duration keepalive/disconnect timers some traffic to work with. The default value is 240 (i.e. 4 minutes). If timeout.disconnect is provided, this is treated as an integral number of seconds, and sets the Foolscap "disconnect timer" to that value. For each connection to another node, if nothing has been heard for a while, we will drop the connection. The duration of silence that passes before dropping the connection will be between DT-2*KT and 2*DT+2*KT (please see ticket `#521`_ for more details). If we are sending a large amount of data to the other end (which takes more than DT-2*KT to deliver), we might incorrectly drop the connection. The default behavior (when this value is not provided) is to disable the disconnect timer. See ticket `#521`_ for a discussion of how to pick these timeout values. Using 30 minutes means we'll disconnect after 22 to 68 minutes of inactivity. 
Receiving data will reset this timeout, however if we have more than 22min of data in the outbound queue (such as 800kB in two pipelined segments of 10 shares each) and the far end has no need to contact us, our ping might be delayed, so we may disconnect them by accident. .. _`#521`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/521 ``tempdir = (string, optional)`` This specifies a temporary directory for the web-API server to use, for holding large files while they are being uploaded. If a web-API client attempts to upload a 10GB file, this tempdir will need to have at least 10GB available for the upload to complete. The default value is the ``tmp`` directory in the node's base directory (i.e. ``BASEDIR/tmp``), but it can be placed elsewhere. This directory is used for files that usually (on a Unix system) go into ``/tmp``. The string will be interpreted relative to the node's base directory. ``reveal-IP-address = (boolean, optional, defaults to True)`` This is a safety flag. When set to False (aka "private mode"), the node will refuse to start if any of the other configuration options would reveal the node's IP address to servers or the external network. This flag does not directly affect the node's behavior: its only power is to veto node startup when something looks unsafe. The default is True (non-private mode), because setting it to False requires the installation of additional libraries (use ``pip install tahoe-lafs[tor]`` and/or ``pip install tahoe-lafs[i2p]`` to get them) as well as additional non-python software (Tor/I2P daemons). Performance is also generally reduced when operating in private mode. When False, any of the following configuration problems will cause ``tahoe run`` to throw a PrivacyError instead of starting the node: * ``[node] tub.location`` contains any ``tcp:`` hints * ``[node] tub.location`` uses ``AUTO``, or is missing/empty (because that defaults to AUTO) * ``[connections] tcp =`` is set to ``tcp`` (or left as the default), rather than being set to ``tor`` or ``disabled`` .. _Connection Management: Connection Management ===================== Three sections (``[tor]``, ``[i2p]``, and ``[connections]``) control how the Tahoe node makes outbound connections. Tor and I2P are configured here. This also controls when Tor and I2P are used: for all TCP connections (to hide your IP address), or only when necessary (just for servers which declare that they need Tor, because they use ``.onion`` addresses). Note that if you want to protect your node's IP address, you should set ``[node] reveal-IP-address = False``, which will refuse to launch the node if any of the other configuration settings might violate this privacy property. ``[connections]`` ----------------- This section controls *when* Tor and I2P are used. The ``[tor]`` and ``[i2p]`` sections (described later) control *how* Tor/I2P connections are managed. All Tahoe nodes need to make a connection to the Introducer; the ``private/introducers.yaml`` file (described below) configures where one or more Introducers live. Tahoe client nodes must also make connections to storage servers: these targets are specified in announcements that come from the Introducer. Both are expressed as FURLs (a Foolscap URL), which include a list of "connection hints". Each connection hint describes one (of perhaps many) network endpoints where the service might live. 
Connection hints include a type, and look like: * ``tcp:tahoe.example.org:12345`` * ``tor:u33m4y7klhz3b.onion:1000`` * ``i2p:c2ng2pbrmxmlwpijn`` ``tor`` hints are always handled by the ``tor`` handler (configured in the ``[tor]`` section, described below). Likewise, ``i2p`` hints are always routed to the ``i2p`` handler. But either will be ignored if Tahoe was not installed with the necessary Tor/I2P support libraries, or if the Tor/I2P daemon is unreachable. The ``[connections]`` section lets you control how ``tcp`` hints are handled. By default, they use the normal TCP handler, which just makes direct connections (revealing your node's IP address to both the target server and the intermediate network). The node behaves this way if the ``[connections]`` section is missing entirely, or if it looks like this:: [connections] tcp = tcp To hide the Tahoe node's IP address from the servers that it uses, set the ``[connections]`` section to use Tor for TCP hints:: [connections] tcp = tor You can also disable TCP hints entirely, which would be appropriate when running an I2P-only node:: [connections] tcp = disabled (Note that I2P does not support connections to normal TCP ports, so ``[connections] tcp = i2p`` is invalid) In the future, Tahoe services may be changed to live on HTTP/HTTPS URLs instead of Foolscap. In that case, connections will be made using whatever handler is configured for ``tcp`` hints. So the same ``tcp = tor`` configuration will work. ``[tor]`` --------- This controls how Tor connections are made. The defaults (all empty) mean that, when Tor is needed, the node will try to connect to a Tor daemon's SOCKS proxy on localhost port 9050 or 9150. Port 9050 is the default Tor SOCKS port, so it should be available under any system Tor instance (e.g. the one launched at boot time when the standard Debian ``tor`` package is installed). Port 9150 is the SOCKS port for the Tor Browser Bundle, so it will be available any time the TBB is running. You can set ``launch = True`` to cause the Tahoe node to launch a new Tor daemon when it starts up (and kill it at shutdown), if you don't have a system-wide instance available. Note that it takes 30-60 seconds for Tor to get running, so using a long-running Tor process may enable a faster startup. If your Tor executable doesn't live on ``$PATH``, use ``tor.executable=`` to specify it. ``[tor]`` ``enabled = (boolean, optional, defaults to True)`` If False, this will disable the use of Tor entirely. The default of True means the node will use Tor, if necessary, and if possible. ``socks.port = (string, optional, endpoint specification string, defaults to empty)`` This tells the node that Tor connections should be routed to a SOCKS proxy listening on the given endpoint. The default (of an empty value) will cause the node to first try localhost port 9050, then if that fails, try localhost port 9150. These are the default listening ports of the standard Tor daemon, and the Tor Browser Bundle, respectively. While this nominally accepts an arbitrary endpoint string, internal limitations prevent it from accepting anything but ``tcp:HOST:PORT`` (unfortunately, unix-domain sockets are not yet supported). See ticket #2813 for details. Also note that using a HOST of anything other than localhost is discouraged, because you would be revealing your IP address to external (and possibly hostile) machines. 
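For example, a node that should reach Tor through a SOCKS proxy listening on a non-default local port could use a ``[tor]`` section like this (a sketch only; the port number is hypothetical)::

    [tor]
    # route Tor connections through a local SOCKS proxy on a custom port
    socks.port = tcp:127.0.0.1:9250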
``control.port = (string, optional, endpoint specification string)`` This tells the node to connect to a pre-existing Tor daemon on the given control port (which is typically ``unix://var/run/tor/control`` or ``tcp:localhost:9051``). The node will then ask Tor what SOCKS port it is using, and route Tor connections to that. ``launch = (bool, optional, defaults to False)`` If True, the node will spawn a new (private) copy of Tor at startup, and will kill it at shutdown. The new Tor will be given a persistent state directory under ``NODEDIR/private/``, where Tor's microdescriptors will be cached, to speed up subsequent startup. ``tor.executable = (string, optional, defaults to empty)`` This controls which Tor executable is used when ``launch = True``. If empty, the first executable program named ``tor`` found on ``$PATH`` will be used. There are 5 valid combinations of these configuration settings: * 1: ``(empty)``: use SOCKS on port 9050/9150 * 2: ``launch = true``: launch a new Tor * 3: ``socks.port = tcp:HOST:PORT``: use an existing Tor on the given SOCKS port * 4: ``control.port = ENDPOINT``: use an existing Tor at the given control port * 5: ``enabled = false``: no Tor at all 1 is the default, and should work for any Linux host with the system Tor package installed. 2 should work on any box with Tor installed into $PATH, but will take an extra 30-60 seconds at startup. 3 and 4 can be used for specialized installations, where Tor is already running, but not listening on the default port. 5 should be used in environments where Tor is installed, but should not be used (perhaps due to a site-wide policy). Note that Tor support depends upon some additional Python libraries. To install Tahoe with Tor support, use ``pip install tahoe-lafs[tor]``. ``[i2p]`` --------- This controls how I2P connections are made. Like with Tor, the all-empty defaults will cause I2P connections to be routed to a pre-existing I2P daemon on port 7656. This is the default SAM port for the ``i2p`` daemon. ``[i2p]`` ``enabled = (boolean, optional, defaults to True)`` If False, this will disable the use of I2P entirely. The default of True means the node will use I2P, if necessary, and if possible. ``sam.port = (string, optional, endpoint descriptor, defaults to empty)`` This tells the node that I2P connections should be made via the SAM protocol on the given port. The default (of an empty value) will cause the node to try localhost port 7656. This is the default listening port of the standard I2P daemon. ``launch = (bool, optional, defaults to False)`` If True, the node will spawn a new (private) copy of I2P at startup, and will kill it at shutdown. The new I2P will be given a persistent state directory under ``NODEDIR/private/``, where I2P's microdescriptors will be cached, to speed up subsequent startup. The daemon will allocate its own SAM port, which will be queried from the config directory. ``i2p.configdir = (string, optional, directory)`` This tells the node to parse an I2P config file in the given directory, and use the SAM port it finds there. If ``launch = True``, the new I2P daemon will be told to use the given directory (which can be pre-populated with a suitable config file). If ``launch = False``, we assume there is a pre-running I2P daemon running from this directory, and can again parse the config file for the SAM port. ``i2p.executable = (string, optional, defaults to empty)`` This controls which I2P executable is used when ``launch = True``. 
If empty, the first executable program named ``i2p`` found on ``$PATH`` will be used. .. _Client Configuration: Client Configuration ==================== ``[client]`` ``introducer.furl = (FURL string, mandatory)`` DEPRECATED. See :ref:`introducer-definitions`. This FURL tells the client how to connect to the introducer. Each Tahoe-LAFS grid is defined by an introducer. The introducer's FURL is created by the introducer node and written into its private base directory when it starts, whereupon it should be published to everyone who wishes to attach a client to that grid ``helper.furl = (FURL string, optional)`` If provided, the node will attempt to connect to and use the given helper for uploads. See :doc:`helper` for details. ``shares.needed = (int, optional) aka "k", default 3`` ``shares.total = (int, optional) aka "N", N >= k, default 10`` ``shares.happy = (int, optional) 1 <= happy <= N, default 7`` These three values set the default encoding parameters. Each time a new file is uploaded, erasure-coding is used to break the ciphertext into separate shares. There will be ``N`` (i.e. ``shares.total``) shares created, and the file will be recoverable if any ``k`` (i.e. ``shares.needed``) shares are retrieved. The default values are 3-of-10 (i.e. ``shares.needed = 3``, ``shares.total = 10``). Setting ``k`` to 1 is equivalent to simple replication (uploading ``N`` copies of the file). These values control the tradeoff between storage overhead and reliability. To a first approximation, a 1MB file will use (1MB * ``N``/``k``) of backend storage space (the actual value will be a bit more, because of other forms of overhead). Up to ``N``-``k`` shares can be lost before the file becomes unrecoverable. So large ``N``/``k`` ratios are more reliable, and small ``N``/``k`` ratios use less disk space. ``N`` cannot be larger than 256, because of the 8-bit erasure-coding algorithm that Tahoe-LAFS uses. ``k`` can not be greater than ``N``. See :doc:`performance` for more details. ``shares.happy`` allows you control over how well to "spread out" the shares of an immutable file. For a successful upload, shares are guaranteed to be initially placed on at least ``shares.happy`` distinct servers, the correct functioning of any ``k`` of which is sufficient to guarantee the availability of the uploaded file. This value should not be larger than the number of servers on your grid. A value of ``shares.happy`` <= ``k`` is allowed, but this is not guaranteed to provide any redundancy if some servers fail or lose shares. It may still provide redundancy in practice if ``N`` is greater than the number of connected servers, because in that case there will typically be more than one share on at least some storage nodes. However, since a successful upload only guarantees that at least ``shares.happy`` shares have been stored, the worst case is still that there is no redundancy. (Mutable files use a different share placement algorithm that does not currently consider this parameter.) ``mutable.format = sdmf or mdmf`` This value tells Tahoe-LAFS what the default mutable file format should be. If ``mutable.format=sdmf``, then newly created mutable files will be in the old SDMF format. This is desirable for clients that operate on grids where some peers run older versions of Tahoe-LAFS, as these older versions cannot read the new MDMF mutable file format. If ``mutable.format`` is ``mdmf``, then newly created mutable files will use the new MDMF format, which supports efficient in-place modification and streaming downloads. 
You can overwrite this value using a special mutable-type parameter in the webapi. If you do not specify a value here, Tahoe-LAFS will use SDMF for all newly-created mutable files. Note that this parameter applies only to files, not to directories. Mutable directories, which are stored in mutable files, are not controlled by this parameter and will always use SDMF. We may revisit this decision in future versions of Tahoe-LAFS. See :doc:`specifications/mutable` for details about mutable file formats. ``peers.preferred = (string, optional)`` This is an optional comma-separated list of Node IDs of servers that will be tried first when selecting storage servers for reading or writing. Servers should be identified here by their Node ID as it appears in the web ui, underneath the server's nickname. For storage servers running tahoe versions >=1.10 (if the introducer is also running tahoe >=1.10) this will be a "Node Key" (which is prefixed with 'v0-'). For older nodes, it will be a TubID instead. When a preferred server (and/or the introducer) is upgraded to 1.10 or later, clients must adjust their configs accordingly. Every node selected for upload, whether preferred or not, will still receive the same number of shares (one, if there are ``N`` or more servers accepting uploads). Preferred nodes are simply moved to the front of the server selection lists computed for each file. This is useful if a subset of your nodes have different availability or connectivity characteristics than the rest of the grid. For instance, if there are more than ``N`` servers on the grid, and ``K`` or more of them are at a single physical location, it would make sense for clients at that location to prefer their local servers so that they can maintain access to all of their uploads without using the internet. ``force_foolscap = (boolean, optional)`` If this is ``True``, the client will only connect to storage servers via Foolscap, regardless of whether they support HTTPS. If this is ``False``, the client will prefer HTTPS when it is available on the server. The default value is ``False``. In addition, see :doc:`accepting-donations` for a convention for donating to storage server operators. Frontend Configuration ====================== The Tahoe-LAFS client process can run a variety of frontend file access protocols. You will use these to create and retrieve files from the Tahoe-LAFS file store. Configuration details for each are documented in the following protocol-specific guides: HTTP Tahoe runs a webserver by default on port 3456. This interface provides a human-oriented "WUI", with pages to create, modify, and browse directories and files, as well as a number of pages to check on the status of your Tahoe node. It also provides a machine-oriented "WAPI", with a REST-ful HTTP interface that can be used by other programs (including the CLI tools). Please see :doc:`frontends/webapi` for full details, and the ``web.port`` and ``web.static`` config variables above. :doc:`frontends/download-status` also describes a few WUI status pages. CLI The main ``tahoe`` executable includes subcommands for manipulating the file store, uploading/downloading files, and creating/running Tahoe nodes. See :doc:`frontends/CLI` for details. SFTP Tahoe can also run SFTP servers, and map a username/password pair to a top-level Tahoe directory. See :doc:`frontends/FTP-and-SFTP` for instructions on configuring this service, and the ``[sftpd]`` section of ``tahoe.cfg``. 
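As a concrete sketch of the HTTP frontend settings mentioned above, the relevant options live in the ``[node]`` section; the values shown here are the documented defaults, repeated only for illustration::

    [node]
    # where the WUI and web-API listen (the default written by "tahoe create-client")
    web.port = tcp:3456:interface=127.0.0.1
    # serve BASEDIR/public_html under the /static/ URL prefix (the default)
    web.static = public_html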
Storage Server Configuration ============================ ``[storage]`` ``enabled = (boolean, optional)`` If this is ``True``, the node will run a storage server, offering space to other clients. If it is ``False``, the node will not run a storage server, meaning that no shares will be stored on this node. Use ``False`` for clients who do not wish to provide storage service. The default value is ``True``. ``anonymous = (boolean, optional)`` If this is ``True``, the node will expose the storage server via Foolscap without any additional authentication or authorization. The capability to use all storage services is conferred by knowledge of the Foolscap fURL for the storage server which will be included in the storage server's announcement. If it is ``False``, the node will not expose this and storage must be exposed using the storage server plugin system (see `Storage Server Plugin Configuration`_ for details). The default value is ``True``. ``readonly = (boolean, optional)`` If ``True``, the node will run a storage server but will not accept any shares, making it effectively read-only. Use this for storage servers that are being decommissioned: the ``storage/`` directory could be mounted read-only, while shares are moved to other servers. Note that this currently only affects immutable shares. Mutable shares (used for directories) will be written and modified anyway. See ticket `#390`_ for the current status of this bug. The default value is ``False``. ``reserved_space = (str, optional)`` If provided, this value defines how much disk space is reserved: the storage server will not accept any share that causes the amount of free disk space to drop below this value. (The free space is measured by a call to ``statvfs(2)`` on Unix, or ``GetDiskFreeSpaceEx`` on Windows, and is the space available to the user account under which the storage server runs.) This string contains a number, with an optional case-insensitive scale suffix, optionally followed by "B" or "iB". The supported scale suffixes are "K", "M", "G", "T", "P" and "E", and a following "i" indicates to use powers of 1024 rather than 1000. So "100MB", "100 M", "100000000B", "100000000", and "100000kb" all mean the same thing. Likewise, "1MiB", "1024KiB", "1024 Ki", and "1048576 B" all mean the same thing. "``tahoe create-node``" generates a tahoe.cfg with "``reserved_space=1G``", but you may wish to raise, lower, or remove the reservation to suit your needs. ``expire.enabled =`` ``expire.mode =`` ``expire.override_lease_duration =`` ``expire.cutoff_date =`` ``expire.immutable =`` ``expire.mutable =`` These settings control garbage collection, in which the server will delete shares that no longer have an up-to-date lease on them. Please see :doc:`garbage-collection` for full details. .. _#390: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/390 ``storage_dir = (string, optional)`` This specifies a directory where share files and other state pertaining to storage servers will be kept. The default value is the ``storage`` directory in the node's base directory (i.e. ``BASEDIR/storage``), but it can be placed elsewhere. Relative paths will be interpreted relative to the node's base directory. ``force_foolscap = (boolean, optional)`` If this is ``True``, the node will expose the storage server via Foolscap only, with no support for HTTPS. If this is ``False``, the server will support both Foolscap and HTTPS on the same port. The default value is ``False``. 
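Putting a few of the options above together, a storage server that reserves free space and keeps its shares on a separate volume might use a ``[storage]`` section like this (a sketch only; the path and size are illustrative, not recommendations)::

    [storage]
    enabled = True
    # refuse new shares once free disk space would drop below 10 GiB
    reserved_space = 10GiB
    # keep share files outside the node's base directory (hypothetical path)
    storage_dir = /mnt/tahoe-shares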
In addition, see :doc:`accepting-donations` for a convention encouraging donations to storage server operators. Storage Server Plugin Configuration =================================== In addition to the built-in storage server, it is also possible to load and configure storage server plugins into Tahoe-LAFS. Plugins to load are specified in the ``[storage]`` section. ``plugins = (string, optional)`` This gives a comma-separated list of plugin names. Plugins named here will be loaded and offered to clients. The default is for no such plugins to be loaded. Each plugin can also be configured in a dedicated section. The section for each plugin is named after the plugin itself:: [storageserver.plugins.<plugin name>] For example, the configuration section for a plugin named ``acme-foo-v1`` is ``[storageserver.plugins.acme-foo-v1]``. The contents of such sections are defined by the plugins themselves. Refer to the documentation provided with those plugins. Running A Helper ================ A "helper" is a regular client node that also offers the "upload helper" service. ``[helper]`` ``enabled = (boolean, optional)`` If ``True``, the node will run a helper (see :doc:`helper` for details). The helper's contact FURL will be placed in ``private/helper.furl``, from which it can be copied to any clients that wish to use it. Clearly nodes should not both run a helper and attempt to use one: do not create ``helper.furl`` and also define ``[helper] enabled`` in the same node. The default is ``False``. Running An Introducer ===================== The introducer node uses a different ``.tac`` file (named "``introducer.tac``"), and pays attention to the ``[node]`` section, but not the others. The Introducer node maintains some different state than regular client nodes. ``BASEDIR/private/introducer.furl`` This is generated the first time the introducer node is started, and used again on subsequent runs, to give the introduction service a persistent long-term identity. This file should be published and copied into new client nodes before they are started for the first time. Other Files in BASEDIR ====================== Some configuration is not kept in ``tahoe.cfg``, for the following reasons: * it doesn't fit into the INI format of ``tahoe.cfg`` (e.g. ``private/servers.yaml``) * it is generated by the node at startup, e.g. encryption keys. The node never writes to ``tahoe.cfg``. * it is generated by user action, e.g. the "``tahoe create-alias``" command. In addition, non-configuration persistent state is kept in the node's base directory, next to the configuration knobs. This section describes these other files. ``private/node.pem`` This contains an SSL private-key certificate. The node generates this the first time it is started, and re-uses it on subsequent runs. This certificate allows the node to have a cryptographically-strong identifier (the Foolscap "TubID"), and to establish secure connections to other nodes. ``storage/`` Nodes that host StorageServers will create this directory to hold shares of files on behalf of other clients. There will be a directory underneath it for each StorageIndex for which this node is holding shares. There is also an "incoming" directory where partially-completed shares are held while they are being received. This location may be overridden in ``tahoe.cfg``. ``tahoe-client.tac`` This file defines the client, by constructing the actual Client instance each time the node is started.
It is used by the "``twistd``" daemonization program (in the ``-y`` mode), which is run internally by the "``tahoe start``" command. This file is created by the "``tahoe create-node``" or "``tahoe create-client``" commands. ``tahoe-introducer.tac`` This file is used to construct an introducer, and is created by the "``tahoe create-introducer``" command. ``private/control.furl`` This file contains a FURL that provides access to a control port on the client node, from which files can be uploaded and downloaded. This file is created with permissions that prevent anyone else from reading it (on operating systems that support such a concept), to insure that only the owner of the client node can use this feature. This port is intended for debugging and testing use. ``private/logport.furl`` This file contains a FURL that provides access to a 'log port' on the client node, from which operational logs can be retrieved. Do not grant logport access to strangers, because occasionally secret information may be placed in the logs. ``private/helper.furl`` If the node is running a helper (for use by other clients), its contact FURL will be placed here. See :doc:`helper` for more details. ``private/root_dir.cap`` (optional) The command-line tools will read a directory cap out of this file and use it, if you don't specify a '--dir-cap' option or if you specify '--dir-cap=root'. ``private/convergence`` (automatically generated) An added secret for encrypting immutable files. Everyone who has this same string in their ``private/convergence`` file encrypts their immutable files in the same way when uploading them. This causes identical files to "converge" -- to share the same storage space since they have identical ciphertext -- which conserves space and optimizes upload time, but it also exposes file contents to the possibility of a brute-force attack by people who know that string. In this attack, if the attacker can guess most of the contents of a file, then they can use brute-force to learn the remaining contents. So the set of people who know your ``private/convergence`` string is the set of people who converge their storage space with you when you and they upload identical immutable files, and it is also the set of people who could mount such an attack. The content of the ``private/convergence`` file is a base-32 encoded string. If the file doesn't exist, then when the Tahoe-LAFS client starts up it will generate a random 256-bit string and write the base-32 encoding of this string into the file. If you want to converge your immutable files with as many people as possible, put the empty string (so that ``private/convergence`` is a zero-length file). .. _introducer-definitions: Introducer Definitions ====================== The ``private/introducers.yaml`` file defines Introducers. Choose a locally-unique "petname" for each one then define their FURLs in ``private/introducers.yaml`` like this:: introducers: petname2: furl: "FURL2" petname3: furl: "FURL3" Servers will announce themselves to all configured introducers. Clients will merge the announcements they receive from all introducers. Nothing will re-broadcast an announcement (i.e. telling introducer 2 about something you heard from introducer 1). If you omit the introducer definitions from ``introducers.yaml``, the node will not use an Introducer at all. Such "introducerless" clients must be configured with static servers (described below), or they will not be able to upload and download files. .. 
_server_list: Static Server Definitions ========================= The ``private/servers.yaml`` file defines "static servers": those which are not announced through the Introducer. This can also control how we connect to those servers. Most clients do not need this file. It is only necessary if you want to use servers which are (for some specialized reason) not announced through the Introducer, or to connect to those servers in different ways. You might do this to "freeze" the server list: use the Introducer for a while, then copy all announcements into ``servers.yaml``, then stop using the Introducer entirely. Or you might have a private server that you don't want other users to learn about (via the Introducer). Or you might run a local server which is announced to everyone else as a Tor onion address, but which you can connect to directly (via TCP). The file syntax is `YAML`_, with a top-level dictionary named ``storage``. Other items may be added in the future. The ``storage`` dictionary takes keys which are server-ids, and values which are dictionaries with two keys: ``ann`` and ``connections``. The ``ann`` value is a dictionary which will be used in lieu of the introducer announcement, so it can be populated by copying the ``ann`` dictionary from ``NODEDIR/introducer_cache.yaml``. The server-id can be any string, but ideally you should use the public key as published by the server. Each server displays this as "Node ID:" in the top-right corner of its "WUI" web welcome page. It can also be obtained from other client nodes, which record it as ``key_s:`` in their ``introducer_cache.yaml`` file. The format is "v0-" followed by 52 base32 characters like so:: v0-c2ng2pbrmxmlwpijn3mr72ckk5fmzk6uxf6nhowyosaubrt6y5mq The ``ann`` dictionary really only needs one key: * ``anonymous-storage-FURL``: how we connect to the server (note that other important keys may be added in the future, as Accounting and HTTP-based servers are implemented) Optional keys include: * ``nickname``: the name of this server, as displayed on the Welcome page server list * ``permutation-seed-base32``: this controls how shares are mapped to servers. This is normally computed from the server-ID, but can be overridden to maintain the mapping for older servers which used to use Foolscap TubIDs as server-IDs. If your selected server-ID cannot be parsed as a public key, it will be hashed to compute the permutation seed. This is fine as long as all clients use the same thing, but if they don't, then your client will disagree with the other clients about which servers should hold each share. This will slow downloads for everybody, and may cause additional work or consume extra storage when repair operations don't converge. * anything else from the ``introducer_cache.yaml`` announcement, like ``my-version``, which is displayed on the Welcome page server list For example, a private static server could be defined with a ``private/servers.yaml`` file like this:: storage: v0-4uazse3xb6uu5qpkb7tel2bm6bpea4jhuigdhqcuvvse7hugtsia: ann: nickname: my-server-1 anonymous-storage-FURL: pb://u33m4y7klhz3bypswqkozwetvabelhxt@tcp:8.8.8.8:51298/eiu2i7p6d6mm4ihmss7ieou5hac3wn6b Or, if you're feeling really lazy:: storage: my-serverid-1: ann: anonymous-storage-FURL: pb://u33m4y7klhz3bypswqkozwetvabelhxt@tcp:8.8.8.8:51298/eiu2i7p6d6mm4ihmss7ieou5hac3wn6b .. 
_YAML: http://yaml.org/ Overriding Connection-Handlers for Static Servers ------------------------------------------------- A ``connections`` entry will override the default connection-handler mapping (as established by ``tahoe.cfg [connections]``). This can be used to build a "Tor-mostly client": one which is restricted to use Tor for all connections, except for a few private servers to which normal TCP connections will be made. To override the published announcement (and thus avoid connecting twice to the same server), the server ID must exactly match. ``tahoe.cfg``:: [connections] # this forces the use of Tor for all "tcp" hints tcp = tor ``private/servers.yaml``:: storage: v0-c2ng2pbrmxmlwpijn3mr72ckk5fmzk6uxf6nhowyosaubrt6y5mq: ann: nickname: my-server-1 anonymous-storage-FURL: pb://u33m4y7klhz3bypswqkozwetvabelhxt@tcp:10.1.2.3:51298/eiu2i7p6d6mm4ihmss7ieou5hac3wn6b connections: # this overrides the tcp=tor from tahoe.cfg, for just this server tcp: tcp The ``connections`` table is needed to override the ``tcp = tor`` mapping that comes from ``tahoe.cfg``. Without it, the client would attempt to use Tor to connect to ``10.1.2.3``, which would fail because it is a local/non-routeable (RFC1918) address. Other files =========== ``logs/`` Each Tahoe-LAFS node creates a directory to hold the log messages produced as the node runs. These logfiles are created and rotated by the "``twistd``" daemonization program, so ``logs/twistd.log`` will contain the most recent messages, ``logs/twistd.log.1`` will contain the previous ones, ``logs/twistd.log.2`` will be older still, and so on. ``twistd`` rotates logfiles after they grow beyond 1MB in size. If the space consumed by logfiles becomes troublesome, they should be pruned: a cron job to delete all files that were created more than a month ago in this ``logs/`` directory should be sufficient. ``my_nodeid`` this is written by all nodes after startup, and contains a base32-encoded (i.e. human-readable) NodeID that identifies this specific node. This NodeID is the same string that gets displayed on the web page (in the "which peers am I connected to" list), and the shortened form (the first few characters) is recorded in various log messages. ``access.blacklist`` Gateway nodes may find it necessary to prohibit access to certain files. The web-API has a facility to block access to filecaps by their storage index, returning a 403 "Forbidden" error instead of the original file. For more details, see the "Access Blacklist" section of :doc:`frontends/webapi`. Example ======= The following is a sample ``tahoe.cfg`` file, containing values for some of the keys described in the previous section. Note that this is not a recommended configuration (most of these are not the default values), merely a legal one. 
:: [node] nickname = Bob's Tahoe-LAFS Node tub.port = tcp:34912 tub.location = tcp:123.45.67.89:8098,tcp:44.55.66.77:8098 web.port = tcp:3456 log_gatherer.furl = pb://soklj4y7eok5c3xkmjeqpw@192.168.69.247:44801/eqpwqtzm timeout.keepalive = 240 timeout.disconnect = 1800 [client] helper.furl = pb://ggti5ssoklj4y7eok5c3xkmj@tcp:helper.tahoe.example:7054/kk8lhr [storage] enabled = True readonly = True reserved_space = 10000000000 [helper] enabled = True To be introduced to storage servers, here is a sample ``private/introducers.yaml`` which can be used in conjunction:: introducers: examplegrid: furl: "pb://ok45ssoklj4y7eok5c3xkmj@tcp:tahoe.example:44801/ii3uumo" Old Configuration Files ======================= Tahoe-LAFS releases before v1.3.0 had no ``tahoe.cfg`` file, and used distinct files for each item. This is no longer supported and if you have configuration in the old format you must manually convert it to the new format for Tahoe-LAFS to detect it. See :doc:`historical/configuration`. tahoe_lafs-1.20.0/docs/contributing.rst0000644000000000000000000000005113615410400015004 0ustar00.. include:: ../.github/CONTRIBUTING.rst tahoe_lafs-1.20.0/docs/convergence-secret.rst0000644000000000000000000000723113615410400016065 0ustar00.. -*- coding: utf-8-with-signature -*- ********************** The Convergence Secret ********************** What Is It? ----------- The identifier of a file (also called the "capability" to a file) is derived from two pieces of information when the file is uploaded: the content of the file and the upload client's "convergence secret". By default, the convergence secret is randomly generated by the client when it first starts up, then stored in the client's base directory (/private/convergence) and re-used after that. So the same file content uploaded from the same client will always have the same cap. Uploading the file from a different client with a different convergence secret would result in a different cap -- and in a second copy of the file's contents stored on the grid. If you want files you upload to converge (also known as "deduplicate") with files uploaded by someone else, just make sure you're using the same convergence secret when you upload files as them. The advantages of deduplication should be clear, but keep in mind that the convergence secret was created to protect confidentiality. There are two attacks that can be used against you by someone who knows the convergence secret you use. The first one is called the "Confirmation-of-a-File Attack". Someone who knows the convergence secret that you used when you uploaded a file, and who has a copy of that file themselves, can check whether you have a copy of that file. This is usually not a problem, but it could be if that file is, for example, a book or movie that is banned in your country. The second attack is more subtle. It is called the "Learn-the-Remaining-Information Attack". Suppose you've received a confidential document, such as a PDF from your bank which contains many pages of boilerplate text as well as containing your bank account number and balance. Someone who knows your convergence secret can generate a file with all of the boilerplate text (perhaps they would open an account with the same bank so they receive the same document with their account number and balance). Then they can try a "brute force search" to find your account number and your balance. 
The defense against these attacks is that only someone who knows the convergence secret that you used on each file can perform these attacks on that file. Both of these attacks and the defense are described in more detail in `Drew Perttula's Hack Tahoe-LAFS Hall Of Fame entry`_ .. _`Drew Perttula's Hack Tahoe-LAFS Hall Of Fame entry`: https://tahoe-lafs.org/hacktahoelafs/drew_perttula.html What If I Change My Convergence Secret? --------------------------------------- All your old file capabilities will still work, but the new data that you upload will not be deduplicated with the old data. If you upload all of the same things to the grid, you will end up using twice the space until garbage collection kicks in (if it's enabled). Changing the convergence secret that a storage client uses for uploads can be thought of as moving the client to a new "deduplication domain". How To Use It ------------- To enable deduplication between different clients, **securely** copy the convergence secret file from one client to all the others. For example, if you are on host A and have an account on host B and you have scp installed, run: *scp ~/.tahoe/private/convergence my_other_account@B:.tahoe/private/convergence* If you have two different clients on a single computer, say one for each disk, you would do: *cp /tahoe1/private/convergence /tahoe2/private/convergence* After you change the convergence secret file, you must restart the client before it will stop using the old one and read the new one from the file. tahoe_lafs-1.20.0/docs/debian.rst0000644000000000000000000000455613615410400013533 0ustar00.. -*- coding: utf-8-with-signature -*- ========================= Debian and Ubuntu Support ========================= 1. `Overview`_ 2. `Dependency Packages`_ Overview ======== Tahoe-LAFS is provided as a ``.deb`` package in current Debian (>= `stretch `_) and Ubuntu (>= lucid) releases. Before official packages were added, the Tahoe source tree provided support for building unofficial packages for a variety of popular Debian/Ubuntu versions. The project also ran buildbots to create ``.debs`` of current trunk for ease of testing. As of version 1.9, the source tree no longer provides these tools. To construct a ``.deb`` from current trunk, your best bet is to apply the current Debian diff from the latest upstream package and invoke the ``debian/rules`` as usual. Debian's standard ``apt-get`` tool can be used to fetch the current source package (including the Debian-specific diff): run "``apt-get source tahoe-lafs``". That will fetch three files: the ``.dsc`` control file, the main Tahoe tarball, and the Debian-specific ``.debian.tar.gz`` file. Just unpack the ``.debian.tar.gz`` file inside your Tahoe source tree, modify the version number in ``debian/changelog``, then run "``fakeroot ./debian/rules binary``", and a new ``.deb`` will be placed in the parent directory. Dependency Packages =================== Tahoe depends upon a number of additional libraries. When building Tahoe from source, any dependencies that are not already present in the environment will be downloaded (via ``pip`` and ``easy_install``) and installed in the virtualenv. The ``.deb`` packages, of course, rely solely upon other ``.deb`` packages.
For reference, here is a list of the debian package names that provide Tahoe's dependencies as of the 1.14.0 release: * python * python-zfec * python-foolscap * python-openssl (needed by foolscap) * python-twisted * python-nevow * python-mock * python-cryptography * python-simplejson * python-setuptools * python-support (for Debian-specific install-time tools) When building your own Debian packages, a convenient way to get all these dependencies installed is to first install the official "tahoe-lafs" package, then uninstall it, leaving the dependencies behind. You may also find it useful to run "``apt-get build-dep tahoe-lafs``" to make sure all the usual build-essential tools are installed. tahoe_lafs-1.20.0/docs/developer-guide.rst0000644000000000000000000000300513615410400015357 0ustar00Developer Guide =============== Pre-commit Checks ----------------- This project is configured for use with `pre-commit`_ to install `VCS/git hooks`_ which perform some static code analysis checks and other code checks to catch common errors. These hooks can be configured to run before commits or pushes For example:: tahoe-lafs $ pre-commit install --hook-type pre-push pre-commit installed at .git/hooks/pre-push tahoe-lafs $ echo "undefined" > src/allmydata/undefined_name.py tahoe-lafs $ git add src/allmydata/undefined_name.py tahoe-lafs $ git commit -a -m "Add a file that violates flake8" tahoe-lafs $ git push codechecks...............................................................Failed - hook id: codechecks - exit code: 1 GLOB sdist-make: ./tahoe-lafs/setup.py codechecks inst-nodeps: ... codechecks installed: ... codechecks run-test-pre: PYTHONHASHSEED='...' codechecks run-test: commands[0] | flake8 src/allmydata/undefined_name.py src/allmydata/undefined_name.py:1:1: F821 undefined name 'undefined' ERROR: InvocationError for command ./tahoe-lafs/.tox/codechecks/bin/flake8 src/allmydata/undefined_name.py (exited with code 1) ___________________________________ summary ____________________________________ ERROR: codechecks: commands failed To uninstall:: tahoe-lafs $ pre-commit uninstall --hook-type pre-push pre-push uninstalled .. _`pre-commit`: https://pre-commit.com .. _`VCS/git hooks`: `pre-commit`_ .. _`pre-commit configuration`: ../.pre-commit-config.yaml tahoe_lafs-1.20.0/docs/developer-release-signatures0000644000000000000000000000257713615410400017272 0ustar00-----BEGIN PGP SIGNED MESSAGE----- Hash: SHA512 January 20, 2021 Any of the following core Tahoe contributers may sign a release. Each release MUST be signed by at least one developer but MAY have additional signatures. Each developer independently produces a signature which is made available beside Tahoe releases after 1.15.0 This statement is signed by the existing Tahoe release key. Any future such statements may be signed by it OR by any two developers (for example, to add or remove developers from the list). 
meejah 0xC2602803128069A7 9D5A 2BD5 688E CB88 9DEB CD3F C260 2803 1280 69A7 https://meejah.ca/meejah.asc jean-paul calderone (exarkun) 0xE27B085EDEAA4B1B 96B9 C5DA B2EA 9EB6 7941 9DB7 E27B 085E DEAA 4B1B https://twistedmatrix.com/~exarkun/E27B085EDEAA4B1B.asc brian warner (lothar) 0x863333C265497810 5810 F125 7F8C F753 7753 895A 8633 33C2 6549 7810 https://www.lothar.com/warner-gpg.html -----BEGIN PGP SIGNATURE----- iQEzBAEBCgAdFiEE405i0G0Oac/KQXn/veDTHWhmanoFAmAHIyIACgkQveDTHWhm anqhqQf/YSbMXL+gwFhAZsjX39EVlbr/Ik7WPPkJW7v1oHybTnwFpFIc52COU1x/ sqRfk4OyYtz9IBgOPXoWgXu9R4qdK6vYKxEsekcGT9C5l0OyDz8YWXEWgbGK5mvI aEub9WucD8r2uOQnnW6DtznFuEpvOjtf/+2BU767+bvLsbViW88ocbuLfCqLdOgD WZT9j3M+Y2Dc56DAJzP/4fkrUSVIofZStYp5u9HBjburgcYIp0g/cyc4xXRoi6Mp lFTRFv3MIjmoamzSQseoIgP6fi8QRqPrffPrsyqAp+06mJnPhxxFqxtO/ZErmpSa +BGrLBxdWa8IF9U1A4Fs5nuAzAKMEg== =E9J+ -----END PGP SIGNATURE----- tahoe_lafs-1.20.0/docs/donations.rst0000644000000000000000000001060013615410400014274 0ustar00-----BEGIN PGP SIGNED MESSAGE----- Hash: SHA256 ========= Donations ========= Donations to the Tahoe-LAFS project are welcome, and can be made to the following Bitcoin address: 1PxiFvW1jyLM5T6Q1YhpkCLxUh3Fw8saF3 The funds currently available to the project are visible through the blockchain explorer: https://blockchain.info/address/1PxiFvW1jyLM5T6Q1YhpkCLxUh3Fw8saF3 Governance ========== The Tahoe-LAFS Software Foundation manages these funds. Our intention is to use them for operational expenses (website hosting, test infrastructure, EC2 instance rental, and SSL certificates). Future uses might include developer summit expenses, bug bounties, contract services (e.g. graphic design for the web site, professional security review of codebases, development of features outside the core competencies of the main developers), and student sponsorships. The Foundation currently consists of secorp (Peter Secor), warner (Brian Warner), and zooko (Zooko Wilcox). Transparent Accounting ====================== Our current plan is to leave all funds in the main `1Pxi` key until they are spent. For each declared budget item, we will allocate a new public key, and transfer funds to that specific key before distributing them to the ultimate recipient. All expenditures can thus be tracked on the blockchain. Some day, we might choose to move the funds into a more sophisticated type of key (e.g. a 2-of-3 multisig address). If/when that happens, we will publish the new donation address, and transfer all funds to it. We will continue the plan of keeping all funds in the (new) primary donation address until they are spent. Expenditure Addresses ===================== This lists the public key used for each declared budget item. The individual payments will be recorded in a separate file (see `docs/expenses.rst`), which is not signed. All transactions from the main `1Pxi` key should be to some key on this list. 
* Initial testing (warner) 1387fFG7Jg1iwCzfmQ34FwUva7RnC6ZHYG one-time 0.01 BTC deposit+withdrawal * tahoe-lafs.org DNS registration (paid by warner) 1552pt6wpudVCRcJaU14T7tAk8grpUza4D ~$15/yr for DNS * tahoe-lafs.org SSL certificates (paid by warner) $0-$50/yr, ending 2015 (when we switched to LetsEncrypt) 1EkT8yLvQhnjnLpJ6bNFCfAHJrM9yDjsqa * website/dev-server hosting (on Linode, paid by secorp) ~$20-$25/mo, 2007-present 1MSWNt1R1fohYxgaMV7gJSWbYjkGbXzKWu (<= may-2016) 1NHgVsq1nAU9x1Bb8Rs5K3SNtzEH95C5kU (>= jun-2016) * 2016 Tahoe Summit expenses: venue rental, team dinners (paid by warner) ~$1020 1DskmM8uCvmvTKjPbeDgfmVsGifZCmxouG * Aspiration contract $300k-$350k (first phase, 2019) $800k (second phase, 2020) 1gDXYQNH4kCJ8Dk7kgiztfjNUaA1KJcHv * OpenCollective development work (2023) ~$260k 1KZYr8UU2XjuEdSPzn2pF8eRPZZvffByDf Historical Donation Addresses ============================= The Tahoe project has had a couple of different donation addresses over the years, managed by different people. All of these funds have been (or will be) transferred to the current primary donation address (`1Pxi`). * 13GrdS9aLXoEbcptBLQi7ffTsVsPR7ubWE (21-Aug-2010 - 23-Aug-2010) Managed by secorp, total receipts: 17 BTC * 19jzBxijUeLvcMVpUYXcRr5kGG3ThWgx4P (23-Aug-2010 - 29-Jan-2013) Managed by secorp, total receipts: 358.520276 BTC * 14WTbezUqWSD3gLhmXjHD66jVg7CwqkgMc (24-May-2013 - 21-Mar-2016) Managed by luckyredhot, total receipts: 3.97784278 BTC stored in 19jXek4HRL54JrEwNPyEzitPnkew8XPkd8 * 1PxiFvW1jyLM5T6Q1YhpkCLxUh3Fw8saF3 (21-Mar-2016 - present) Managed by warner, backups with others Validation ========== This document is signed by the Tahoe-LAFS Release-Signing Key (GPG keyid 2048R/68666A7A, fingerprint E34E 62D0 6D0E 69CF CA41 79FF BDE0 D31D 6866 6A7A). It is also committed to the Tahoe source tree (https://github.com/tahoe-lafs/tahoe-lafs.git) as `docs/donations.rst`. Both actions require access to secrets held closely by Tahoe developers. signed: Brian Warner, 25-Oct-2023 -----BEGIN PGP SIGNATURE----- iQEzBAEBCAAdFiEE405i0G0Oac/KQXn/veDTHWhmanoFAmU5YZMACgkQveDTHWhm anqt+ggAo2kulNmjrWA5VhqE8i6ckkxQMRVY4y0LAfiI0ho/505ZBZvpoh/Ze31x ZJj4DczHmZM+m3L+fZyubT4ldagYEojtwkYmxHAQz2DIV4PrdjsUQWyvkNcTBZWu y5mR5ATk3EYRa19xGEosWK1OzW2kgRbpAbznuWsdxxw9vNENBrolGRsyJqRQHCiV /4UkrGiOegaJSFMKy2dCyDF3ExD6wT9+fdqC5xDJZjhD+SUDJnD4oWLYLroj//v1 sy4J+/ElNU9oaC0jDb9fx1ECk+u6B+YiaYlW/MrZNqzKCM/76yZ8sA2+ynsOHGtL bPFpLJjX6gBwHkMqvkWhsJEojxkFVQ== =gxlb -----END PGP SIGNATURE----- tahoe_lafs-1.20.0/docs/expenses.rst0000644000000000000000000001746413615410400014147 0ustar00============================== Expenses paid by donated BTC ============================== `docs/donations.rst` describes the "Transparent Accounting" that we use for BTC that has been donated to the Tahoe project. That document lists the budget items for which we intend to spend these funds, and a Bitcoin public key for each one. It is signed by the Tahoe-LAFS Release Signing Key, and gets re-signed each time a new budget item is added. For every expense that get paid, the BTC will first be moved from the primary donation key into the budget-item -specific subkey, then moved from that subkey to whatever vendor or individual is being paid. This document tracks the actual payments made to each vendor. This file changes more frequently than `donations.rst`, hence it is *not* signed. However this file should never reference a budget item or public key which is not present in `donations.rst`. 
And every payment in this file should correspond to a transaction visible on the Bitcoin block chain explorer: https://blockchain.info/address/1PxiFvW1jyLM5T6Q1YhpkCLxUh3Fw8saF3 Budget Items ============ Initial Testing --------------- This was a small transfer to obtain proof-of-spendability for the new wallet. * Budget: trivial * Recipient: warner * Address: 1387fFG7Jg1iwCzfmQ34FwUva7RnC6ZHYG Expenses/Transactions: * 17-Mar-2016: deposit+withdrawal of 0.01 BTC * bcad5f46ebf9fd5d2d7a6a9bed81acf6382cd7216ceddbb5b5f5d968718ec139 (in) * 13c7f4abf9d6e7f2223c20fefdc47837779bebf3bd95dbb1f225f0d2a2d62c44 (out 1/2) * 7ca0828ea11fa2f93ab6b8afd55ebdca1415c82c567119d9bd943adbefccce84 (out 2/2) DNS Registration ---------------- Yearly registration of the `tahoe-lafs.org` domain name. * Budget: ~$15/yr * Recipient: warner * Address: 1552pt6wpudVCRcJaU14T7tAk8grpUza4D Expenses/Transactions: * 21-Aug-2012: 1 year, GANDI: $12.50 * 20-Aug-2013: 4 years, GANDI: $64.20 * 4ee7fbcb07f758d51187b6856eaf9999f14a7f3d816fe3afb7393f110814ae5e 0.11754609 BTC (@$653.41) = $76.70, plus 0.000113 tx-fee TLS certificates ---------------- Yearly payment for TLS certificates from various vendors. We plan to move to Lets Encrypt, so 2015 should be last time we pay for a cert. * Budget: $0-$50/yr * Recipient: warner * Address: 1EkT8yLvQhnjnLpJ6bNFCfAHJrM9yDjsqa Expenses/Transactions: * 29-Oct-2012: RapidSSL: $49 * 02-Nov-2013: GlobalSign, free for open source projects: $0 * 14-Nov-2014: GANDI: $16 * 28-Oct-2015: GANDI: $16 * e8d1b78fab163baa45de0ec592f8d7547329343181e35c2cdb30e427a442337e 0.12400489 BTC (@$653.20) = $81, plus 0.000113 tx-fee Web/Developer Server Hosting ---------------------------- This pays for the rental of a VPS (currently from Linode) for tahoe-lafs.org, running the project website, Trac, buildbot, and other development tools. * Budget: $20-$25/month, 2007-present * Recipient: secorp * Addresses: 1MSWNt1R1fohYxgaMV7gJSWbYjkGbXzKWu (<= may-2016) 1NHgVsq1nAU9x1Bb8Rs5K3SNtzEH95C5kU (>= jun-2016) Expenses/Transactions: * Invoice 311312, 12 Feb 2010: $339.83 * Invoice 607395, 05 Jan 2011: $347.39 * Invoice 1183568, 01 Feb 2012: $323.46 * Invoice 1973091, 01 Feb 2013: $323.46 * Invoice 2899489, 01 Feb 2014: $324.00 * Invoice 3387159, 05 July 2014: $6.54 (add backups) * Multiple invoices monthly 01 Aug 2014 - 01 May 2016: $7.50*22 = $165.00 * Invoice 4083422, 01 Feb 2015: $324.00 * Invoice 5650991, 01 Feb 2016: $324.00 * -- Total through 01 May 2016: $2477.68 * 5861efda59f9ae10952389cf52f968bb469019c77a3642e276a9e35131c36600 3.78838567 BTC (@$654.02) = $2477.68, plus 0.000113 tx-fee * * June 2016 - Oct 2016 $27.45/mo, total $137.25 * 8975b03002166b20782b0f023116b3a391ac5176de1a27e851891bee29c11957 0.19269107 BTC (@$712.28) = $137.25, plus 0.000113 tx-fee * (Oops, I forgot the process, and sent the BTC directly secorp's key. I should have stuck with the 1MSWN key as the intermediary. Next time I'll go back to doing it that way.) Tahoe Summit ------------ This pays for office space rental and team dinners for each day of the developer summit. 
* Recipient: warner * Address: 1DskmM8uCvmvTKjPbeDgfmVsGifZCmxouG * 2016 Summit (Nov 8-9, San Francisco) * Rental of the Mechanics Institute Library "Board Room": $300/day*2 * Team Dinner (Cha Cha Cha): $164.49 * Team Dinner (Rasoi): $255.34 * -- total: $1019.83 * dcd468fb2792b018e9ebc238e9b93992ad5a8fce48a8ff71db5d79ccbbe30a92 0.01403961 (@$712.28) = $10, plus 0.000113 tx-fee * acdfc299c35eed3bb27f7463ad8cdfcdcd4dcfd5184f290f87530c2be999de3e 1.41401086 (@$714.16) = $1009.83, plus 0.000133 tx-fee Aspiration Contract ------------------- In December 2018, we entered into an agreement with a non-profit named Aspiration (https://aspirationtech.org/) to fund contractors for development work. They handle payroll, taxes, and oversight, in exchange for an 8% management fee. The first phase of work will extend through most of 2019. * Recipient: Aspiration * Address: 1gDXYQNH4kCJ8Dk7kgiztfjNUaA1KJcHv These txids record the transfers from the primary 1Pxi address to the Aspiration-specific 1gDXY subaddress. In some cases, leftover funds were swept back into the main 1Pxi address after the transfers were complete. First phase, transfers performed 28-Dec-2018 - 31-Dec-2018, total 89 BTC, about $350K. * 95c68d488bd92e8c164195370aaa516dff05aa4d8c543d3fb8cfafae2b811e7a 1.0 BTC plus 0.00002705 tx-fee * c0a5b8e3a63c56c4365d4c3ded0821bc1170f6351502849168bc34e30a0582d7 89.0 BTC plus 0.00000633 tx-fee * 421cff5f398509aaf48951520738e0e63dfddf1157920c15bdc72c34e24cf1cf return 0.00005245 BTC to 1Pxi, less 0.00000211 tx-fee In November 2020, we funded a second phase of the work: 51.38094 BTC, about $800K. * 7558cbf3b24e8d835809d2d6f01a8ba229190102efdf36280d0639abaa488721 1.0 BTC plus 0.00230766 tx-fee * 9c78ae6bb7db62cbd6be82fd52d50a2f015285b562f05de0ebfb0e5afc6fd285 56.0 BTC plus 0.00057400 tx-fee * fbee4332e8c7ffbc9c1bcaee773f063550e589e58d350d14f6daaa473966c368 returning 5.61906 BTC to 1Pxi, less 0.00012000 tx-fee Open Collective --------------- In August 2023, we started working with Open Collective to fund a grant covering development work performed over the last year, and onwards into 2024. * Recipient: Open Collective (US) * Transfer Address: 1KZYr8UU2XjuEdSPzn2pF8eRPZZvffByDf The first phase transferred 7.5 BTC (about $250K). 
* 25-26-oct-2023: ~7.5 BTC ($250k) * transfer address: 1KZYr8UU2XjuEdSPzn2pF8eRPZZvffByDf * xfer 0.1 BTC: txid 9bfe10e3f240724d0d15bcd6405f4e568b5f1fb1dc2069d0ecf20df22d6ee502 * xfer 7.39994304 BTC: txid 882dca0e1acc2e203b2ecfbb20d70dc2018840bed7f4ad4e1b8c629d2b3f1136 * payment address: 3LVNG26VxfE6RXJJjKCVNHnAGMtyDrx9WU * send 0.01 BTC txid 24ca9a87e8022802ccae2db1310636973d2caf0e3f46892490cb896d03f2e795 * send 7.48969257 BTC txid a83ff318a1d56b8f95c10d1740fbd1fd1065958d4e1c83ef39a8ec9e50f08ddf * 06-jan-2024: 5.0 BTC ($224k) * transfer address: 1KZYr8UU2XjuEdSPzn2pF8eRPZZvffByDf * xfer 5.0 BTC: txid 6f0af3fe6eeaf51d9054a7f666c90898aaa7b203deb2cbf89164fca0517953c0 * payment address: 3LVNG26VxfE6RXJJjKCVNHnAGMtyDrx9WU * send 0.01 BTC: txid 20e4afdf6eec1dad8968164eed187de1e840a5064c09f03bbded48fee24deb71 * send 4.989379 BTC: txid 0f210c9ae279d912482cc3cbcd40df53fd4fe7644ba8d25fbb3e42de5140ad15 * 25-apr-2024: 5.0 BTC ($316k) (current price: $64,385.24) * transfer address: 1KZYr8UU2XjuEdSPzn2pF8eRPZZvffByDf * xfer 5.0 BTC: txid 01a4c2cb18b95025ac8074aa1ccd46ec1f3783d5d9b15ef5bb0d57a59fe09e5b , block 840,915 * payment address: 3LVNG26VxfE6RXJJjKCVNHnAGMtyDrx9WU * send 4.9999487 BTC: txid a840f2c14a9acc2d2d3ecd35dff69a0d1904151825262a52dd397a22487e9ec8 * 21-aug-2024: 6.0 BTC ($350k) (current price: $59,308) * transfer address: 1KZYr8UU2XjuEdSPzn2pF8eRPZZvffByDf * xfer 6.0 BTC: txid 766fa17b43ab0d2a0c3d2839a59e9887abf3026d44ccecee42a348fb2cc05474, block 857,777 * payment address: 3LVNG26VxfE6RXJJjKCVNHnAGMtyDrx9WU * send 5.999943 BTC: txid b44c8d4dbbfcef6eee2681700ccb8e3c6d7d56b3796ce8813848d2c91022d7a4, block 857,788 tahoe_lafs-1.20.0/docs/filesystem-notes.rst0000644000000000000000000000214313615410400015613 0ustar00.. -*- coding: utf-8-with-signature -*- ========================= Filesystem-specific notes ========================= 1. ext3_ Tahoe storage servers use a large number of subdirectories to store their shares on local disk. This format is simple and robust, but depends upon the local filesystem to provide fast access to those directories. ext3 ==== For moderate- or large-sized storage servers, you'll want to make sure the "directory index" feature is enabled on your ext3 directories, otherwise share lookup may be very slow. Recent versions of ext3 enable this automatically, but older filesystems may not have it enabled:: $ sudo tune2fs -l /dev/sda1 |grep feature Filesystem features: has_journal ext_attr resize_inode dir_index filetype needs_recovery sparse_super large_file If "dir_index" is present in the "features:" line, then you're all set. If not, you'll need to use tune2fs and e2fsck to enable and build the index. See `http://wiki2.dovecot.org/MailboxFormat/Maildir`_ for some hints. .. _`http://wiki2.dovecot.org/MailboxFormat/Maildir`: http://wiki2.dovecot.org/MailboxFormat/Maildir tahoe_lafs-1.20.0/docs/garbage-collection.rst0000644000000000000000000003402713615410400016030 0ustar00.. -*- coding: utf-8-with-signature -*- =========================== Garbage Collection in Tahoe =========================== 1. `Overview`_ 2. `Client-side Renewal`_ 3. `Server Side Expiration`_ 4. `Expiration Progress`_ 5. `Future Directions`_ Overview ======== When a file or directory in a Tahoe-LAFS file store is no longer referenced, the space that its shares occupied on each storage server can be freed, making room for other shares. Tahoe currently uses a garbage collection ("GC") mechanism to implement this space-reclamation process. 
Each share has one or more "leases", which are managed by clients who want the file/directory to be retained. The storage server accepts each share for a pre-defined period of time, and is allowed to delete the share if all of the leases expire. Garbage collection is not enabled by default: storage servers will not delete shares without being explicitly configured to do so. When GC is enabled, clients are responsible for renewing their leases on a periodic basis at least frequently enough to prevent any of the leases from expiring before the next renewal pass. There are several tradeoffs to be considered when choosing the renewal timer and the lease duration, and there is no single optimal pair of values. See the following diagram to get an idea of the tradeoffs involved: .. image:: lease-tradeoffs.svg If lease renewal occurs quickly and with 100% reliability, than any renewal time that is shorter than the lease duration will suffice, but a larger ratio of duration-over-renewal-time will be more robust in the face of occasional delays or failures. The current recommended values for a small Tahoe grid are to renew the leases once a week, and give each lease a duration of 31 days. In the current release, there is not yet a way to create a lease with a different duration, but the server can use the ``expire.override_lease_duration`` configuration setting to increase or decrease the effective duration (when the lease is processed) to something other than 31 days. Renewing leases can be expected to take about one second per file/directory, depending upon the number of servers and the network speeds involved. Client-side Renewal =================== If all of the files and directories which you care about are reachable from a single starting point (usually referred to as a "rootcap"), and you store that rootcap as an alias (via "``tahoe create-alias``" for example), then the simplest way to renew these leases is with the following CLI command:: tahoe deep-check --add-lease ALIAS: This will recursively walk every directory under the given alias and renew the leases on all files and directories. (You may want to add a ``--repair`` flag to perform repair at the same time.) Simply run this command once a week (or whatever other renewal period your grid recommends) and make sure it completes successfully. As a side effect, a manifest of all unique files and directories will be emitted to stdout, as well as a summary of file sizes and counts. It may be useful to track these statistics over time. Note that newly uploaded files (and newly created directories) get an initial lease too: the ``--add-lease`` process is only needed to ensure that all older objects have up-to-date leases on them. A separate "rebalancing manager/service" is also planned -- see ticket `#543`_. The exact details of what this service will do are not settled, but it is likely to work by acquiring manifests from rootcaps on a periodic basis, keeping track of checker results, managing lease-addition, and prioritizing repair and rebalancing of shares. Eventually it may use multiple worker nodes to perform these jobs in parallel. .. _#543: http://tahoe-lafs.org/trac/tahoe-lafs/ticket/543 Server Side Expiration ====================== Expiration must be explicitly enabled on each storage server, since the default behavior is to never expire shares. Expiration is enabled by adding config keys to the ``[storage]`` section of the ``tahoe.cfg`` file (as described below) and restarting the server node. 
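For orientation, here is a minimal sketch of such a configuration, using only the keys documented below; the values are illustrative, not recommendations::

    [storage]
    # Permit deletion of shares whose leases have all expired.
    expire.enabled = true
    # Use age-based expiration ("cutoff-date" is the other supported mode).
    expire.mode = age
    # Optional: treat every lease as lasting 60 days instead of the
    # nominal 31 days (a "duration string", e.g. "60 days" or "2mo").
    expire.override_lease_duration = 60 days
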
Each lease has two parameters: a create/renew timestamp and a duration. The timestamp is updated when the share is first uploaded (i.e. the file or directory is created), and updated again each time the lease is renewed (i.e. "``tahoe check --add-lease``" is performed). The duration is currently fixed at 31 days, and the "nominal lease expiration time" is simply $duration seconds after the $create_renew timestamp. (In a future release of Tahoe, the client will get to request a specific duration, and the server will accept or reject the request depending upon its local configuration, so that servers can achieve better control over their storage obligations.) The lease-expiration code has two modes of operation. The first is age-based: leases are expired when their age is greater than their duration. This is the preferred mode: as long as clients consistently update their leases on a periodic basis, and that period is shorter than the lease duration, then all active files and directories will be preserved, and the garbage will collected in a timely fashion. Since there is not yet a way for clients to request a lease duration of other than 31 days, there is a ``tahoe.cfg`` setting to override the duration of all leases. If, for example, this alternative duration is set to 60 days, then clients could safely renew their leases with an add-lease operation perhaps once every 50 days: even though nominally their leases would expire 31 days after the renewal, the server would not actually expire the leases until 60 days after renewal. The other mode is an absolute-date-cutoff: it compares the create/renew timestamp against some absolute date, and expires any lease which was not created or renewed since the cutoff date. If all clients have performed an add-lease some time after March 20th, you could tell the storage server to expire all leases that were created or last renewed on March 19th or earlier. This is most useful if you have a manual (non-periodic) add-lease process. Note that there is not much point to running a storage server in this mode for a long period of time: once the lease-checker has examined all shares and expired whatever it is going to expire, the second and subsequent passes are not going to find any new leases to remove. The ``tahoe.cfg`` file uses the following keys to control lease expiration: ``[storage]`` ``expire.enabled = (boolean, optional)`` If this is ``True``, the storage server will delete shares on which all leases have expired. Other controls dictate when leases are considered to have expired. The default is ``False``. ``expire.mode = (string, "age" or "cutoff-date", required if expiration enabled)`` If this string is "age", the age-based expiration scheme is used, and the ``expire.override_lease_duration`` setting can be provided to influence the lease ages. If it is "cutoff-date", the absolute-date-cutoff mode is used, and the ``expire.cutoff_date`` setting must be provided to specify the cutoff date. The mode setting currently has no default: you must provide a value. In a future release, this setting is likely to default to "age", but in this release it was deemed safer to require an explicit mode specification. ``expire.override_lease_duration = (duration string, optional)`` When age-based expiration is in use, a lease will be expired if its ``lease.create_renew`` timestamp plus its ``lease.duration`` time is earlier/older than the current time. 
This key, if present, overrides the duration value for all leases, changing the algorithm from:: if (lease.create_renew_timestamp + lease.duration) < now: expire_lease() to:: if (lease.create_renew_timestamp + override_lease_duration) < now: expire_lease() The value of this setting is a "duration string", which is a number of days, months, or years, followed by a units suffix, and optionally separated by a space, such as one of the following:: 7days 31day 60 days 2mo 3 month 12 months 2years This key is meant to compensate for the fact that clients do not yet have the ability to ask for leases that last longer than 31 days. A grid which wants to use faster or slower GC than a 31-day lease timer permits can use this parameter to implement it. This key is only valid when age-based expiration is in use (i.e. when ``expire.mode = age`` is used). It will be rejected if cutoff-date expiration is in use. ``expire.cutoff_date = (date string, required if mode=cutoff-date)`` When cutoff-date expiration is in use, a lease will be expired if its create/renew timestamp is older than the cutoff date. This string will be a date in the following format:: 2009-01-16 (January 16th, 2009) 2008-02-02 2007-12-25 The actual cutoff time shall be midnight UTC at the beginning of the given day. Lease timers should naturally be generous enough to not depend upon differences in timezone: there should be at least a few days between the last renewal time and the cutoff date. This key is only valid when cutoff-based expiration is in use (i.e. when "expire.mode = cutoff-date"). It will be rejected if age-based expiration is in use. expire.immutable = (boolean, optional) If this is False, then immutable shares will never be deleted, even if their leases have expired. This can be used in special situations to perform GC on mutable files but not immutable ones. The default is True. expire.mutable = (boolean, optional) If this is False, then mutable shares will never be deleted, even if their leases have expired. This can be used in special situations to perform GC on immutable files but not mutable ones. The default is True. Expiration Progress =================== In the current release, leases are stored as metadata in each share file, and no separate database is maintained. As a result, checking and expiring leases on a large server may require multiple reads from each of several million share files. This process can take a long time and be very disk-intensive, so a "share crawler" is used. The crawler limits the amount of time looking at shares to a reasonable percentage of the storage server's overall usage: by default it uses no more than 10% CPU, and yields to other code after 100ms. A typical server with 1.1M shares was observed to take 3.5 days to perform this rate-limited crawl through the whole set of shares, with expiration disabled. It is expected to take perhaps 4 or 5 days to do the crawl with expiration turned on. The crawler's status is displayed on the "Storage Server Status Page", a web page dedicated to the storage server. This page resides at $NODEURL/storage, and there is a link to it from the front "welcome" page. The "Lease Expiration crawler" section of the status page shows the progress of the current crawler cycle, expected completion time, amount of space recovered, and details of how many shares have been examined. The crawler's state is persistent: restarting the node will not cause it to lose significant progress. 
The state file is located in two files ($BASEDIR/storage/lease_checker.state and lease_checker.history), and the crawler can be forcibly reset by stopping the node, deleting these two files, then restarting the node. Future Directions ================= Tahoe's GC mechanism is undergoing significant changes. The global mark-and-sweep garbage-collection scheme can require considerable network traffic for large grids, interfering with the bandwidth available for regular uploads and downloads (and for non-Tahoe users of the network). A preferable method might be to have a timer-per-client instead of a timer-per-lease: the leases would not be expired until/unless the client had not checked in with the server for a pre-determined duration. This would reduce the network traffic considerably (one message per week instead of thousands), but retain the same general failure characteristics. In addition, using timers is not fail-safe (from the client's point of view), in that a client which leaves the network for an extended period of time may return to discover that all of their files have been garbage-collected. (It *is* fail-safe from the server's point of view, in that a server is not obligated to provide disk space in perpetuity to an unresponsive client). It may be useful to create a "renewal agent" to which a client can pass a list of renewal-caps: the agent then takes the responsibility for keeping these leases renewed, so the client can go offline safely. Of course, this requires a certain amount of coordination: the renewal agent should not be keeping files alive that the client has actually deleted. The client can send the renewal-agent a manifest of renewal caps, and each new manifest should replace the previous set. The GC mechanism is also not immediate: a client which deletes a file will nevertheless be consuming extra disk space (and might be charged or otherwise held accountable for it) until the ex-file's leases finally expire on their own. In the current release, these leases are each associated with a single "node secret" (stored in $BASEDIR/private/secret), which is used to generate renewal-secrets for each lease. Two nodes with different secrets will produce separate leases, and will not be able to renew each others' leases. Once the Accounting project is in place, leases will be scoped by a sub-delegatable "account id" instead of a node secret, so clients will be able to manage multiple leases per file. In addition, servers will be able to identify which shares are leased by which clients, so that clients can safely reconcile their idea of which files/directories are active against the server's list, and explicitly cancel leases on objects that aren't on the active list. By reducing the size of the "lease scope", the coordination problem is made easier. In general, mark-and-sweep is easier to implement (it requires mere vigilance, rather than coordination), so unless the space used by deleted files is not expiring fast enough, the renew/expire timed lease approach is recommended. tahoe_lafs-1.20.0/docs/glossary.rst0000644000000000000000000000434613615410400014153 0ustar00.. -*- coding: utf-8 -*- ============================ Glossary of Tahoe-LAFS Terms ============================ .. 
glossary:: `Foolscap `_ an RPC/RMI (Remote Procedure Call / Remote Method Invocation) protocol for use with Twisted storage server a Tahoe-LAFS process configured to offer storage and reachable over the network for store and retrieve operations storage service a Python object held in memory in the storage server which provides the implementation of the storage protocol introducer a Tahoe-LAFS process at a known location configured to re-publish announcements about the location of storage servers :ref:`fURLs ` a self-authenticating URL-like string which can be used to locate a remote object using the Foolscap protocol (the storage service is an example of such an object) :ref:`NURLs ` a self-authenticating URL-like string almost exactly like a fURL but without being tied to Foolscap swissnum a short random string which is part of a fURL/NURL and which acts as a shared secret to authorize clients to use a storage service lease state associated with a share informing a storage server of the duration of storage desired by a client share a single unit of client-provided arbitrary data to be stored by a storage server (in practice, one of the outputs of applying ZFEC encoding to some ciphertext with some additional metadata attached) bucket a group of one or more immutable shares held by a storage server and having a common storage index slot a group of one or more mutable shares held by a storage server and having a common storage index (sometimes "slot" is considered a synonym for "storage index of a slot") storage index a 16 byte string which can address a slot or a bucket (in practice, derived by hashing the encryption key associated with contents of that slot or bucket) write enabler a short secret string which storage servers require to be presented before allowing mutation of any mutable share lease renew secret a short secret string which storage servers require to be presented before allowing a particular lease to be renewed tahoe_lafs-1.20.0/docs/gpg-setup.rst0000644000000000000000000000174513615410400014223 0ustar00Preparing to Authenticate Release (Setting up GPG) -------------------------------------------------- In order to keep releases authentic, it is required that releases are signed before being published. This ensures that users of Tahoe are able to verify that the version of Tahoe they are using is coming from a trusted, or at the very least known, source. The authentication is done using the ``GPG`` implementation of ``OpenPGP``. To be able to complete the release steps you will have to download the ``GPG`` software and set up a key (identity). - `Download `__ and install GPG for your operating system. - Generate a key pair using ``gpg --gen-key``. *Some questions will be asked to personalize your key configuration.* You might take additional steps including: - Setting up a revocation certificate (in case you lose your secret key) - Backing up your key pair - Uploading your fingerprint to a keyserver such as `openpgp.org `__ tahoe_lafs-1.20.0/docs/helper.rst0000644000000000000000000002043213615410400013561 0ustar00.. -*- coding: utf-8-with-signature -*- ======================= The Tahoe Upload Helper ======================= 1. `Overview`_ 2. `Setting Up A Helper`_ 3. `Using a Helper`_ 4. `Other Helper Modes`_ Overview ======== As described in the "Swarming Download, Trickling Upload" section of :doc:`architecture`, Tahoe uploads require more bandwidth than downloads: you must push the redundant shares during upload, but you do not need to retrieve them during download.
With the default 3-of-10 encoding parameters, this means that an upload will require about 3.3x the traffic as a download of the same file. Unfortunately, this "expansion penalty" occurs in the same upstream direction that most consumer DSL lines are slow anyways. Typical ADSL lines get 8 times as much download capacity as upload capacity. When the ADSL upstream penalty is combined with the expansion penalty, the result is uploads that can take up to 32 times longer than downloads. The "Helper" is a service that can mitigate the expansion penalty by arranging for the client node to send data to a central Helper node instead of sending it directly to the storage servers. It sends ciphertext to the Helper, so the security properties remain the same as with non-Helper uploads. The Helper is responsible for applying the erasure encoding algorithm and placing the resulting shares on the storage servers. Of course, the helper cannot mitigate the ADSL upstream penalty. The second benefit of using an upload helper is that clients who lose their network connections while uploading a file (because of a network flap, or because they shut down their laptop while an upload was in progress) can resume their upload rather than needing to start again from scratch. The helper holds the partially-uploaded ciphertext on disk, and when the client tries to upload the same file a second time, it discovers that the partial ciphertext is already present. The client then only needs to upload the remaining ciphertext. This reduces the "interrupted upload penalty" to a minimum. This also serves to reduce the number of active connections between the client and the outside world: most of their traffic flows over a single TCP connection to the helper. This can improve TCP fairness, and should allow other applications that are sharing the same uplink to compete more evenly for the limited bandwidth. Setting Up A Helper =================== Who should consider running a helper? * Benevolent entities which wish to provide better upload speed for clients that have slow uplinks * Folks which have machines with upload bandwidth to spare. * Server grid operators who want clients to connect to a small number of helpers rather than a large number of storage servers (a "multi-tier" architecture) What sorts of machines are good candidates for running a helper? * The Helper needs to have good bandwidth to the storage servers. In particular, it needs to have at least 3.3x better upload bandwidth than the client does, or the client might as well upload directly to the storage servers. In a commercial grid, the helper should be in the same colo (and preferably in the same rack) as the storage servers. * The Helper will take on most of the CPU load involved in uploading a file. So having a dedicated machine will give better results. * The Helper buffers ciphertext on disk, so the host will need at least as much free disk space as there will be simultaneous uploads. When an upload is interrupted, that space will be used for a longer period of time. To turn a Tahoe-LAFS node into a helper (i.e. to run a helper service in addition to whatever else that node is doing), edit the tahoe.cfg file in your node's base directory and set "enabled = true" in the section named "[helper]". Then restart the node. This will signal the node to create a Helper service and listen for incoming requests. 
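The resulting stanza is minimal (shown here as a sketch; the rest of ``tahoe.cfg`` is left unchanged)::

    [helper]
    # Offer the upload-helper service from this node.
    enabled = true
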
Once the node has started, there will be a file named private/helper.furl which contains the contact information for the helper: you will need to give this FURL to any clients that wish to use your helper. :: cat $BASEDIR/private/helper.furl | mail -s "helper furl" friend@example.com You can tell if your node is running a helper by looking at its web status page. Assuming that you've set up the 'webport' to use port 3456, point your browser at ``http://localhost:3456/`` . The welcome page will say "Helper: 0 active uploads" or "Not running helper" as appropriate. The http://localhost:3456/helper_status page will also provide details on what the helper is currently doing. The helper will store the ciphertext that is is fetching from clients in $BASEDIR/helper/CHK_incoming/ . Once all the ciphertext has been fetched, it will be moved to $BASEDIR/helper/CHK_encoding/ and erasure-coding will commence. Once the file is fully encoded and the shares are pushed to the storage servers, the ciphertext file will be deleted. If a client disconnects while the ciphertext is being fetched, the partial ciphertext will remain in CHK_incoming/ until they reconnect and finish sending it. If a client disconnects while the ciphertext is being encoded, the data will remain in CHK_encoding/ until they reconnect and encoding is finished. For long-running and busy helpers, it may be a good idea to delete files in these directories that have not been modified for a week or two. Future versions of tahoe will try to self-manage these files a bit better. Using a Helper ============== Who should consider using a Helper? * clients with limited upstream bandwidth, such as a consumer ADSL line * clients who believe that the helper will give them faster uploads than they could achieve with a direct upload * clients who experience problems with TCP connection fairness: if other programs or machines in the same home are getting less than their fair share of upload bandwidth. If the connection is being shared fairly, then a Tahoe upload that is happening at the same time as a single SFTP upload should get half the bandwidth. * clients who have been given the helper.furl by someone who is running a Helper and is willing to let them use it To take advantage of somebody else's Helper, take the helper furl that they give you, and edit your tahoe.cfg file. Enter the helper's furl into the value of the key "helper.furl" in the "[client]" section of tahoe.cfg, as described in the "Client Configuration" section of :doc:`configuration`. Then restart the node. This will signal the client to try and connect to the helper. Subsequent uploads will use the helper rather than using direct connections to the storage server. If the node has been configured to use a helper, that node's HTTP welcome page (``http://localhost:3456/``) will say "Helper: $HELPERFURL" instead of "Helper: None". If the helper is actually running and reachable, the bullet to the left of "Helper" will be green. The helper is optional. If a helper is connected when an upload begins, the upload will use the helper. If there is no helper connection present when an upload begins, that upload will connect directly to the storage servers. The client will automatically attempt to reconnect to the helper if the connection is lost, using the same exponential-backoff algorithm as all other tahoe/foolscap connections. The upload/download status page (``http://localhost:3456/status``) will announce the using-helper-or-not state of each upload, in the "Helper?" column. 
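For reference, the client-side configuration described above looks roughly like this (the FURL value is only a placeholder; use the value taken from the helper operator's ``private/helper.furl``)::

    [client]
    # FURL obtained from whoever runs the helper.
    helper.furl = pb://<tubid>@<location-hints>/<swissnum>
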
Other Helper Modes ================== The Tahoe Helper only currently helps with one kind of operation: uploading immutable files. There are three other things it might be able to help with in the future: * downloading immutable files * uploading mutable files (such as directories) * downloading mutable files (like directories) Since mutable files are currently limited in size, the ADSL upstream penalty is not so severe for them. There is no ADSL penalty to downloads, but there may still be benefit to extending the helper interface to assist with them: fewer connections to the storage servers, and better TCP fairness. A future version of the Tahoe helper might provide assistance with these other modes. If it were to help with all four modes, then the clients would not need direct connections to the storage servers at all: clients would connect to helpers, and helpers would connect to servers. For a large grid with tens of thousands of clients, this might make the grid more scalable. tahoe_lafs-1.20.0/docs/index.rst0000644000000000000000000000337313615410400013416 0ustar00 Tahoe-LAFS ========== .. Please view a nicely formatted version of this documentation at http://tahoe-lafs.readthedocs.io/en/latest/ Please see the notes under "Organizing Tahoe-LAFS documentation" in docs/README.txt if you are editing this file. Tahoe-LAFS is a Free and Open decentralized storage system. It distributes your data across multiple servers. Even if some of the servers fail or are taken over by an attacker, the entire file store continues to function correctly, preserving your privacy and security. .. toctree:: :maxdepth: 1 :caption: Getting Started with Tahoe-LAFS about-tahoe Installation/install-tahoe running configuration servers frontends/CLI frontends/FTP-and-SFTP frontends/download-status magic-wormhole-invites anonymity-configuration known_issues glossary .. toctree:: :maxdepth: 1 :caption: Tahoe-LAFS in Depth architecture gpg-setup servers managed-grid helper convergence-secret garbage-collection filesystem-notes key-value-store frontends/webapi write_coordination cautions backupdb nodekeys performance logging stats .. toctree:: :maxdepth: 1 :caption: Specifications specifications/index proposed/index .. toctree:: :maxdepth: 1 :caption: Contributing to Tahoe-LAFS contributing CODE_OF_CONDUCT build/build-on-windows build/build-on-linux build/build-on-desert-island developer-guide ticket-triage release-checklist .. toctree:: :maxdepth: 1 :caption: Notes of Community Interest backdoors donations accepting-donations expenses .. toctree:: :maxdepth: 1 :caption: Notes of Historical Interest historical/configuration debian build/build-pyOpenSSL tahoe_lafs-1.20.0/docs/key-value-store.rst0000644000000000000000000001464413615410400015346 0ustar00.. -*- coding: utf-8-with-signature-unix; fill-column: 77 -*- ******************************** Using Tahoe as a key-value store ******************************** There are several ways you could use Tahoe-LAFS as a key-value store. Looking only at things that are *already implemented*, there are three options: 1. Immutable files API: * key ← put(value) This is spelled "`PUT /uri`_" in the API. Note: the user (client code) of this API does not get to choose the key! The key is determined programmatically using secure hash functions and encryption of the value and of the optional "added convergence secret". * value ← get(key) This is spelled "`GET /uri/$FILECAP`_" in the API. "$FILECAP" is the key. 
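As a concrete illustration of this immutable-files API, here is a short sketch that drives the web gateway directly. It assumes a local client node with its web gateway on port 3456 (the example port used elsewhere in these docs); the ``GATEWAY`` constant and the ``put``/``get`` helpers are illustrative names, not part of Tahoe itself::

    import urllib.request

    GATEWAY = "http://127.0.0.1:3456"  # local Tahoe web gateway (example port)

    def put(value: bytes) -> str:
        """Upload an immutable file; the filecap returned in the response
        body is the key (the caller does not get to choose it)."""
        req = urllib.request.Request(GATEWAY + "/uri", data=value, method="PUT")
        with urllib.request.urlopen(req) as resp:
            return resp.read().decode("ascii").strip()

    def get(key: str) -> bytes:
        """Fetch the immutable file named by the filecap ``key``."""
        with urllib.request.urlopen(GATEWAY + "/uri/" + key) as resp:
            return resp.read()

    key = put(b"hello world")      # key looks like URI:CHK:...
    assert get(key) == b"hello world"
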
For details, see "immutable files" in :doc:`performance`, but in summary: the performance is not great but not bad. That document doesn't mention that if the size of the A-byte mutable file is less than or equal to `55 bytes`_ then the performance cost is much smaller, because the value gets packed into the key. Added a ticket: `#2226`_. 2. Mutable files API: * key ← create() This is spelled "`PUT /uri?format=mdmf`_". Note: again, the key cannot be chosen by the user! The key is determined programmatically using secure hash functions and RSA public key pair generation. * set(key, value) * value ← get(key) This is spelled "`GET /uri/$FILECAP`_". Again, the "$FILECAP" is the key. This is the same API as for getting the value from an immutable, above. Whether the value you get this way is immutable (i.e. it will always be the same value) or mutable (i.e. an authorized person can change what value you get when you read) depends on the type of the key. Again, for details, see "mutable files" in :doc:`performance` (and `these tickets`_ about how that doc is incomplete), but in summary, the performance of the create() operation is *terrible*! (It involves generating a 2048-bit RSA key pair.) The performance of the set and get operations are probably merely not great but not bad. 3. Directories API: * directory ← create() This is spelled "`POST /uri?t=mkdir`_". :doc:`performance` does not mention directories (`#2228`_), but in order to understand the performance of directories you have to understand how they are implemented. Mkdir creates a new mutable file, exactly the same, and with exactly the same performance, as the "create() mutable" above. * set(directory, key, value) This is spelled "`PUT /uri/$DIRCAP/[SUBDIRS../]FILENAME`_". "$DIRCAP" is the directory, "FILENAME" is the key. The value is the body of the HTTP PUT request. The part about "[SUBDIRS../]" in there is for optional nesting which you can ignore for the purposes of this key-value store. This way, you *do* get to choose the key to be whatever you want (an arbitrary unicode string). To understand the performance of ``PUT /uri/$directory/$key``, understand that this proceeds in two steps: first it uploads the value as an immutable file, exactly the same as the "put(value)" API from the immutable API above. So right there you've already paid exactly the same cost as if you had used that API. Then after it has finished uploading that, and it has the immutable file cap from that operation in hand, it downloads the entire current directory, changes it to include the mapping from key to the immutable file cap, and re-uploads the entire directory. So that has a cost which is easy to understand: you have to download and re-upload the entire directory, which is the entire set of mappings from user-chosen keys (Unicode strings) to immutable file caps. Each entry in the directory occupies something on the order of 300 bytes. So the "set()" call from this directory-based API has obviously much worse performance than the the equivalent "set()" calls from the immutable-file-based API or the mutable-file-based API. This is not necessarily worse overall than the performance of the mutable-file-based API if you take into account the cost of the necessary create() calls. * value ← get(directory, key) This is spelled "`GET /uri/$DIRCAP/[SUBDIRS../]FILENAME`_". As above, "$DIRCAP" is the directory, "FILENAME" is the key. 
The performance of this is determined by the fact that it first downloads the entire directory, then finds the immutable filecap for the given key, then does a GET on that immutable filecap. So again, it is strictly worse than using the immutable file API (about twice as bad, if the directory size is similar to the value size). What about ways to use LAFS as a key-value store that are not yet implemented? Well, Zooko has lots of ideas about ways to extend Tahoe-LAFS to support different kinds of storage APIs or better performance. One that he thinks is pretty promising is just the Keep It Simple, Stupid idea of "store a sqlite db in a Tahoe-LAFS mutable". ☺ .. _PUT /uri: https://tahoe-lafs.org/trac/tahoe-lafs/browser/trunk/docs/frontends/webapi.rst#writing-uploading-a-file .. _GET /uri/$FILECAP: https://tahoe-lafs.org/trac/tahoe-lafs/browser/trunk/docs/frontends/webapi.rst#viewing-downloading-a-file .. _55 bytes: https://tahoe-lafs.org/trac/tahoe-lafs/browser/trunk/src/allmydata/immutable/upload.py?rev=196bd583b6c4959c60d3f73cdcefc9edda6a38ae#L1504 .. _PUT /uri?format=mdmf: https://tahoe-lafs.org/trac/tahoe-lafs/browser/trunk/docs/frontends/webapi.rst#writing-uploading-a-file .. _#2226: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2226 .. _these tickets: https://tahoe-lafs.org/trac/tahoe-lafs/query?status=assigned&status=new&status=reopened&keywords=~doc&description=~performance.rst&col=id&col=summary&col=status&col=owner&col=type&col=priority&col=milestone&order=priority .. _POST /uri?t=mkdir: https://tahoe-lafs.org/trac/tahoe-lafs/browser/trunk/docs/frontends/webapi.rst#creating-a-new-directory .. _#2228: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2228 .. _PUT /uri/$DIRCAP/[SUBDIRS../]FILENAME: https://tahoe-lafs.org/trac/tahoe-lafs/browser/trunk/docs/frontends/webapi.rst#creating-a-new-directory .. _GET /uri/$DIRCAP/[SUBDIRS../]FILENAME: https://tahoe-lafs.org/trac/tahoe-lafs/browser/trunk/docs/frontends/webapi.rst#reading-a-file tahoe_lafs-1.20.0/docs/known_issues.rst0000644000000000000000000004453613615410400015044 0ustar00.. -*- coding: utf-8-with-signature -*- See also :doc:`cautions.rst`. ============ Known Issues ============ Below is a list of known issues in recent releases of Tahoe-LAFS, and how to manage them. The current version of this file can be found at https://github.com/tahoe-lafs/tahoe-lafs/blob/master/docs/known_issues.rst . If you've been using Tahoe-LAFS since v1.1 (released 2008-06-11) or if you're just curious about what sort of mistakes we've made in the past, then you might want to read the "historical known issues" document in ``docs/historical/historical_known_issues.txt``. 
Known Issues in Tahoe-LAFS v1.10.3, released 30-Mar-2016 ======================================================== * `Unauthorized access by JavaScript in unrelated files`_ * `Disclosure of file through embedded hyperlinks or JavaScript in that file`_ * `Command-line arguments are leaked to other local users`_ * `Capabilities may be leaked to web browser phishing filter / "safe browsing" servers`_ * `Known issues in the SFTP frontend`_ * `Traffic analysis based on sizes of files/directories, storage indices, and timing`_ * `Privacy leak via Google Chart API link in map-update timing web page`_ ---- Unauthorized access by JavaScript in unrelated files ---------------------------------------------------- If you view a file stored in Tahoe-LAFS through a web user interface, JavaScript embedded in that file can, in some circumstances, access other files or directories stored in Tahoe-LAFS that you view through the same web user interface. Such a script would be able to send the contents of those other files or directories to the author of the script, and if you have the ability to modify the contents of those files or directories, then that script could modify or delete those files or directories. This attack is known to be possible when an attacking tab or window could reach a tab or window containing a Tahoe URI by navigating back or forward in the history, either from itself or from any frame with a known name (as specified by the "target" attribute of an HTML link). It might be possible in other cases depending on the browser. *how to manage it* For future versions of Tahoe-LAFS, we are considering ways to close off this leakage of authority while preserving ease of use -- the discussion of this issue is ticket `#615`_. For the present, either do not view files stored in Tahoe-LAFS through a web user interface, or turn off JavaScript in your web browser before doing so, or limit your viewing to files which you know don't contain malicious JavaScript. .. _#615: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/615 ---- Disclosure of file through embedded hyperlinks or JavaScript in that file ------------------------------------------------------------------------- If there is a file stored on a Tahoe-LAFS storage grid, and that file gets downloaded and displayed in a web browser, then JavaScript or hyperlinks within that file can leak the capability to that file to a third party, which means that third party gets access to the file. If there is JavaScript in the file, then it could deliberately leak the capability to the file out to some remote listener. If there are hyperlinks in the file, and they get followed, then whichever server they point to receives the capability to the file. Note that IMG tags are typically followed automatically by web browsers, so being careful which hyperlinks you click on is not sufficient to prevent this from happening. *how to manage it* For future versions of Tahoe-LAFS, we are considering ways to close off this leakage of authority while preserving ease of use -- the discussion of this issue is ticket `#127`_. For the present, a good work-around is that if you want to store and view a file on Tahoe-LAFS and you want that file to remain private, then remove from that file any hyperlinks pointing to other people's servers and remove any JavaScript unless you are sure that the JavaScript is not written to maliciously leak access. .. 
_#127: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/127 ---- Command-line arguments are leaked to other local users ------------------------------------------------------ Remember that command-line arguments are visible to other users (through the 'ps' command, or the windows Process Explorer tool), so if you are using a Tahoe-LAFS node on a shared host, other users on that host will be able to see (and copy) any caps that you pass as command-line arguments. This includes directory caps that you set up with the "tahoe add-alias" command. *how to manage it* As of Tahoe-LAFS v1.3.0 there is a "tahoe create-alias" command that does the following technique for you. Bypass add-alias and edit the NODEDIR/private/aliases file directly, by adding a line like this: fun: URI:DIR2:ovjy4yhylqlfoqg2vcze36dhde:4d4f47qko2xm5g7osgo2yyidi5m4muyo2vjjy53q4vjju2u55mfa By entering the dircap through the editor, the command-line arguments are bypassed, and other users will not be able to see them. Once you've added the alias, if you use that alias instead of a cap itself on the command-line, then no secrets are passed through the command line. Then other processes on the system can still see your filenames and other arguments you type there, but not the caps that Tahoe-LAFS uses to permit access to your files and directories. ---- Capabilities may be leaked to web browser phishing filter / "safe browsing" servers ----------------------------------------------------------------------------------- Firefox, Internet Explorer, and Chrome include a "phishing filter" or "safe browing" component, which is turned on by default, and which sends any URLs that it deems suspicious to a central server. Microsoft gives `a brief description of their filter's operation`_. Firefox and Chrome both use Google's `"safe browsing API"`_ (`specification`_). This of course has implications for the privacy of general web browsing (especially in the cases of Firefox and Chrome, which send your main personally identifying Google cookie along with these requests without your explicit consent, as described in `Firefox bugzilla ticket #368255`_. The reason for documenting this issue here, though, is that when using the Tahoe-LAFS web user interface, it could also affect confidentiality and integrity by leaking capabilities to the filter server. Since IE's filter sends URLs by SSL/TLS, the exposure of caps is limited to the filter server operators (or anyone able to hack the filter server) rather than to network eavesdroppers. The "safe browsing API" protocol used by Firefox and Chrome, on the other hand, is *not* encrypted, although the URL components are normally hashed. Opera also has a similar facility that is disabled by default. A previous version of this file stated that Firefox had abandoned their phishing filter; this was incorrect. .. _a brief description of their filter's operation: https://blogs.msdn.com/ie/archive/2005/09/09/463204.aspx .. _"safe browsing API": https://code.google.com/apis/safebrowsing/ .. _specification: https://code.google.com/p/google-safe-browsing/wiki/Protocolv2Spec .. _Firefox bugzilla ticket #368255: https://bugzilla.mozilla.org/show_bug.cgi?id=368255 *how to manage it* If you use any phishing filter or "safe browsing" feature, consider either disabling it, or not using the WUI via that browser. Phishing filters have `very limited effectiveness`_ , and phishing or malware attackers have learnt how to bypass them. .. 
_very limited effectiveness: http://lorrie.cranor.org/pubs/ndss-phish-tools-final.pdf To disable the filter in IE7 or IE8: ++++++++++++++++++++++++++++++++++++ - Click Internet Options from the Tools menu. - Click the Advanced tab. - If an "Enable SmartScreen Filter" option is present, uncheck it. If a "Use Phishing Filter" or "Phishing Filter" option is present, set it to Disable. - Confirm (click OK or Yes) out of all dialogs. If you have a version of IE that splits the settings between security zones, do this for all zones. To disable the filter in Firefox: +++++++++++++++++++++++++++++++++ - Click Options from the Tools menu. - Click the Security tab. - Uncheck both the "Block reported attack sites" and "Block reported web forgeries" options. - Click OK. To disable the filter in Chrome: ++++++++++++++++++++++++++++++++ - Click Options from the Tools menu. - Click the "Under the Hood" tab and find the "Privacy" section. - Uncheck the "Enable phishing and malware protection" option. - Click Close. ---- Known issues in the SFTP frontend --------------------------------- These are documented in :doc:`frontends/FTP-and-SFTP` and on `the SftpFrontend page`_ on the wiki. .. _the SftpFrontend page: https://tahoe-lafs.org/trac/tahoe-lafs/wiki/SftpFrontend ---- Traffic analysis based on sizes of files/directories, storage indices, and timing --------------------------------------------------------------------------------- Files and directories stored by Tahoe-LAFS are encrypted, but the ciphertext reveals the exact size of the original file or directory representation. This information is available to passive eavesdroppers and to server operators. For example, a large data set with known file sizes could probably be identified with a high degree of confidence. Uploads and downloads of the same file or directory can be linked by server operators, even without making assumptions based on file size. Anyone who knows the introducer furl for a grid may be able to act as a server operator. This implies that if such an attacker knows which file/directory is being accessed in a particular request (by some other form of surveillance, say), then they can identify later or earlier accesses of the same file/directory. Observing requests during a directory traversal (such as a deep-check operation) could reveal information about the directory structure, i.e. which files and subdirectories are linked from a given directory. Attackers can combine the above information with inferences based on timing correlations. For instance, two files that are accessed close together in time are likely to be related even if they are not linked in the directory structure. Also, users that access the same files may be related to each other. ---- Privacy leak via Google Chart API link in map-update timing web page -------------------------------------------------------------------- The Tahoe web-based user interface includes a diagnostic page known as the "map-update timing page". It is reached through the "Recent and Active Operations" link on the front welcome page, then through the "Status" column for "map-update" operations (which occur when mutable files, including directories, are read or written). This page contains per-server response times, as lines of text, and includes an image which displays the response times in graphical form. The image is generated by constructing a URL for the `Google Chart API`_, which is then served by the `chart.apis.google.com` internet server. .. 
_Google Chart API: https://developers.google.com/chart/image/ When you view this page, several parties may learn information about your Tahoe activities. The request will typically include a "Referer" header, revealing the URL of the mapupdate status page (which is typically something like "http://127.0.0.1:3456/status/mapupdate-123") to network observers and the Google API server. The image returned by this server is typically a PNG file, but either the server or a MitM attacker could replace it with something malicious that attempts to exploit a browser rendering bug or buffer overflow. (Note that browsers do not execute scripts inside IMG tags, even for SVG images). In addition, if your Tahoe node connects to its grid over Tor or i2p, but the web browser you use to access your node does not, then this image link may reveal your use of Tahoe (and that grid) to the outside world. It is not recommended to use a browser in this way, because other links in Tahoe-stored content would reveal even more information (e.g. an attacker could store an HTML file with unique CSS references into a shared Tahoe grid, then send your pseudonym a message with its URI, then observe your browser loading that CSS file, and thus link the source IP address of your web client to that pseudonym). A future version of Tahoe will probably replace the Google Chart API link (which was deprecated by Google in April 2012) with client-side javascript using d3.js, removing the information leak but requiring JS to see the chart. See ticket `#1942`_ for details. .. _#1942: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1942 ---- Known Issues in Tahoe-LAFS v1.9.0, released 31-Oct-2011 ======================================================= Integrity Failure during Mutable Downloads ------------------------------------------ Under certain circumstances, the integrity-verification code of the mutable downloader could be bypassed. Clients who receive carefully crafted shares (from attackers) will emit incorrect file contents, and the usual share-corruption errors would not be raised. This only affects mutable files (not immutable), and only affects downloads that use doctored shares. It is not persistent: the threat is resolved once you upgrade your client to a version without the bug. However, read-modify-write operations (such as directory manipulations) performed by vulnerable clients could cause the attacker's modifications to be written back out to the mutable file, making the corruption permanent. The attacker's ability to manipulate the file contents is limited. They can modify FEC-encoded ciphertext in all but one share. This gives them the ability to blindly flip bits in roughly 2/3rds of the file (for the default k=3 encoding parameter). Confidentiality remains intact, unless the attacker can deduce the file's contents by observing your reactions to corrupted downloads. This bug was introduced in 1.9.0, as part of the MDMF-capable downloader, and affects both SDMF and MDMF files. It was not present in 1.8.3. 
*how to manage it* There are three options: * Upgrade to 1.9.1, which fixes the bug * Downgrade to 1.8.3, which does not contain the bug * If using 1.9.0, do not trust the contents of mutable files (whether SDMF or MDMF) that the 1.9.0 client emits, and do not modify directories (which could write the corrupted data back into place, making the damage persistent) ---- Known Issues in Tahoe-LAFS v1.8.2, released 30-Jan-2011 ======================================================= Unauthorized deletion of an immutable file by its storage index --------------------------------------------------------------- Due to a flaw in the Tahoe-LAFS storage server software in v1.3.0 through v1.8.2, a person who knows the "storage index" that identifies an immutable file can cause the server to delete its shares of that file. If an attacker can cause enough shares to be deleted from enough storage servers, this deletes the file. This vulnerability does not enable anyone to read file contents without authorization (confidentiality), nor to change the contents of a file (integrity). A person could learn the storage index of a file in several ways: 1. By being granted the authority to read the immutable file: i.e. by being granted a read capability to the file. They can determine the file's storage index from its read capability. 2. By being granted a verify capability to the file. They can determine the file's storage index from its verify capability. This case probably doesn't happen often because users typically don't share verify caps. 3. By operating a storage server, and receiving a request from a client that has a read cap or a verify cap. If the client attempts to upload, download, or verify the file with their storage server, even if it doesn't actually have the file, then they can learn the storage index of the file. 4. By gaining read access to an existing storage server's local filesystem, and inspecting the directory structure that it stores its shares in. They can thus learn the storage indexes of all files that the server is holding at least one share of. Normally only the operator of an existing storage server would be able to inspect its local filesystem, so this requires either being such an operator of an existing storage server, or somehow gaining the ability to inspect the local filesystem of an existing storage server. *how to manage it* Tahoe-LAFS version v1.8.3 or newer (except v1.9a1) no longer has this flaw; if you upgrade a storage server to a fixed release then that server is no longer vulnerable to this problem. Note that the issue is local to each storage server independently of other storage servers: when you upgrade a storage server then that particular storage server can no longer be tricked into deleting its shares of the target file. If you can't immediately upgrade your storage server to a version of Tahoe-LAFS that eliminates this vulnerability, then you could temporarily shut down your storage server. This would of course negatively impact availability -- clients would not be able to upload or download shares to that particular storage server while it was shut down -- but it would protect the shares already stored on that server from being deleted as long as the server is shut down. If the servers that store shares of your file are running a version of Tahoe-LAFS with this vulnerability, then you should think about whether someone can learn the storage indexes of your files by one of the methods described above. 
A person can not exploit this vulnerability unless they have received a read cap or verify cap, or they control a storage server that has been queried about this file by a client that has a read cap or a verify cap. Tahoe-LAFS does not currently have a mechanism to limit which storage servers can connect to your grid, but it does have a way to see which storage servers have been connected to the grid. The Introducer's front page in the Web User Interface has a list of all storage servers that the Introducer has ever seen and the first time and the most recent time that it saw them. Each Tahoe-LAFS gateway maintains a similar list on its front page in its Web User Interface, showing all of the storage servers that it learned about from the Introducer, when it first connected to that storage server, and when it most recently connected to that storage server. These lists are stored in memory and are reset to empty when the process is restarted. See ticket `#1528`_ for technical details. .. _#1528: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1528 tahoe_lafs-1.20.0/docs/lease-tradeoffs.svg0000644000000000000000000004453013615410400015342 0ustar00 [SVG figure: lease-tradeoffs diagram plotting lease renewal time (daily, weekly, monthly, yearly) against lease expiration time (day, week, month, year), illustrating the tradeoffs between less/more traffic, less/more garbage, and less/more safety.] tahoe_lafs-1.20.0/docs/logging.rst0000644000000000000000000003160313615410400013732 0ustar00.. -*- coding: utf-8-with-signature -*- ============= Tahoe Logging ============= 1. `Overview`_ 2. `Realtime Logging`_ 3. `Incidents`_ 4. `Working with flogfiles`_ 5. `Gatherers`_ 1. `Incident Gatherer`_ 2. `Log Gatherer`_ 6. `Adding log messages`_ 7. `Log Messages During Unit Tests`_ Overview ======== Tahoe uses the Foolscap logging mechanism (known as the "flog" subsystem) to record information about what is happening inside the Tahoe node. This is primarily for use by programmers and grid operators who want to find out what went wrong. The Foolscap logging system is documented at ``__. The Foolscap distribution includes a utility named "``flogtool``" that is used to get access to many Foolscap logging features. ``flogtool`` should get installed into the same virtualenv as the ``tahoe`` command. Realtime Logging ================ When you are working on Tahoe code, and want to see what the node is doing, the easiest tool to use is "``flogtool tail``". This connects to the Tahoe node and subscribes to hear about all log events. These events are then displayed to stdout, and optionally saved to a file. "``flogtool tail``" connects to the "logport", for which the FURL is stored in ``BASEDIR/private/logport.furl`` . The following command will connect to this port and start emitting log information:: flogtool tail BASEDIR/private/logport.furl The ``--save-to FILENAME`` option will save all received events to a file, where they can be examined later with "``flogtool dump``" or "``flogtool web-viewer``". The ``--catch-up`` option will ask the node to dump all stored events before subscribing to new ones (without ``--catch-up``, you will only hear about events that occur after the tool has connected and subscribed). Incidents ========= Foolscap keeps a short list of recent events in memory. When something goes wrong, it writes all the history it has (and everything that gets logged in the next few seconds) into a file called an "incident". These files go into ``BASEDIR/logs/incidents/`` , in a file named "``incident-TIMESTAMP-UNIQUE.flog.bz2``".
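As a concrete illustration (the incident file name below is made up), you can list a node's stored incidents and inspect one with the ``flogtool dump`` command described below::

  ls BASEDIR/logs/incidents/
  flogtool dump BASEDIR/logs/incidents/incident-2015-03-01--17-28-56Z-w2qn32q.flog.bz2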
The default definition of "something goes wrong" is the generation of a log event at the ``log.WEIRD`` level or higher, but other criteria could be implemented. The typical "incident report" we've seen in a large Tahoe grid is about 40kB compressed, representing about 1800 recent events. These "flogfiles" have a similar format to the files saved by "``flogtool tail --save-to``". They are simply lists of log events, with a small header to indicate which event triggered the incident. The "``flogtool dump FLOGFILE``" command will take one of these ``.flog.bz2`` files and print its contents to stdout, one line per event. The raw event dictionaries can be dumped by using "``flogtool dump --verbose FLOGFILE``". The "``flogtool web-viewer``" command can be used to examine the flogfile in a web browser. It runs a small HTTP server and emits the URL on stdout. This view provides more structure than the output of "``flogtool dump``": the parent/child relationships of log events are displayed in a nested format. "``flogtool web-viewer``" is still fairly immature. Working with flogfiles ====================== The "``flogtool filter``" command can be used to take a large flogfile (perhaps one created by the log-gatherer, see below) and copy a subset of events into a second file. This smaller flogfile may be easier to work with than the original. The arguments to "``flogtool filter``" specify filtering criteria: a predicate that each event must match to be copied into the target file. ``--before`` and ``--after`` are used to exclude events outside a given window of time. ``--above`` will retain events above a certain severity level. ``--from`` retains events sent by a specific tubid. ``--strip-facility`` removes events that were emitted with a given facility (like ``foolscap.negotiation`` or ``tahoe.upload``). Gatherers ========= In a deployed Tahoe grid, it is useful to get log information automatically transferred to a central log-gatherer host. This offloads the (admittedly modest) storage requirements to a different host and provides access to logfiles from multiple nodes (web-API, storage, or helper) in a single place. There are two kinds of gatherers: "log gatherer" and "stats gatherer". Each produces a FURL which needs to be placed in the ``NODEDIR/tahoe.cfg`` file of each node that is to publish to the gatherer, under the keys "log_gatherer.furl" and "stats_gatherer.furl" respectively. When the Tahoe node starts, it will connect to the configured gatherers and offer its logport: the gatherer will then use the logport to subscribe to hear about events. The gatherer will write to files in its working directory, which can then be examined with tools like "``flogtool dump``" as described above. Incident Gatherer ----------------- The "incident gatherer" only collects Incidents: records of the log events that occurred just before and slightly after some high-level "trigger event" was recorded. Each incident is classified into a "category": a short string that summarizes what sort of problem took place. These classification functions are written after examining a new/unknown incident. The idea is to recognize when the same problem is happening multiple times. A collection of classification functions that are useful for Tahoe nodes is provided in ``misc/incident-gatherer/support_classifiers.py`` . There is roughly one category for each ``log.WEIRD``-or-higher level event in the Tahoe source code.
The incident gatherer is created with the "``flogtool create-incident-gatherer WORKDIR``" command, and started with "``tahoe run``". The generated "``gatherer.tac``" file should be modified to add classifier functions. The incident gatherer writes incident names (which are simply the relative pathname of the ``incident-\*.flog.bz2`` file) into ``classified/CATEGORY``. For example, the ``classified/mutable-retrieve-uncoordinated-write-error`` file contains a list of all incidents which were triggered by an uncoordinated write that was detected during mutable file retrieval (caused when somebody changed the contents of the mutable file in between the node's mapupdate step and the retrieve step). The ``classified/unknown`` file contains a list of all incidents that did not match any of the classification functions. At startup, the incident gatherer will automatically reclassify any incident report which is not mentioned in any of the ``classified/\*`` files. So the usual workflow is to examine the incidents in ``classified/unknown``, add a new classification function, delete ``classified/unknown``, then restart the gatherer with "``tahoe restart WORKDIR``". The incidents which can be classified with the new functions will be added to their own ``classified/FOO`` lists, and the remaining ones will be put in ``classified/unknown``, where the process can be repeated until all events are classifiable. The incident gatherer is still fairly immature: future versions will have a web interface and an RSS feed, so operations personnel can track problems in the storage grid. In our experience, each incident takes about two seconds to transfer from the node that generated it to the gatherer. The gatherer will automatically catch up to any incidents which occurred while it was offline. Log Gatherer ------------ The "Log Gatherer" subscribes to hear about every single event published by the connected nodes, regardless of severity. This server writes these log events into a large flogfile that is rotated (closed, compressed, and replaced with a new one) on a periodic basis. Each flogfile is named according to the range of time it represents, with names like "``from-2008-08-26-132256--to-2008-08-26-162256.flog.bz2``". The flogfiles contain events from many different sources, making it easier to correlate things that happened on multiple machines (such as comparing a client node making a request with the storage servers that respond to that request). Create the Log Gatherer with the "``flogtool create-gatherer WORKDIR``" command, and start it with "``twistd -ny gatherer.tac``". Then copy the contents of the ``log_gatherer.furl`` file it creates into the ``BASEDIR/tahoe.cfg`` file (under the key ``log_gatherer.furl`` of the section ``[node]``) of all nodes that should be sending it log events. (See :doc:`configuration`) The "``flogtool filter``" command, described above, is useful to cut down the potentially large flogfiles into a more focussed form. Busy nodes, particularly web-API nodes which are performing recursive deep-size/deep-stats/deep-check operations, can produce a lot of log events. To avoid overwhelming the node (and using an unbounded amount of memory for the outbound TCP queue), publishing nodes will start dropping log events when the outbound queue grows too large. When this occurs, there will be gaps (non-sequential event numbers) in the log-gatherer's flogfiles. Adding log messages =================== When adding new code, the Tahoe developer should add a reasonable number of new log events.
For details, please see the Foolscap logging documentation, but a few notes are worth stating here: * use a facility prefix of "``tahoe.``", like "``tahoe.mutable.publish``" * assign each severe (``log.WEIRD`` or higher) event a unique message identifier, as the ``umid=`` argument to the ``log.msg()`` call. The ``misc/coding_tools/make_umid`` script may be useful for this purpose. This will make it easier to write a classification function for these messages. * use the ``parent=`` argument whenever the event is causally/temporally clustered with its parent. For example, a download process that involves three sequential hash fetches could announce the send and receipt of those hash-fetch messages with a ``parent=`` argument that ties them to the overall download process. However, each new web-API download request should be unparented. * use the ``format=`` argument in preference to the ``message=`` argument. E.g. use ``log.msg(format="got %(n)d shares, need %(k)d", n=n, k=k)`` instead of ``log.msg("got %d shares, need %d" % (n,k))``. This will allow later tools to analyze the event without needing to scrape/reconstruct the structured data out of the formatted string. * Pass extra information as extra keyword arguments, even if they aren't included in the ``format=`` string. This information will be displayed in the "``flogtool dump --verbose``" output, as well as being available to other tools. The ``umid=`` argument should be passed this way. * use ``log.err`` for the catch-all ``addErrback`` that gets attached to the end of any given Deferred chain. When used in conjunction with ``LOGTOTWISTED=1``, ``log.err()`` will tell Twisted about the error-nature of the log message, causing Trial to flunk the test (with an "ERROR" indication that prints a copy of the Failure, including a traceback). Don't use ``log.err`` for events that are ``BAD`` but handled (like hash failures: since these are often deliberately provoked by test code, they should not cause test failures): use ``log.msg(level=BAD)`` for those instead. Log Messages During Unit Tests ============================== If a test is failing and you aren't sure why, start by enabling ``FLOGTOTWISTED=1`` like this:: make test FLOGTOTWISTED=1 With ``FLOGTOTWISTED=1``, sufficiently-important log events will be written into ``_trial_temp/test.log``, which may give you more ideas about why the test is failing. By default, ``_trial_temp/test.log`` will not receive messages below the ``level=OPERATIONAL`` threshold. You can change the threshold via the ``FLOGLEVEL`` variable, e.g.:: make test FLOGLEVEL=10 FLOGTOTWISTED=1 (The level numbers are listed in src/allmydata/util/log.py.) To look at the detailed foolscap logging messages, run the tests like this:: make test FLOGFILE=flog.out.bz2 FLOGLEVEL=1 FLOGTOTWISTED=1 The first environment variable will cause foolscap log events to be written to ``./flog.out.bz2`` (instead of merely being recorded in the circular buffers for the use of remote subscribers or incident reports). The second will cause all log events to be written out, not just the higher-severity ones. The third will cause twisted log events (like the markers that indicate when each unit test is starting and stopping) to be copied into the flogfile, making it easier to correlate log events with unit tests. Enabling this form of logging appears to roughly double the runtime of the unit tests. The ``flog.out.bz2`` file is approximately 2MB. 
You can then use "``flogtool dump``" or "``flogtool web-viewer``" on the resulting ``flog.out`` file. ("``flogtool tail``" and the log-gatherer are not useful during unit tests, since there is no single Tub to which all the log messages are published). It is possible for setting these environment variables to cause spurious test failures in tests with race condition bugs. All known instances of this have been fixed as of Tahoe-LAFS v1.7.1. tahoe_lafs-1.20.0/docs/magic-wormhole-invites.rst0000644000000000000000000000507213615410400016676 0ustar00********************** Magic Wormhole Invites ********************** Magic Wormhole ============== `magic wormhole`_ is a server and a client which together use Password Authenticated Key Exchange (PAKE) so that a short code can be used to establish a secure channel between two computers. These codes are one-time use and an attacker gets at most one "guess", thus allowing low-entropy codes to be used. .. _magic wormhole: https://github.com/warner/magic-wormhole#design Invites and Joins ================= Inside Tahoe-LAFS we are using a channel created using `magic wormhole`_ to exchange configuration and the secret fURL of the Introducer with new clients. This is a two-part process. Alice runs a grid and wishes to have her friend Bob use it as a client. She runs ``tahoe invite bob`` which will print out a short "wormhole code" like ``2-unicorn-quiver``. You may also include some options for total, happy and needed shares if you like. Alice then transmits this one-time secret code to Bob. Alice must keep her command running until Bob has done his step as it is waiting until a secure channel is established before sending the data. Bob then runs ``tahoe create-client --join `` with any other options he likes. This will "use up" the code, establishing a secure session with Alice's computer. If an attacker tries to guess the code, they get only one chance to do so (and then Bob's side will fail). Once Bob's computer has connected to Alice's computer, the two computers perform the protocol described below, resulting in some JSON with the Introducer fURL, nickname and any other options being sent to Bob's computer. The ``tahoe create-client`` command then uses these options to set up Bob's client. Tahoe-LAFS Secret Exchange ========================== The protocol that the Alice (the one doing the invite) and Bob (the one being invited) sides perform once a magic wormhole secure channel has been established goes as follows: Alice and Bob both immediately send an "abilities" message as JSON. For Alice this is ``{"abilities": {"server-v1": {}}}``. For Bob, this is ``{"abilities": {"client-v1": {}}}``. After receiving the message from the other side and confirming the expected protocol, Alice transmits the configuration JSON:: { "needed": 3, "total": 10, "happy": 7, "nickname": "bob", "introducer": "pb://xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx@example.com:41505/yyyyyyyyyyyyyyyyyyyyyyy" } Both sides then disconnect. As you can see, there is room for future revisions of the protocol but as of yet none have been sketched out. tahoe_lafs-1.20.0/docs/managed-grid.rst0000644000000000000000000003214113615410400014621 0ustar00 Managed Grid ============ This document explains the "Grid Manager" concept and the `grid-manager` command. Someone operating a grid may choose to use a Grid Manager. Operators of storage-servers and clients will then be given additional configuration in this case.
Overview and Motivation ----------------------- In a grid using an Introducer, a client will use any storage-server the Introducer announces (and the Introducer will announce any storage-server that connects to it). This means that anyone with the Introducer fURL can connect storage to the grid. Sometimes, this is just what you want! For some use-cases, though, you want to have clients only use certain servers. One case might be a "managed" grid, where some entity runs the grid; clients of this grid don't want their uploads to go to "unmanaged" storage if some other client decides to provide storage. One way to limit which storage servers a client connects to is via the "server list" (:ref:`server_list`) (aka "Introducerless" mode). Clients are given static lists of storage-servers, and connect only to those. This means manually updating these lists if the storage servers change, however. Another method is for clients to use `[client] peers.preferred=` configuration option (:ref:`Client Configuration`), which suffers from a similar disadvantage. Grid Manager ------------ A "grid-manager" consists of some data defining a keypair (along with some other details) and Tahoe sub-commands to manipulate the data and produce certificates to give to storage-servers. Certificates assert the statement: "Grid Manager X suggests you use storage-server Y to upload shares to" (X and Y are public-keys). Such a certificate consists of: - the version of the format the certificate conforms to (`1`) - the public-key of a storage-server - an expiry timestamp - a signature of the above A client will always use any storage-server for downloads (expired certificate, or no certificate) because clients check the ciphertext and re-assembled plaintext against the keys in the capability; "grid-manager" certificates only control uploads. Clients make use of this functionality by configuring one or more Grid Manager public keys. This tells the client to only upload to storage-servers that have a currently-valid certificate from any of the Grid Managers their client allows. In case none are configured, the default behavior (of using any storage server) prevails. Grid Manager Data Storage ------------------------- The data defining the grid-manager is stored in an arbitrary directory, which you indicate with the ``--config`` option (in the future, we may add the ability to store the data directly in a grid, at which time you may be able to pass a directory-capability to this option). If you don't want to store the configuration on disk at all, you may use ``--config -`` (the last character is a dash) and write a valid JSON configuration to stdin. All commands require the ``--config`` option and they all behave similarly for "data from stdin" versus "data from disk". A directory (and not a file) is used on disk because in that mode, each certificate issued is also stored alongside the configuration document; in "stdin / stdout" mode, an issued certificate is only ever available on stdout. The configuration is a JSON document. It is subject to change as Grid Manager evolves. It contains a version number in the `grid_manager_config_version` key which will increment whenever the document schema changes. grid-manager create ``````````````````` Create a new grid-manager. If you specify ``--config -`` then a new grid-manager configuration is written to stdout. Otherwise, a new grid-manager is created in the directory specified by the ``--config`` option. It is an error if the directory already exists. 
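For example (the ``./gm0`` directory name is arbitrary), the two styles of invocation described above might look like this minimal sketch::

  # keep the grid-manager state in a local directory
  grid-manager --config ./gm0 create

  # or keep nothing on disk: write the new JSON configuration to stdout
  grid-manager --config - create > gm-config.json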
grid-manager public-identity ```````````````````````````` Print out a grid-manager's public key. This key is derived from the private-key of the grid-manager, so a valid grid-manager config must be given via ``--config``. This public key is what is put in clients' configuration to actually validate and use grid-manager certificates. grid-manager add ```````````````` Takes two args: ``name pubkey``. The ``name`` is an arbitrary local identifier for the new storage node (also sometimes called "a petname" or "nickname"). The pubkey is the tahoe-encoded key from a ``node.pubkey`` file in the storage-server's node directory (minus any whitespace). For example, if ``~/storage0`` contains a storage-node, you might do something like this:: grid-manager --config ./gm0 add storage0 $(cat ~/storage0/node.pubkey) This adds a new storage-server to a Grid Manager's configuration. (Since it mutates the configuration, if you used ``--config -`` the new configuration will be printed to stdout). The usefulness of the ``name`` is solely for reference within this Grid Manager. grid-manager list ````````````````` Lists all storage-servers that have previously been added using ``grid-manager add``. grid-manager sign ````````````````` Takes two args: ``name expiry_days``. The ``name`` is a nickname used previously in a ``grid-manager add`` command and ``expiry_days`` is the number of days in the future when the certificate should expire. Note that this mutates the state of the grid-manager if it is on disk, by adding this certificate to our collection of issued certificates. If you used ``--config -``, the certificate isn't persisted anywhere except to stdout (so if you wish to keep it somewhere, that is up to you). This command creates a new "version 1" certificate for a storage-server (identified by its public key). The new certificate is printed to stdout. If you stored the config on disk, the new certificate will (also) be in a file named like ``alice.cert.0``. Enrolling a Storage Server: CLI ------------------------------- tahoe admin add-grid-manager-cert ````````````````````````````````` - `--filename`: the file to read the cert from - `--name`: the name of this certificate Import a "version 1" storage-certificate produced by a grid-manager. A storage server may have zero or more such certificates installed; for now just one is sufficient. You will have to re-start your node after this. Subsequent announcements to the Introducer will include this certificate. .. note:: This command will simply edit the `tahoe.cfg` file and direct you to re-start. In the Future(tm), we should consider (in exarkun's words): "A python program you run as a new process" might not be the best abstraction to layer on top of the configuration persistence system, though. It's a nice abstraction for users (although most users would probably rather have a GUI) but it's not a great abstraction for automation. So at some point it may be better if there is CLI -> public API -> configuration persistence system.
And maybe "public API" is even a network API for the storage server so it's equally easy to access from an agent implemented in essentially any language and maybe if the API is exposed by the storage node itself then this also gives you live-configuration-updates, avoiding the need for node restarts (not that this is the only way to accomplish this, but I think it's a good way because it avoids the need for messes like inotify and it supports the notion that the storage node process is in charge of its own configuration persistence system, not just one consumer among many ... which has some nice things going for it ... though how this interacts exactly with further node management automation might bear closer scrutiny). Enrolling a Storage Server: Config ---------------------------------- You may edit the ``[storage]`` section of the ``tahoe.cfg`` file to turn on grid-management with ``grid_management = true``. You then must also provide a ``[grid_management_certificates]`` section in the config-file which lists ``name = path/to/certificate`` pairs. These certificate files are issued by the ``grid-manager sign`` command; these should be transmitted to the storage server operator who includes them in the config for the storage server. Relative paths are based from the node directory. Example:: [storage] grid_management = true [grid_management_certificates] default = example_grid.cert This will cause us to give this certificate to any Introducers we connect to (and subsequently, the Introducer will give the certificate out to clients). Enrolling a Client: Config -------------------------- You may instruct a Tahoe client to use only storage servers from given Grid Managers. If there are no such keys, any servers are used (but see https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3979). If there are one or more keys, the client will only upload to a storage server that has a valid certificate (from any of the keys). To specify public-keys, add a ``[grid_managers]`` section to the config. This consists of ``name = value`` pairs where ``name`` is an arbitrary name and ``value`` is a public-key of a Grid Manager. Example:: [grid_managers] example_grid = pub-v0-vqimc4s5eflwajttsofisp5st566dbq36xnpp4siz57ufdavpvlq See also https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3507 which proposes a command to edit the config. Example Setup of a New Managed Grid ----------------------------------- This example creates an actual grid, but it's all just on one machine with different "node directories" and a separate tahoe process for each node. Usually of course each storage server would be on a separate computer. Note that we use the ``daemonize`` command in the following but that's only one way to handle "running a command in the background". You could instead run commands that start with ``daemonize ...`` in their own shell/terminal window or via something like ``systemd`` We'll store our Grid Manager configuration on disk, in ``./gm0``. To initialize this directory:: grid-manager --config ./gm0 create (If you already have a grid, you can :ref:`skip ahead `.) First of all, create an Introducer. 
Note that we actually have to run it briefly before it creates the "Introducer fURL" we want for the next steps:: tahoe create-introducer --listen=tcp --port=5555 --location=tcp:localhost:5555 ./introducer daemonize tahoe -d introducer run Next, we attach a couple of storage nodes:: tahoe create-node --introducer $(cat introducer/private/introducer.furl) --nickname storage0 --webport 6001 --location tcp:localhost:6003 --port 6003 ./storage0 tahoe create-node --introducer $(cat introducer/private/introducer.furl) --nickname storage1 --webport 6101 --location tcp:localhost:6103 --port 6103 ./storage1 daemonize tahoe -d storage0 run daemonize tahoe -d storage1 run .. _skip_ahead: We can now tell the Grid Manager about our new storage servers:: grid-manager --config ./gm0 add storage0 $(cat storage0/node.pubkey) grid-manager --config ./gm0 add storage1 $(cat storage1/node.pubkey) To produce a new certificate for each node, we do this:: grid-manager --config ./gm0 sign storage0 > ./storage0/gridmanager.cert grid-manager --config ./gm0 sign storage1 > ./storage1/gridmanager.cert Now, we want our storage servers to actually announce these certificates into the grid. We do this by adding some configuration (in ``tahoe.cfg``):: [storage] grid_management = true [grid_manager_certificates] default = gridmanager.cert Add the above bit to each node's ``tahoe.cfg`` and re-start the storage nodes. (Alternatively, use the ``tahoe add-grid-manager`` command). Now try adding a new storage server ``storage2``. This client can join the grid just fine, and announce itself to the Introducer as providing storage:: tahoe create-node --introducer $(cat introducer/private/introducer.furl) --nickname storage2 --webport 6301 --location tcp:localhost:6303 --port 6303 ./storage2 daemonize tahoe -d storage2 run At this point any client will upload to any of these three storage-servers. Make a client "alice" and try! :: tahoe create-client --introducer $(cat introducer/private/introducer.furl) --nickname alice --webport 6401 --shares-total=3 --shares-needed=2 --shares-happy=3 ./alice daemonize tahoe -d alice run tahoe -d alice put README.rst # prints out a read-cap find storage2/storage/shares # confirm storage2 has a share Now we want to make Alice only upload to the storage servers that the grid-manager has given certificates to (``storage0`` and ``storage1``). We need the grid-manager's public key to put in Alice's configuration:: grid-manager --config ./gm0 public-identity Put the key printed out above into Alice's ``tahoe.cfg`` in section ``client``:: [grid_managers] example_name = pub-v0-vqimc4s5eflwajttsofisp5st566dbq36xnpp4siz57ufdavpvlq Now, re-start the "alice" client. Since we made Alice's parameters require 3 storage servers to be reachable (``--happy=3``), all their uploads should now fail (so ``tahoe put`` will fail) because they won't use storage2 and thus can't "achieve happiness". A proposal to expose more information about Grid Manager and certificate status in the Welcome page is discussed in https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3506 tahoe_lafs-1.20.0/docs/network-and-reliance-topology.svg0000644000000000000000000051061513615410400020163 0ustar00 [SVG figure: Tahoe-LAFS network and reliance topology. A Tahoe-LAFS client (web browser, command-line tool, tahoe backup tool, JavaScript frontends, duplicity, GridBackup (incomplete), FTP and SFTP clients, FUSE via sshfs) talks to a Tahoe-LAFS gateway via the web-API/HTTP(S), FTP, or SFTP servers, and the gateway talks to Tahoe-LAFS storage servers (disk backend; cloud backends under development: S3, OpenStack, Google, Azure) over the Tahoe-LAFS storage protocol on TCP/SSL. Red links and machines are those you rely on for confidentiality and integrity (whoever controls them can see your files and change their contents); black links and machines are those you do not rely on.]
tahoe_lafs-1.20.0/docs/nodekeys.rst0000644000000000000000000001505113615410400014124 0ustar00.. -*- coding: utf-8-with-signature -*- ======================= Node Keys in Tahoe-LAFS ======================= "Node Keys" are cryptographic signing/verifying keypairs used to identify Tahoe-LAFS nodes (client-only and client+server). The private signing key is stored in NODEDIR/private/node.privkey, and is used to sign the announcements that are distributed to all nodes by the Introducer. The public verifying key is used to identify the sending node from those other systems: it is displayed as a "Node ID" that looks like "v0-abc234xyz567..", which ends with a long base32-encoded string. These node keys were introduced in the 1.10 release (April 2013), as part of ticket #466. In previous releases, announcements were unsigned, and nodes were identified by their Foolscap "Tub ID" (a somewhat shorter base32 string, with no "v0-" prefix). Why Announcements Are Signed ---------------------------- All nodes (both client-only and client+server) publish announcements to the Introducer, which then relays them to all other nodes. These announcements contain information about the publishing node's nickname, how to reach the node, what services it offers, and what version of code it is running. The new private node key is used to sign these announcements, preventing the Introducer from modifying their contents en-route. This will enable future versions of Tahoe-LAFS to use other forms of introduction (gossip, multiple introducers) without weakening the security model. The Node ID is useful as a handle with which to talk about a node. For example, when clients eventually gain the ability to control which storage servers they are willing to use (#467), the configuration file might simply include a list of Node IDs for the approved servers. TubIDs are currently also suitable for this job, but they depend upon having a Foolscap connection to the server. Since our goal is to move away from Foolscap towards a simpler (faster and more portable) protocol, we want to reduce our dependence upon TubIDs. Node IDs and Ed25519 signatures can be used for non-Foolscap non-SSL based protocols. How The Node ID Is Computed --------------------------- The long-form Node ID is the Ed25519 public verifying key, 256 bits (32 bytes) long, base32-encoded, with a "v0-" prefix prepended, and the trailing "=" padding removed, like so: v0-rlj3jnxqv4ee5rtpyngvzbhmhuikjfenjve7j5mzmfcxytwmyf6q The Node ID is displayed in this long form on the node's front Welcome page, and on the Introducer's status page. In most other places (share-placement lists, file health displays), the "short form" is used instead. This is simply the first 8 characters of the base32 portion, frequently enclosed in square brackets, like this: [rlj3jnxq] In contrast, old-style TubIDs are usually displayed with just 6 base32 characters.
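To make the long-form/short-form relationship concrete: the short form is just the first 8 characters of the base32 portion of the long form, which can be checked with ordinary shell text tools (nothing Tahoe-specific is involved)::

  echo "v0-rlj3jnxqv4ee5rtpyngvzbhmhuikjfenjve7j5mzmfcxytwmyf6q" | cut -d- -f2 | cut -c1-8
  # prints: rlj3jnxq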
Version Compatibility, Fallbacks For Old Versions ------------------------------------------------- Since Tahoe-LAFS 1.9 does not know about signed announcements, 1.10 includes backwards-compatibility code to allow old and new versions to interoperate. There are three relevant participants: the node publishing an announcement, the Introducer which relays them, and the node receiving the (possibly signed) announcement. When a 1.10 node connects to an old Introducer (version 1.9 or earlier), it sends downgraded non-signed announcements. It likewise accepts non-signed announcements from the Introducer. The non-signed announcements use TubIDs to identify the sending node. The new 1.10 Introducer, when it connects to an old node, downgrades any signed announcements to non-signed ones before delivery. As a result, the only way to receive signed announcements is for all three systems to be running the new 1.10 code. In a grid with a mixture of old and new nodes, if the Introducer is old, then all nodes will see unsigned TubIDs. If the Introducer is new, then nodes will see signed Node IDs whenever possible. Share Placement --------------- Tahoe-LAFS uses a "permuted ring" algorithm to decide where to place shares for any given file. For each potential server, it uses that server's "permutation seed" to compute a pseudo-random but deterministic location on a ring, then walks the ring in clockwise order, asking each server in turn to hold a share until all are placed. When downloading a file, the servers are accessed in the same order. This minimizes the number of queries that must be done to download a file, and tolerates "churn" (nodes being added and removed from the grid) fairly well. This property depends upon server nodes having a stable permutation seed. If a server's permutation seed were to change, it would effectively wind up at a randomly selected place on the permuted ring. Downloads would still complete, but clients would spend more time asking other servers before querying the correct one. In the old 1.9 code, the permutation-seed was always equal to the TubID. In 1.10, servers include their permutation-seed as part of their announcement. To improve stability for existing grids, if an old server (one with existing shares) is upgraded to run the 1.10 codebase, it will use its old TubID as its permutation-seed. When a new empty server runs the 1.10 code, it will use its Node ID instead. In both cases, once the node has picked a permutation-seed, it will continue using that value forever. To be specific, when a node wakes up running the 1.10 code, it will look for a recorded NODEDIR/permutation-seed file, and use its contents if present. If that file does not exist, it creates it (with the TubID if it has any shares, otherwise with the Node ID), and uses the contents as the permutation-seed. There is one unfortunate consequence of this pattern. If new 1.10 server is created in a grid that has an old client, or has a new client but an old Introducer, then that client will see downgraded non-signed announcements, and thus will first upload shares with the TubID-based permutation-seed. Later, when the client and/or Introducer is upgraded, the client will start seeing signed announcements with the NodeID-based permutation-seed, and will then look for shares in the wrong place. This will hurt performance in a large grid, but should not affect reliability. This effect shouldn't even be noticeable in grids for which the number of servers is close to the "N" shares.total number (e.g. 
where num-servers < 3*N). And the as-yet-unimplemented "share rebalancing" feature should repair the misplacement. If you wish to avoid this effect, try to upgrade both Introducers and clients at about the same time. (Upgrading servers does not matter: they will continue to use the old permutation-seed). tahoe_lafs-1.20.0/docs/performance.rst0000644000000000000000000001732413615410400014611 0ustar00.. -*- coding: utf-8-with-signature -*- ============================================ Performance costs for some common operations ============================================ 1. `Publishing an A-byte immutable file`_ 2. `Publishing an A-byte mutable file`_ 3. `Downloading B bytes of an A-byte immutable file`_ 4. `Downloading B bytes of an A-byte mutable file`_ 5. `Modifying B bytes of an A-byte mutable file`_ 6. `Inserting/Removing B bytes in an A-byte mutable file`_ 7. `Adding an entry to an A-entry directory`_ 8. `Listing an A entry directory`_ 9. `Checking an A-byte file`_ 10. `Verifying an A-byte file (immutable)`_ 11. `Repairing an A-byte file (mutable or immutable)`_ ``K`` indicates the number of shares required to reconstruct the file (default: 3) ``N`` indicates the total number of shares produced (default: 10) ``S`` indicates the segment size (default: 128 KiB) ``A`` indicates the number of bytes in a file ``B`` indicates the number of bytes of a file that are being read or written ``G`` indicates the number of storage servers on your grid Most of these cost estimates may have a further constant multiplier: when a formula says ``N/K*S``, the cost may actually be ``2*N/K*S`` or ``3*N/K*S``. Also note that all references to mutable files are for SDMF-formatted files; this document has not yet been updated to describe the MDMF format. Publishing an ``A``-byte immutable file ======================================= when the file is already uploaded --------------------------------- If the file is already uploaded with the exact same contents, same erasure coding parameters (K, N), and same added convergence secret, then it reads the whole file from disk one time while hashing it to compute the storage index, then contacts about N servers to ask each one to store a share. All of the servers reply that they already have a copy of that share, and the upload is done. disk: A cpu: ~A network: ~N memory footprint: S when the file is not already uploaded ------------------------------------- If the file is not already uploaded with the exact same contents, same erasure coding parameters (K, N), and same added convergence secret, then it reads the whole file from disk one time while hashing it to compute the storage index, then contacts about N servers to ask each one to store a share. Then it uploads each share to a storage server. disk: 2*A cpu: 2*~A network: N/K*A memory footprint: N/K*S Publishing an ``A``-byte mutable file ===================================== cpu: ~A + a large constant for RSA keypair generation network: A memory footprint: N/K*A notes: Tahoe-LAFS generates a new RSA keypair for each mutable file that it publishes to a grid. This takes around 100 milliseconds on a relatively high-end laptop from 2021. Part of the process of encrypting, encoding, and uploading a mutable file to a Tahoe-LAFS grid requires that the entire file be in memory at once. For larger files, this may cause Tahoe-LAFS to have an unacceptably large memory footprint (at least when uploading a mutable file). 
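As a rough worked example for the "file is not already uploaded" immutable case above (the concrete numbers are not from the original text): with the default parameters K=3, N=10, and S=128 KiB, uploading a new A = 1 GiB immutable file costs approximately::

  disk:    2*A   = 2 GiB
  cpu:     2*~A  (two passes over roughly 1 GiB: hashing, then encoding)
  network: N/K*A = 10/3 * 1 GiB   ~= 3.3 GiB
  memory:  N/K*S = 10/3 * 128 KiB ~= 427 KiB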
Downloading ``B`` bytes of an ``A``-byte immutable file ======================================================= cpu: ~B network: B notes: When Tahoe-LAFS 1.8.0 or later is asked to read an arbitrary range of an immutable file, only the S-byte segments that overlap the requested range will be downloaded. (Earlier versions would download from the beginning of the file up until the end of the requested range, and then continue to download the rest of the file even after the request was satisfied.) Downloading ``B`` bytes of an ``A``-byte mutable file ===================================================== cpu: ~A network: A memory footprint: A notes: As currently implemented, mutable files must be downloaded in their entirety before any part of them can be read. We are exploring fixes for this; see ticket #393 for more information. Modifying ``B`` bytes of an ``A``-byte mutable file =================================================== cpu: ~A network: A memory footprint: N/K*A notes: If you upload a changed version of a mutable file that you earlier put onto your grid with, say, 'tahoe put --mutable', Tahoe-LAFS will replace the old file with the new file on the grid, rather than attempting to modify only those portions of the file that have changed. Modifying a file in this manner is essentially uploading the file over again, except that it re-uses the existing RSA keypair instead of generating a new one. Inserting/Removing ``B`` bytes in an ``A``-byte mutable file ============================================================ cpu: ~A network: A memory footprint: N/K*A notes: Modifying any part of a mutable file in Tahoe-LAFS requires that the entire file be downloaded, modified, held in memory while it is encrypted and encoded, and then re-uploaded. A future version of the mutable file layout ("LDMF") may provide efficient inserts and deletes. Note that this sort of modification is mostly used internally for directories, and isn't something that the WUI, CLI, or other interfaces will do -- instead, they will simply overwrite the file to be modified, as described in "Modifying B bytes of an A-byte mutable file". Adding an entry to an ``A``-entry directory =========================================== cpu: ~A network: ~A memory footprint: N/K*~A notes: In Tahoe-LAFS, directories are implemented as specialized mutable files. So adding an entry to a directory is essentially adding B (actually, 300-330) bytes somewhere in an existing mutable file. Listing an ``A`` entry directory ================================ cpu: ~A network: ~A memory footprint: N/K*~A notes: Listing a directory requires that the mutable file storing the directory be downloaded from the grid. So listing an A entry directory requires downloading a (roughly) 330 * A byte mutable file, since each directory entry is about 300-330 bytes in size. Checking an ``A``-byte file =========================== cpu: ~G network: ~G memory footprint: negligible notes: To check a file, Tahoe-LAFS queries all the servers that it knows about. Note that neither of these values directly depend on the size of the file. This is relatively inexpensive, compared to the verify and repair operations. Verifying an A-byte file (immutable) ==================================== cpu: ~N/K*A network: N/K*A memory footprint: N/K*S notes: To verify a file, Tahoe-LAFS downloads all of the ciphertext shares that were originally uploaded to the grid and integrity checks them. 
This is (for grids with good redundancy) more expensive than downloading an A-byte file, since only a fraction of these shares would be necessary to recover the file. Verifying an A-byte file (mutable) ================================== cpu: ~N/K*A network: N/K*A memory footprint: N/K*A notes: To verify a file, Tahoe-LAFS downloads all of the ciphertext shares that were originally uploaded to the grid and integrity checks them. This is (for grids with good redundancy) more expensive than downloading an A-byte file, since only a fraction of these shares would be necessary to recover the file. Repairing an ``A``-byte file (mutable or immutable) =================================================== cpu: variable, between ~A and ~N/K*A network: variable; between A and N/K*A memory footprint (immutable): (1+N/K)*S (SDMF mutable): (1+N/K)*A notes: To repair a file, Tahoe-LAFS downloads the file, and generates/uploads missing shares in the same way as when it initially uploads the file. So, depending on how many shares are missing, this can cost as little as a download or as much as a download followed by a full upload. Since SDMF files have only one segment, which must be processed in its entirety, repair requires a full-file download followed by a full-file upload. tahoe_lafs-1.20.0/docs/release-checklist.rst0000644000000000000000000002054613615410400015677 0ustar00 ================= Release Checklist ================= This release checklist specifies a series of checks that anyone engaged in releasing a version of Tahoe should follow. Any contributor can do the first part of the release preparation. Only certain contributors can perform other parts. These are the two main sections of this checklist (and could be done by different people). A final section describes how to announce the release. This checklist is based on the original instructions (in old revisions in the file `docs/how_to_make_a_tahoe-lafs_release.org`). Any Contributor =============== Anyone who can create normal PRs should be able to complete this portion of the release process. Prepare for the Release ``````````````````````` The `master` branch should always be releasable. It may be worth asking (on IRC or mailing-list) if anything will be merged imminently (for example, "I will prepare a release this coming Tuesday if you want to get anything in"). - Create a ticket for the release in Trac - Ticket number needed in next section - Making first release? See `GPG Setup Instructions `__ to make sure you can sign releases. [One time setup] Get a clean checkout ```````````````````` The release process involves compressing source files and putting them in formats suitable for distribution such as ``.tar.gz`` and ``zip``. Because of this, it's necessary that the release process begin with a clean checkout, to avoid making a release that includes previously generated files. - Inside the tahoe root dir run ``git clone . ../tahoe-release-x.x.x`` where ``x.x.x`` is the release number (such as 1.16.0). .. note:: The above command would create a new directory at the same level as your original clone named ``tahoe-release-x.x.x``. You can name this folder however you want but it would be a good practice to give it the release name. You MAY also discard this directory once the release process is complete.
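For instance, if the upcoming release were 1.16.0, the clean checkout described above could be created like this (the directory name is just a convention)::

  git clone . ../tahoe-release-1.16.0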
Get into the release directory and install dependencies by running: - cd ../tahoe-release-x.x.x (assuming you are still in your original clone) - python -m venv venv - ./venv/bin/pip install --editable .[test] Create Branch and Apply Updates ``````````````````````````````` - Create a branch for the release/candidate: git checkout -b XXXX.release-1.16.0 - produce a new NEWS.txt file (this does a commit): tox -e news - create the news for the release: - touch newsfragments/.minor - git add newsfragments/.minor - git commit -m news - manually fix ``NEWS.txt``: - proper title for latest release ("Release 1.16.0" instead of "Release ...post1432") - double-check date (maybe release will be in the future) - spot-check the release notes (these come from the newsfragments files though so don't do heavy editing) - commit these changes - update ``relnotes.txt``: - update all mentions of ``1.16.0`` to new and higher release version for example ``1.16.1`` - update "previous release" statement and date - summarize major changes - commit it - update ``nix/tahoe-lafs.nix``: - change the value given for `version` from `OLD.post1` to `NEW.post1` - update ``docs/known_issues.rst`` if appropriate - Push the branch to github - Create a (draft) PR; this should trigger CI (note that github doesn't let you create a PR without some changes on the branch so running + committing the NEWS.txt file achieves that without changing any code) - Confirm CI runs successfully on all platforms Create Release Candidate ```````````````````````` Before "officially" tagging any release, we will make a release-candidate available. So there will be at least 1.15.0rc0 (for example). If there are any problems, an rc1 or rc2 etc may also be released. Anyone can sign these releases (ideally they'd be signed "officially" as well, but it's better to get them out than to wait for that). Typically expert users will be the ones testing release candidates and they will need to evaluate which contributors' signatures they trust. - (all steps above are completed) - sign the release - git tag -s -u 0xE34E62D06D0E69CFCA4179FFBDE0D31D68666A7A -m "release Tahoe-LAFS-1.16.0rc0" tahoe-lafs-1.16.0rc0 .. note:: - Replace the key-id above with your own, which can simply be your email if it's attached to your fingerprint. - Don't forget to put the correct tag message and name. In this example, the tag message is "release Tahoe-LAFS-1.16.0rc0" and the tag name is ``tahoe-lafs-1.16.0rc0`` - build all code locally - these should all pass: - tox -e py37,codechecks,docs,integration - these can fail (ideally they should not of course): - tox -e deprecations,upcoming-deprecations - clone to a clean, local checkout (to avoid extra files being included in the release) - cd /tmp - git clone /home/meejah/src/tahoe-lafs - build tarballs - tox -e tarballs - Confirm that release tarballs exist by runnig: - ls dist/ | grep 1.16.0rc0 - inspect and test the tarballs - install each in a fresh virtualenv - run `tahoe` command - when satisfied, sign the tarballs: - gpg --pinentry=loopback --armor -u 0xE34E62D06D0E69CFCA4179FFBDE0D31D68666A7A --detach-sign dist/tahoe_lafs-1.16.0rc0-py2.py3-none-any.whl - gpg --pinentry=loopback --armor --detach-sign dist/tahoe_lafs-1.16.0rc0.tar.gz Privileged Contributor ====================== Steps in this portion require special access to keys or infrastructure. For example, **access to tahoe-lafs.org** to upload binaries or edit HTML. Hack Tahoe-LAFS ``````````````` Did anyone contribute a hack since the last release? 
If so, then https://tahoe-lafs.org/hacktahoelafs/ needs to be updated. Sign Git Tag ```````````` - git tag -s -u 0xE34E62D06D0E69CFCA4179FFBDE0D31D68666A7A -m "release Tahoe-LAFS-X.Y.Z" tahoe-lafs-X.Y.Z Upload Artifacts ```````````````` Any release-candidate or actual release plus signature (.asc file) need to be uploaded to https://tahoe-lafs.org in `~source/downloads` - secure-copy all release artifacts to the download area on the tahoe-lafs.org host machine. `~source/downloads` on there maps to https://tahoe-lafs.org/downloads/ on the Web: - scp dist/*1.15.0* username@tahoe-lafs.org:/home/source/downloads - the following developers have access to do this: - exarkun - meejah - warner Push the signed tag to the main repository: - git push origin tahoe-lafs-1.17.1 For the actual release, the tarball and signature files need to be uploaded to PyPI as well. - ls dist/*1.19.0* - twine upload --username __token__ --password `cat SECRET-pypi-tahoe-publish-token` dist/*1.19.0* The following developers have access to do this: - warner - meejah - exarkun (partial?) Announcing the Release Candidate ```````````````````````````````` The release-candidate should be announced by posting to the mailing-list (tahoe-dev@lists.tahoe-lafs.org). For example: https://lists.tahoe-lafs.org/pipermail/tahoe-dev/2020-October/009978.html Is The Release Done Yet? ```````````````````````` If anyone reports a problem with a release-candidate then a new release-candidate should be made once a fix has been merged to master. Repeat the above instructions with `rc1` or `rc2` or whatever is appropriate. Once a release-candidate has marinated for some time then it can be made into a the actual release. The actual release follows the same steps as above, with some differences: - there is no "-rcX" on the end of release names - the release is uploaded to PyPI (using Twine) - the version is tagged in Git (ideally using "the tahoe release key" but can be done with any of the authorized core developers' personal key) - the release-candidate branches must be merged back to master after the release is official (e.g. causing newsfragments to be deleted on master, etc) Announcing the Release ---------------------- mailing-lists ````````````` A new Tahoe release is traditionally announced on our mailing-list (tahoe-dev@tahoe-lafs.org). The former version of these instructions also announced the release on the following other lists: - tahoe-announce@tahoe-lafs.org - twisted-python@twistedmatrix.com - liberationtech@lists.stanford.edu - lwn@lwn.net - p2p-hackers@lists.zooko.com - python-list@python.org - http://listcultures.org/pipermail/p2presearch_listcultures.org/ - cryptopp-users@googlegroups.com wiki ```` Edit the "News" section of the front page of https://tahoe-lafs.org with a link to the mailing-list archive of the announcement message. tahoe_lafs-1.20.0/docs/requirements.txt0000644000000000000000000000014713615410400015035 0ustar00sphinx docutils<0.18 # https://github.com/sphinx-doc/sphinx/issues/9788 recommonmark sphinx_rtd_theme tahoe_lafs-1.20.0/docs/running.rst0000644000000000000000000002762013615410400013770 0ustar00.. -*- coding: utf-8-with-signature-unix; fill-column: 73; -*- .. -*- indent-tabs-mode: nil -*- ********************* How To Run Tahoe-LAFS ********************* Introduction ============ This is how to run a Tahoe-LAFS client or a complete Tahoe-LAFS grid. First you have to install the Tahoe-LAFS software, as documented in :doc:`Installing Tahoe-LAFS <../Installation/install-tahoe>`. 
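Once the software is installed, it can be useful to confirm that the ``tahoe`` command is available before creating any nodes (a quick sanity check, not a required step)::

  tahoe --version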
The ``tahoe`` program in your virtualenv's ``bin`` directory is used to create, start, and stop nodes. Each node lives in a separate base directory, in which there is a configuration file named ``tahoe.cfg``. Nodes read and write files within this base directory. A grid consists of a set of *storage nodes* and *client nodes* running the Tahoe-LAFS code. There is also an *introducer node* that is responsible for getting the other nodes talking to each other. If you're getting started we recommend you try connecting to the `public test grid`_ as you only need to create a client node. When you want to create your own grid you'll need to create the introducer and several initial storage nodes (see the note about small grids below). Being Introduced to a Grid -------------------------- A collection of Tahoe servers is called a Grid and usually has 1 Introducer (but sometimes more, and it's possible to run with zero). The Introducer announces which storage servers constitute the Grid and how to contact them. There is a secret "fURL" you need to know to talk to the Introducer. One way to get this secret is using traditional tools such as encrypted email, encrypted instant-messaging, etcetera. It is important to transmit this fURL secretly as knowing it gives you access to the Grid. An additional way to share the fURL securely is via `magic wormhole`_. This uses a weak one-time password and a server on the internet (at `wormhole.tahoe-lafs.org`) to open a secure channel between two computers. In Tahoe-LAFS this functions via the commands `tahoe invite` and `tahoe create-client --join`. A person who already has access to a Grid can use `tahoe invite` to create one end of the `magic wormhole`_ and then transmits some JSON (including the Introducer's secret fURL) to the other end. `tahoe invite` will print a one-time secret code; you must then communicate this code to the person who will join the Grid. The other end of the `magic wormhole`_ in this case is `tahoe create-client --join `, where the person being invited types in the code they were given. Ideally, this code would be transmitted securely. It is, however, only useful exactly once. Also, it is much easier to transcribe by a human. Codes look like `7-surrender-tunnel` (a short number and two words). Running a Client ---------------- To construct a client node, run “``tahoe create-client``”, which will create ``~/.tahoe`` to be the node's base directory. Acquire the ``introducer.furl`` (see below if you are running your own introducer, or use the one from the `TestGrid page`_), and write it to ``~/.tahoe/private/introducers.yaml`` (see :ref:`introducer-definitions`). Then use “``tahoe run ~/.tahoe``”. After that, the node should be off and running. The first thing it will do is connect to the introducer and get itself connected to all other nodes on the grid. Some Grids use "magic wormhole" one-time codes to configure the basic options. In such a case you use ``tahoe create-client --join `` and do not have to do any of the ``tahoe.cfg`` editing mentioned above. By default, “``tahoe create-client``” creates a client-only node, that does not offer its disk space to other nodes. To configure other behavior, use “``tahoe create-node``” or see :doc:`configuration`. The “``tahoe run``” command above will run the node in the foreground. ``tahoe --help`` gives a summary of all commands. 
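Putting the client steps above together, a minimal session might look like the following sketch (the fURL is whatever you obtained for your grid, and the YAML format is described in :ref:`introducer-definitions`)::

  tahoe create-client
  # paste the introducer fURL into this file, in the documented YAML format
  "$EDITOR" ~/.tahoe/private/introducers.yaml
  tahoe run ~/.tahoe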
Running a Server or Introducer ------------------------------ To build either a storage server node, or an introducer node, you'll need a way for clients to connect to it. The simplest case is when the computer is on the public internet (e.g. a "VPS" virtual private server, with a public IP address and a DNS hostname like ``example.net``). See :doc:`servers` for help with more complex scenarios, using the ``--port`` and ``--location`` arguments. To construct an introducer, create a new base directory for it (the name of the directory is up to you), ``cd`` into it, and run “``tahoe create-introducer --hostname=example.net .``” (but using the hostname of your VPS). Now run the introducer using “``tahoe run .``”. After it starts, it will write a file named ``introducer.furl`` into the ``private/`` subdirectory of that base directory. This file contains the URL the other nodes must use in order to connect to this introducer. You can distribute your Introducer fURL securely to new clients by using the ``tahoe invite`` command. This will prepare some JSON to send to the other side, request a `magic wormhole`_ code from ``wormhole.tahoe-lafs.org`` and print it out to the terminal. This one-time code should be transmitted to the user of the client, who can then run ``tahoe create-client --join ``. Storage servers are created the same way: ``tahoe create-node --hostname=HOSTNAME .`` from a new directory. You'll need to provide the introducer FURL (either as a ``--introducer=`` argument, or by editing the ``tahoe.cfg`` configuration file afterwards) to connect to the introducer of your choice. See :doc:`configuration` for more details about how to configure Tahoe-LAFS. .. _public test grid: https://tahoe-lafs.org/trac/tahoe-lafs/wiki/TestGrid .. _TestGrid page: https://tahoe-lafs.org/trac/tahoe-lafs/wiki/TestGrid .. _#937: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/937 .. _magic wormhole: https://magic-wormhole.io/ Multiple Instances ------------------ Running multiple instances against the same configuration directory isn't supported. This will lead to undefined behavior and could corrupt the configuration or state. We attempt to avoid this situation with a "pidfile"-style file in the config directory called ``running.process``. There may be a parallel file called ``running.process.lock`` in existence. The ``.lock`` file exists to make sure only one process modifies ``running.process`` at once. The lock file is managed by the `lockfile `_ library. If you wish to make use of ``running.process`` for any reason you should also lock it and follow the semantics of lockfile. If ``running.process`` exists then it contains the PID and the creation-time of the process. When no such file exists, there is no other process running on this configuration. If there is a ``running.process`` file, it may be a leftover file or it may indicate that another process is running against this config. To tell the difference, determine if the PID in the file exists currently. If it does, check the creation-time of the process versus the one in the file. If these match, there is another process currently running and using this config. Otherwise, the file is stale -- it should be removed before starting Tahoe-LAFS. Some example Python code to check the above situations: .. literalinclude:: check_running.py A note about small grids ------------------------ By default, Tahoe-LAFS ships with the configuration parameter ``shares.happy`` set to 7. 
If you are using Tahoe-LAFS on a grid with fewer than 7 storage nodes, this won't work well for you — none of your uploads will succeed. To fix this, see :doc:`configuration` to learn how to set ``shares.happy`` to a more suitable value for your grid. Development with Docker ----------------------- If you want to stand up a small local test environment, you can install `Docker`_ and `Docker Compose`_. Once you have cloned the repository, run ``docker-compose up`` from the project's root directory. This will start a introducer, server, and a client configured to connect to them. After the containers start, you can access the WUI by navigating to ``http://localhost:3456`` in your browser. .. _Docker: https://docs.docker.com/ .. _Docker Compose: https://docs.docker.com/compose/ Do Stuff With It ================ This is how to use your Tahoe-LAFS node. The WUI ------- Point your web browser to `http://127.0.0.1:3456`_ — which is the URL of the gateway running on your own local computer — to use your newly created node. Create a new directory (with the button labelled “create a directory”). Your web browser will load the new directory. Now if you want to be able to come back to this directory later, you have to bookmark it, or otherwise save a copy of the URL. If you lose the URL to this directory, then you can never again come back to this directory. .. _http://127.0.0.1:3456: http://127.0.0.1:3456 The CLI ------- Prefer the command-line? Run “``tahoe --help``” (the same command-line tool that is used to start and stop nodes serves to navigate and use the decentralized file store). To get started, create a new directory and mark it as the 'tahoe:' alias by running “``tahoe create-alias tahoe``”. Once you've done that, you can do “``tahoe ls tahoe:``” and “``tahoe cp LOCALFILE tahoe:foo.txt``” to work with your file store. The Tahoe-LAFS CLI uses similar syntax to the well-known scp and rsync tools. See :doc:`frontends/CLI` for more details. To backup a directory full of files and subdirectories, run “``tahoe backup LOCALDIRECTORY tahoe:``”. This will create a new LAFS subdirectory inside the “tahoe” LAFS directory named “Archive”, and inside “Archive”, it will create a new subdirectory whose name is the current date and time. That newly created subdirectory will be populated with a snapshot copy of all files and directories currently reachable from LOCALDIRECTORY. Then ``tahoe backup`` will make a link to that snapshot directory from the “tahoe” LAFS directory, and name the link “Latest”. ``tahoe backup`` cleverly avoids uploading any files or directories that haven't changed, and it also cleverly deduplicates any files or directories that have identical contents to other files or directories that it has previously backed-up. This means that running ``tahoe backup`` is a nice incremental operation that backs up your files and directories efficiently, and if it gets interrupted (for example by a network outage, or by you rebooting your computer during the backup, or so on), it will resume right where it left off the next time you run ``tahoe backup``. See :doc:`frontends/CLI` for more information about the ``tahoe backup`` command, as well as other commands. As with the WUI (and with all current interfaces to Tahoe-LAFS), you are responsible for remembering directory capabilities yourself. If you create a new directory and lose the capability to it, then you cannot access that directory ever again. The SFTP frontend ----------------- You can access your Tahoe-LAFS grid via any SFTP_ client. 
See :doc:`frontends/FTP-and-SFTP` for how to set this up. On most Unix platforms, you can also use SFTP to plug Tahoe-LAFS into your computer's local filesystem via ``sshfs``, but see the `FAQ about performance problems`_. The SftpFrontend_ page on the wiki has more information about using SFTP with Tahoe-LAFS. .. _SFTP: https://en.wikipedia.org/wiki/SSH_file_transfer_protocol .. _FAQ about performance problems: https://tahoe-lafs.org/trac/tahoe-lafs/wiki/FAQ#Q23_FUSE .. _SftpFrontend: https://tahoe-lafs.org/trac/tahoe-lafs/wiki/SftpFrontend The WAPI -------- Want to program your Tahoe-LAFS node to do your bidding? Easy! See :doc:`frontends/webapi`. Socialize ========= You can chat with other users of and hackers of this software on the #tahoe-lafs IRC channel at ``irc.libera.chat``, or on the `tahoe-dev mailing list`_. .. _tahoe-dev mailing list: https://lists.tahoe-lafs.org/mailman/listinfo/tahoe-dev Complain ======== Bugs can be filed on the Tahoe-LAFS "Trac" instance, at https://tahoe-lafs.org/trac/ . You can also "fork" the repo and submit Pull Requests on Github: https://github.com/tahoe-lafs/tahoe-lafs . tahoe_lafs-1.20.0/docs/servers.rst0000644000000000000000000002056613615410400014003 0ustar00========================= How To Configure A Server ========================= Many Tahoe-LAFS nodes run as "servers", meaning they provide services for other machines (i.e. "clients"). The two most important kinds are the Introducer, and Storage Servers. To be useful, servers must be reachable by clients. Tahoe servers can listen on TCP ports, and advertise their "location" (hostname and TCP port number) so clients can connect to them. They can also listen on Tor "onion services" and I2P ports. Storage servers advertise their location by announcing it to the Introducer, which then broadcasts the location to all clients. So once the location is determined, you don't need to do anything special to deliver it. The Introducer itself has a location, which must be manually delivered to all storage servers and clients. You might email it to the new members of your grid. This location (along with other important cryptographic identifiers) is written into a file named ``private/introducer.furl`` in the Introducer's base directory, and should be provided as the ``--introducer=`` argument to ``tahoe create-client`` or ``tahoe create-node``. The first step when setting up a server is to figure out how clients will reach it. Then you need to configure the server to listen on some ports, and then configure the location properly. Manual Configuration ==================== Each server has two settings in their ``tahoe.cfg`` file: ``tub.port``, and ``tub.location``. The "port" controls what the server node listens to: this is generally a TCP port. The "location" controls what is advertised to the outside world. This is a "foolscap connection hint", and it includes both the type of the connection (tcp, tor, or i2p) and the connection details (hostname/address, port number). Various proxies, port-forwardings, and privacy networks might be involved, so it's not uncommon for ``tub.port`` and ``tub.location`` to look different. You can directly control the ``tub.port`` and ``tub.location`` configuration settings by providing ``--port=`` and ``--location=`` when running ``tahoe create-node``. Automatic Configuration ======================= Instead of providing ``--port=/--location=``, you can use ``--listen=``. Servers can listen on TCP, Tor, I2P, a combination of those, or none at all. 
The ``--listen=`` argument controls which kinds of listeners the new server will use. ``--listen=none`` means the server should not listen at all. This doesn't make sense for a server, but is appropriate for a client-only node. The ``tahoe create-client`` command automatically includes ``--listen=none``. ``--listen=tcp`` is the default, and turns on a standard TCP listening port. Using ``--listen=tcp`` requires a ``--hostname=`` argument too, which will be incorporated into the node's advertised location. We've found that computers cannot reliably determine their externally-reachable hostname, so rather than having the server make a guess (or scanning its interfaces for IP addresses that might or might not be appropriate), node creation requires the user to provide the hostname. ``--listen=tor`` will talk to a local Tor daemon and create a new "onion server" address (which look like ``alzrgrdvxct6c63z.onion``). Likewise ``--listen=i2p`` will talk to a local I2P daemon and create a new server address. See :doc:`anonymity-configuration` for details. You could listen on all three by using ``--listen=tcp,tor,i2p``. Deployment Scenarios ==================== The following are some suggested scenarios for configuring servers using various network transports. These examples do not include specifying an introducer FURL which normally you would want when provisioning storage nodes. For these and other configuration details please refer to :doc:`configuration`. #. `Server has a public DNS name`_ #. `Server has a public IPv4/IPv6 address`_ #. `Server is behind a firewall with port forwarding`_ #. `Using I2P/Tor to Avoid Port-Forwarding`_ Server has a public DNS name ---------------------------- The simplest case is where your server host is directly connected to the internet, without a firewall or NAT box in the way. Most VPS (Virtual Private Server) and colocated servers are like this, although some providers block many inbound ports by default. For these servers, all you need to know is the external hostname. The system administrator will tell you this. The main requirement is that this hostname can be looked up in DNS, and it will map to an IPv4 or IPv6 address which will reach the machine. If your hostname is ``example.net``, then you'll create the introducer like this:: tahoe create-introducer --hostname example.com ~/introducer or a storage server like:: tahoe create-node --hostname=example.net These will allocate a TCP port (e.g. 12345), assign ``tub.port`` to be ``tcp:12345``, and ``tub.location`` will be ``tcp:example.com:12345``. Ideally this should work for IPv6-capable hosts too (where the DNS name provides an "AAAA" record, or both "A" and "AAAA"). However Tahoe-LAFS support for IPv6 is new, and may still have problems. Please see ticket `#867`_ for details. .. _#867: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/867 Server has a public IPv4/IPv6 address ------------------------------------- If the host has a routeable (public) IPv4 address (e.g. ``203.0.113.1``), but no DNS name, you will need to choose a TCP port (e.g. ``3457``), and use the following:: tahoe create-node --port=tcp:3457 --location=tcp:203.0.113.1:3457 ``--port`` is an "endpoint specification string" that controls which local port the node listens on. ``--location`` is the "connection hint" that it advertises to others, and describes the outbound connections that those clients will make, so it needs to work from their location on the network. Tahoe-LAFS nodes listen on all interfaces by default. 
When the host is multi-homed, you might want to make the listening port bind to just one specific interface by adding a ``interface=`` option to the ``--port=`` argument:: tahoe create-node --port=tcp:3457:interface=203.0.113.1 --location=tcp:203.0.113.1:3457 If the host's public address is IPv6 instead of IPv4, use square brackets to wrap the address, and change the endpoint type to ``tcp6``:: tahoe create-node --port=tcp6:3457 --location=tcp:[2001:db8::1]:3457 You can use ``interface=`` to bind to a specific IPv6 interface too, however you must backslash-escape the colons, because otherwise they are interpreted as delimiters by the Twisted "endpoint" specification language. The ``--location=`` argument does not need colons to be escaped, because they are wrapped by the square brackets:: tahoe create-node --port=tcp6:3457:interface=2001\:db8\:\:1 --location=tcp:[2001:db8::1]:3457 For IPv6-only hosts with AAAA DNS records, if the simple ``--hostname=`` configuration does not work, they can be told to listen specifically on an IPv6-enabled port with this:: tahoe create-node --port=tcp6:3457 --location=tcp:example.net:3457 Server is behind a firewall with port forwarding ------------------------------------------------ To configure a storage node behind a firewall with port forwarding you will need to know: * public IPv4 address of the router * the TCP port that is available from outside your network * the TCP port that is the forwarding destination * internal IPv4 address of the storage node (the storage node itself is unaware of this address, and it is not used during ``tahoe create-node``, but the firewall must be configured to send connections to this) The internal and external TCP port numbers could be the same or different depending on how the port forwarding is configured. If it is mapping ports 1-to-1, and the public IPv4 address of the firewall is 203.0.113.1 (and perhaps the internal IPv4 address of the storage node is 192.168.1.5), then use a CLI command like this:: tahoe create-node --port=tcp:3457 --location=tcp:203.0.113.1:3457 If however the firewall/NAT-box forwards external port *6656* to internal port 3457, then do this:: tahoe create-node --port=tcp:3457 --location=tcp:203.0.113.1:6656 Using I2P/Tor to Avoid Port-Forwarding -------------------------------------- I2P and Tor onion services, among other great properties, also provide NAT penetration without port-forwarding, hostnames, or IP addresses. So setting up a server that listens only on Tor is simple:: tahoe create-node --listen=tor For more information about using Tahoe-LAFS with I2p and Tor see :doc:`anonymity-configuration` tahoe_lafs-1.20.0/docs/stats.rst0000644000000000000000000002677013615410400013453 0ustar00.. -*- coding: utf-8-with-signature -*- ================ Tahoe Statistics ================ 1. `Overview`_ 2. `Statistics Categories`_ 3. `Using Munin To Graph Stats Values`_ Overview ======== Each Tahoe node collects and publishes statistics about its operations as it runs. These include counters of how many files have been uploaded and downloaded, CPU usage information, performance numbers like latency of storage server operations, and available disk space. The easiest way to see the stats for any given node is use the web interface. From the main "Welcome Page", follow the "Operational Statistics" link inside the small "This Client" box. If the welcome page lives at http://localhost:3456/, then the statistics page will live at http://localhost:3456/statistics . 
This presents a summary of the stats block, along with a copy of the raw counters. To obtain just the raw counters (in JSON format), use /statistics?t=json instead. Statistics Categories ===================== The stats dictionary contains two keys: 'counters' and 'stats'. 'counters' are strictly counters: they are reset to zero when the node is started, and grow upwards. 'stats' are non-incrementing values, used to measure the current state of various systems. Some stats are actually booleans, expressed as '1' for true and '0' for false (internal restrictions require all stats values to be numbers). Under both the 'counters' and 'stats' dictionaries, each individual stat has a key with a dot-separated name, breaking them up into groups like 'cpu_monitor' and 'storage_server'. The currently available stats (as of release 1.6.0 or so) are described here: **counters.storage_server.\*** this group counts inbound storage-server operations. They are not provided by client-only nodes which have been configured to not run a storage server (with [storage]enabled=false in tahoe.cfg) allocate, write, close, abort these are for immutable file uploads. 'allocate' is incremented when a client asks if it can upload a share to the server. 'write' is incremented for each chunk of data written. 'close' is incremented when the share is finished. 'abort' is incremented if the client abandons the upload. get, read these are for immutable file downloads. 'get' is incremented when a client asks if the server has a specific share. 'read' is incremented for each chunk of data read. readv, writev these are for immutable file creation, publish, and retrieve. 'readv' is incremented each time a client reads part of a mutable share. 'writev' is incremented each time a client sends a modification request. add-lease, renew, cancel these are for share lease modifications. 'add-lease' is incremented when an 'add-lease' operation is performed (which either adds a new lease or renews an existing lease). 'renew' is for the 'renew-lease' operation (which can only be used to renew an existing one). 'cancel' is used for the 'cancel-lease' operation. bytes_freed this counts how many bytes were freed when a 'cancel-lease' operation removed the last lease from a share and the share was thus deleted. bytes_added this counts how many bytes were consumed by immutable share uploads. It is incremented at the same time as the 'close' counter. **stats.storage_server.\*** allocated this counts how many bytes are currently 'allocated', which tracks the space that will eventually be consumed by immutable share upload operations. The stat is increased as soon as the upload begins (at the same time the 'allocated' counter is incremented), and goes back to zero when the 'close' or 'abort' message is received (at which point the 'disk_used' stat should incremented by the same amount). disk_total, disk_used, disk_free_for_root, disk_free_for_nonroot, disk_avail, reserved_space these all reflect disk-space usage policies and status. 'disk_total' is the total size of disk where the storage server's BASEDIR/storage/shares directory lives, as reported by /bin/df or equivalent. 'disk_used', 'disk_free_for_root', and 'disk_free_for_nonroot' show related information. 'reserved_space' reports the reservation configured by the tahoe.cfg [storage]reserved_space value. 'disk_avail' reports the remaining disk space available for the Tahoe server after subtracting reserved_space from disk_avail. All values are in bytes. 
accepting_immutable_shares this is '1' if the storage server is currently accepting uploads of immutable shares. It may be '0' if a server is disabled by configuration, or if the disk is full (i.e. disk_avail is less than reserved_space). total_bucket_count this counts the number of 'buckets' (i.e. unique storage-index values) currently managed by the storage server. It indicates roughly how many files are managed by the server. latencies.*.* these stats keep track of local disk latencies for storage-server operations. A number of percentile values are tracked for many operations. For example, 'storage_server.latencies.readv.50_0_percentile' records the median response time for a 'readv' request. All values are in seconds. These are recorded by the storage server, starting from the time the request arrives (post-deserialization) and ending when the response begins serialization. As such, they are mostly useful for measuring disk speeds. The operations tracked are the same as the counters.storage_server.* counter values (allocate, write, close, get, read, add-lease, renew, cancel, readv, writev). The percentile values tracked are: mean, 01_0_percentile, 10_0_percentile, 50_0_percentile, 90_0_percentile, 95_0_percentile, 99_0_percentile, 99_9_percentile. (the last value, 99.9 percentile, means that 999 out of the last 1000 operations were faster than the given number, and is the same threshold used by Amazon's internal SLA, according to the Dynamo paper). Percentiles are only reported in the case of a sufficient number of observations for unambiguous interpretation. For example, the 99.9th percentile is (at the level of thousandths precision) 9 thousandths greater than the 99th percentile for sample sizes greater than or equal to 1000, thus the 99.9th percentile is only reported for samples of 1000 or more observations. **counters.uploader.files_uploaded** **counters.uploader.bytes_uploaded** **counters.downloader.files_downloaded** **counters.downloader.bytes_downloaded** These count client activity: a Tahoe client will increment these when it uploads or downloads an immutable file. 'files_uploaded' is incremented by one for each operation, while 'bytes_uploaded' is incremented by the size of the file. **counters.mutable.files_published** **counters.mutable.bytes_published** **counters.mutable.files_retrieved** **counters.mutable.bytes_retrieved** These count client activity for mutable files. 'published' is the act of changing an existing mutable file (or creating a brand-new mutable file). 'retrieved' is the act of reading its current contents. **counters.chk_upload_helper.\*** These count activity of the "Helper", which receives ciphertext from clients and performs erasure-coding and share upload for files that are not already in the grid. The code which implements these counters is in src/allmydata/immutable/offloaded.py . upload_requests incremented each time a client asks to upload a file upload_already_present: incremented when the file is already in the grid upload_need_upload incremented when the file is not already in the grid resumes incremented when the helper already has partial ciphertext for the requested upload, indicating that the client is resuming an earlier upload fetched_bytes this counts how many bytes of ciphertext have been fetched from uploading clients encoded_bytes this counts how many bytes of ciphertext have been encoded and turned into successfully-uploaded shares. If no uploads have failed or been abandoned, encoded_bytes should eventually equal fetched_bytes. 
**stats.chk_upload_helper.\*** These also track Helper activity: active_uploads how many files are currently being uploaded. 0 when idle. incoming_count how many cache files are present in the incoming/ directory, which holds ciphertext files that are still being fetched from the client incoming_size total size of cache files in the incoming/ directory incoming_size_old total size of 'old' cache files (more than 48 hours) encoding_count how many cache files are present in the encoding/ directory, which holds ciphertext files that are being encoded and uploaded encoding_size total size of cache files in the encoding/ directory encoding_size_old total size of 'old' cache files (more than 48 hours) **stats.node.uptime** how many seconds since the node process was started **stats.cpu_monitor.\*** 1min_avg, 5min_avg, 15min_avg estimate of what percentage of system CPU time was consumed by the node process, over the given time interval. Expressed as a float, 0.0 for 0%, 1.0 for 100% total estimate of total number of CPU seconds consumed by node since the process was started. Ticket #472 indicates that .total may sometimes be negative due to wraparound of the kernel's counter. Using Munin To Graph Stats Values ================================= The misc/operations_helpers/munin/ directory contains various plugins to graph stats for Tahoe nodes. They are intended for use with the Munin_ system-management tool, which typically polls target systems every 5 minutes and produces a web page with graphs of various things over multiple time scales (last hour, last month, last year). Most of the plugins are designed to pull stats from a single Tahoe node, and are configured with the e.g. http://localhost:3456/statistics?t=json URL. The "tahoe_stats" plugin is designed to read from the JSON file created by the stats-gatherer. Some plugins are to be used with the disk watcher, and a few (like tahoe_nodememory) are designed to watch the node processes directly (and must therefore run on the same host as the target node). Please see the docstrings at the beginning of each plugin for details, and the "tahoe-conf" file for notes about configuration and installing these plugins into a Munin environment. .. _Munin: http://munin-monitoring.org/ Scraping Stats Values in OpenMetrics Format =========================================== Time Series DataBase (TSDB) software like Prometheus_ and VictoriaMetrics_ can parse statistics from the e.g. http://localhost:3456/statistics?t=openmetrics URL in OpenMetrics_ format. Software like Grafana_ can then be used to graph and alert on these numbers. You can find a pre-configured dashboard for Grafana at https://grafana.com/grafana/dashboards/16894-tahoe-lafs/. .. _OpenMetrics: https://openmetrics.io/ .. _Prometheus: https://prometheus.io/ .. _VictoriaMetrics: https://victoriametrics.com/ .. _Grafana: https://grafana.com/ tahoe_lafs-1.20.0/docs/subtree1.svg0000644000000000000000000004502013615410400014023 0ustar00 image/svg+xml File CHK: URI... File CHK: URI... File CHK: URI... foo bar baz.jpg My Music My Stuff cool.mp3 boring.mp3 ROOT tahoe_lafs-1.20.0/docs/ticket-triage.rst0000644000000000000000000000242513615410400015040 0ustar00============= Ticket Triage ============= Ticket triage is a weekly, informal ritual that is meant to solve the problem of tickets getting opened and then forgotten about. It is simple and keeps project momentum going and prevents ticket cruft. It fosters conversation around project tasks and philosophies as they relate to milestones. 
Process ------- - The role of Ticket Triager rotates regularly-ish, and is assigned ad hoc - The Triager needs a ``Trac`` account - The Triager looks at all the tickets that have been created in the last week (or month, etc.) - They can use a custom query or do this as the week progresses - BONUS ROUND: Dig up a stale ticket from the past - Assign each ticket to a milestone on the Roadmap - The following situations merit discussion: - A ticket doesn't have an appropriate milestone and we should create one - A ticket, in vanishingly rare circumstances, should be deleted - The ticket is spam - The ticket contains sensitive information and harm will come to one or more people if it continues to be distributed - A ticket could be assigned to multiple milestones - There is another question about a ticket - These tickets will be brought as necessary to one of our meetings (currently Tuesdays) for discussion tahoe_lafs-1.20.0/docs/write_coordination.rst0000644000000000000000000000166013615410400016206 0ustar00.. -*- coding: utf-8-with-signature -*- ================================== Avoiding Write Collisions in Tahoe ================================== Tahoe does not provide locking of mutable files and directories. If there is more than one simultaneous attempt to change a mutable file or directory, then an ``UncoordinatedWriteError`` may result. This might, in rare cases, cause the file or directory contents to be accidentally deleted. The user is expected to ensure that there is at most one outstanding write or update request for a given file or directory at a time. One convenient way to accomplish this is to make a different file or directory for each person or process that wants to write. If mutable parts of a file store are accessed via sshfs, only a single sshfs mount should be used. There may be data loss if mutable files or directories are accessed via two sshfs mounts, or written both via sshfs and from other clients. tahoe_lafs-1.20.0/docs/Installation/install-tahoe.rst0000644000000000000000000000543413615410400017514 0ustar00.. -*- coding: utf-8-with-signature-unix; fill-column: 77 -*- .. note: if you aren't reading the rendered form of these docs at http://tahoe-lafs.readthedocs.io/en/latest/ , then be aware that any ":doc:" links refer to other files in this docs/ directory ********************* Installing Tahoe-LAFS ********************* `Tahoe-LAFS`_ is a secure, decentralized, and fault-tolerant storage system. To see an overview of the architecture and security properties, see :doc:`Welcome to Tahoe LAFS! <../about-tahoe>` Tahoe-LAFS can be installed and used on any of the following operating systems. .. _Tahoe-LAFS: https://tahoe-lafs.org Microsoft Windows ================= To install Tahoe-LAFS on Windows: 1. Make sure you have Powershell installed. See `PowerShell installation `_. 2. Install the latest version of Python 3. Download the .exe file at the `python website `_. 3. Open the installer by double-clicking it. Select the **Add Python to PATH** check-box, then click **Install Now**. 4. Start PowerShell and enter the following command to verify python installation:: python --version 5. Enter the following command to install Tahoe-LAFS:: pip install tahoe-lafs 6. Verify installation by checking for the version:: tahoe --version If you want to hack on Tahoe's source code, you can install Tahoe in a ``virtualenv`` on your Windows Machine. To learn more, see :doc:`../build/build-on-windows`. 
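If you do go the ``virtualenv`` route, a minimal PowerShell sketch looks like the following; the directory name ``venv`` is only an example, and the build documentation linked above remains the authoritative reference::

   # Create and activate an isolated environment for Tahoe-LAFS.
   python -m venv venv
   .\venv\Scripts\Activate.ps1

   # Install into the virtualenv instead of the system-wide Python.
   pip install tahoe-lafs

   # Confirm that the install worked.
   tahoe --version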
Linux, BSD, or MacOS ==================== Tahoe-LAFS can be installed on MacOS, many Linux and BSD distributions. If you are using Ubuntu or Debian, run the following command to install Tahoe-LAFS:: apt-get install tahoe-lafs If you are working on MacOS or a Linux distribution which does not have Tahoe-LAFS packages, you can build it yourself: 1. Make sure the following are installed: * **Python 3's latest version**: Check for the version by running ``python --version``. * **pip**: Most python installations already include `pip`. However, if your installation does not, see `pip installation `_. 2. Install Tahoe-LAFS using pip:: pip install tahoe-lafs 3. Verify installation by checking for the version:: tahoe --version If you are looking to hack on the source code or run pre-release code, we recommend you install Tahoe-LAFS on a `virtualenv` instance. To learn more, see :doc:`../build/build-on-linux`. You can always write to the `tahoe-dev mailing list `_ or chat on the `Libera.chat IRC `_ if you are not able to get Tahoe-LAFS up and running on your deployment. tahoe_lafs-1.20.0/docs/_static/.empty0000644000000000000000000000000013615410400014322 0ustar00tahoe_lafs-1.20.0/docs/_static/media/image1.png0000644000000000000000000033656213615410400016144 0ustar00PNG  IHDR@y6bKGD pHYs ;tIME  %3B= IDATxwtU7$@BI$!{X@TE@QD(E(MT@RDE&) % B' ldI",&3wfg y+.|/}nW-?S 42_ӄJ*a~RN>[KYzηƕEE܇+j6S //o/SɒTh>C*d*He'sYk3U". ]2-|_œi9p߉?^EnL*TIE]hc5T8~_.mq[1j.0cWͣܟRSu/T`h84#U̙T89)9"z@GC S*0j tCJx`0H+_Өh@!8p@!8p@!8p@!8p@!8p@!8p@!8p@!8p@!8p@!8p@!8p@!8p@!8p@!8p@!8p@!8p@!8p@!8p@!8p@!8p@!8p@!8p@!8p@!8p@!8p@!8p@!8p@!8p@!8p@!8p@!8p@!8pTsV,"Œ>mN7}bM_+qYƲ2[˒e&˜^NL&LFFL&L&\L.22r1d4Ϳ^kFgc6\l7rvv @ᥤˊQLl ?ŦhD ),k#l0lÍAhMKJ{}5SbXdM엓] rc1\1x&Lw ϟw=j*..\r%`U*p … n;Ђ军ZZ\oAuˊ e.2rvoia2d0=djm֢b1$C[$'[lQ1xɸN|B_ObbburrR޼yoG]eZuGF(<"BQgNF"E3,<绺>0n0zOgٜ~sbbcu=XE3IIIYn_bS)*+;TpA:EDF*<2R 3gdZ%=_pa"x[>޷~RR-@iY(. 
V6*|Žk%s^qt=6|04i?pSL}18Mz9Ֆ;LWYWHFFԝC3+˶("Cy8cAuqΈa(q*g fr*~3 23 @lC|ݼ.-s}cx(5Vp]@m=#Ng]&hٱ}wiz{noH8ݲeۿ̉'ȼb55{ᰟ[W>}hG㢥)u/&~u f}ΰgMдiRX7^gQuIsk4N͞ l34YߡtՕ+x*df|;9}kYkcIAueu5*l,D#F (Nlv5h7Rm[w؏Sqn{^TYϰZe<4_8'{OubIO_[MS\S?;po[Gu]Ly;_ήS_u}vFq\flՑ zӃ.[^"ya݇=N.%qQ}gDᢋzz\36}jzlüuj m 񓶿/M;k+I#w>)tcn~7_QwVSUEʯ*:(6]?,ڞQKݫ;nqku2Wѻj;A6lJgz46'I~r[Xyʯ*j[F&aQE}Mr\YYV 3WES 5t{;RsG,Ygb$iZ5?cnYh˞ܪBmJ+eFx]nN4$&ō="UWEE:e5QPol73WB+.\E5)6ʙU+~ytq~U=G -ZƂKפ;Z~ʷq#UbYz},=:cƚh.#E˘?_>Gr=o!dl.#7GFnN l]Z7 rqRiߔe2>PFyY_le:xL}<0aF=wɘ7WFv Njtv2~m}wJ_%˙xpjK-XVϑd/\eXphC.IÛ>;),^ܷ aWm$iqjfm9?ߪ6:?a.sKUv3MX I_K[qm_5CKV+PUE*) =C$(i?ޠkZ1k+͎pzuCVvdؿ/MyC ӽ߷y=fm6{ڹUoh}P'psquRZnX{4~?C> z{J+ۭw*/Z*U?K.l+IףУPg,ze zO R&[yxhSUVbݽM]|]9n1iz?[m,ڦq_]DXxjc6Z;(QB{?v󀋕_URiCVMz{`Jk˵.UWzV.ouьikNX*7Jeu/|>WK3,>b=f1r]<ߏY5uZR3hvx*/ZuU(ReC5?D;ÆXRƼy2zur)6)2.r!͊%x-[bf[M!7K+3ްgvm]E? dδʕM7H2%iiRz-]`gʫem]o˘dhR7iC>>7U6Tu yݫNsp3Gzk*;L ~2e*l=#+XW>S|*fnа5vzkVK+/%UY)$iDĀǻ93/YWkvtz'ڠ4YMNK8FYy-@=9zn_VnýelJo-٩߳ٹ+0$ݞAe%G̵:6zVSs2~OiQtJx]V߹|\U赍/UbXҸuGG>SnX[J4uG56z$[gݗO틟Җ4MJQ W~U2+rm?<?ƕ}aJ>n.nu\([״*t ޣ'=59a}b۬%kccFj hhxELzF DCl5gu;A'Ф1rZZskï״7˷qH ۬zA'ǍGAs^=ߥuZIJUPVofaѴQSbՆ7nq=Nַ'bHq۠뾡G&ojur1\tZ15qBZQI-VӪԲLW)3PObvٖѤ1J􏕷ô!^Azizf] :oRSUP-C}Civ 1 C7< Vm-0,3|?-?'2Ǎm wyȺ`፝Pjkd[#1Q֯yi'̑dN>6iJYY2,HOrΙ'{Ⱥx7$s`),L2?W.%2Ǐ\]ji7+e^ֲ!en˶(u2֬,W^%MsY)'\.S$h}skuZSYѨV {ccF"+,O-Y|[tn ('y ]suW|:E -CjhO?tUuzc[Ko.K:C75 PUvpU}SGM锲 GҽCWnUVnβ,՛ S&O||\$IK3Wo3*w;bS rT]TY ^V*O4b=?# ַ'~!=㶒tU_}tSexm8/KטjWP9e6RYZAe #ӓKLIuM!@)ũ*.VoŲ?Ve"WyUE2dK0/ZnmPzyJޡ UoT&zMm+Im S,TZST*Qg|# ~*UE\]U]r osllExj9;kj6?Y+r'BBz9*P]|"48gNJ34SUo)M}Ze}K#.@f2bbdڧC>L'm(eeI11҈2;)qĜiJҦMQ.!wm[L)'6TLMfpsmc-`cJRt̸A}lۇJC DYZP:D>c֞b6 p3Mg)8U uJUnԖt+WU(o7/E+/Z~$˵ J-ݥ%2 C^! o5Қ:m.ڡ% Ѡ$;s+ \"E4c(VrmX#3i?\E;t|(6!IRYm'X=u|LSʳRj%wQ\ d*hk?񋒗'57:Sw½C:}_tvh%~1@#"|P ][F{ |(Rm}NChHX~f&=0xݾGO+RRo*s i .rpvqPamS?^MH9n.nKB{v}0#Eü3a ,tC8#@oRgЊDu|=[#u/u/ xjS`C8 Np:@!tC8 Np:@!tC8 Np:@!tC8 Np:@!tٵ9aT^_A!p l S]ZOA8"WVMZ1'TZ_&Ir=5$}_%hBOKS}Z5=y8'鬰eo=pvu*kE46S:s&+E8ɓU1k1N LPnIDATx] UQI(B*IId*E&PfJS#Ei|Dd(BȐ!SYu:ss}|9{M{w B䰲fByXYYX-DL1K{2.޲? @,5ityŠADÆ Er0nݺ+GyX22FEcR| XXR|8 Gkǎ)dabIfJV>~G+VBJH0$z#k۶m;[o*% W48' >Uc<, @,HHoWeabIF d_T0001F(c7, @ @ ǑVC&T ?0(F<DQ P9IjدÖzԩ##6uLdi5jJ/}%־rb ve).[ 6L??  Pߔ@Y߯6m-FI0caah<p|s  !F4[:7.=نMj zCt58c۶aC!f۷_Z7YX :x`  з`e - mM7$lfa*Y986(>ttNO,Y^Ё0cu[(05aΙ3GԫWo7}$쳼@S#|bYevA}-=*D͏ ^6N I52Sհ @As C홇{ؽ~'ՓhCr.}K-[,=9L/#JQPx]1UV0xnIs{CoQ~'+|֏(w(oAoaW_};ѱ*A-"{,«F+\E2&ϟ/QvYmF>6@<x%ϧ?Zݥ|yϞ=[x㍢[nCj-[1b3~(O)K1*d iT-HW"77F7?FlF/4N7?=VB,Yb'\r4'_F}8eT"vءq\:vvr7Pg{챇ѣظq."@vdZFiǶmD oPfWEe?xvvms?<{'x1E.@3gΌׅ]w7לxrmKe#//O矺-eY>^1`(S;v.ny @(t%ӧ}@1e,\06GH3&-y뭷V/Kcƌ='ZZףL%Rn4ΝtIƶ4oޜ*g Yˌ{O/_3_qc/^\Ʌ)y6o,f͚_|!]t:29f;tWT(v q?>'aNS&uH]~g;#+Wͅ /ݨQte]p*q@/ xΧÞ):L{h}pu RXX(ʖ-/R;^F%)ȳ@վ}{# $7=Zjb0Bb$tcKw}PKEaeQr h)7ozecJIw'%]~)Oի@ư/|VB^xy(\0=#^Bؽ+|@=tڎ MU-m :U}-f:Y_H=\}J@|%) $?9 &W5FU\R @j2)Buc-V&`_u05E!6m2+zn.1>uX *4v onFW+>o$ ?6.K+%\o/FV%+g?Ma8-f$wa.tjW$iB2BꩀB/QfMc]W=/G֗jbc[J 6 uҥvCZ$@%ڱh"}R:N|&ǥd݈;w fx@\ +>O1z\<~dŎ/O>S*gdmH+3 JvЕCYLDI9?a:ej)^Ľ>@(N:˷r,G5̯.TsiG-tGV8j)(>ǫ)A'B f:s饗c| P#M"yCŅHw? 
tahoe_lafs-1.20.0/docs/_templates/.empty0000644000000000000000000000000013615410400015031 0ustar00
tahoe_lafs-1.20.0/docs/build/build-on-desert-island.rst0000644000000000000000000001276213615410400017655 0ustar00***************************************
Building Tahoe-LAFS On A Desert Island
***************************************

(or an airplane, or anywhere else without internet connectivity)

Here's the story: you leave for the airport in an hour, you know you want to
do some Tahoe hacking on the flight. What can you grab right now that will
let you install the necessary dependencies later, when you are offline?

Pip can help, with a technique described in the pip documentation
https://pip.pypa.io/en/stable/user_guide/#installing-from-local-packages .

First, do two setup steps:

* ``mkdir ~/.pip/wheels``
* edit ``~/.pip/pip.conf`` to set ``[global] find-links = ~/.pip/wheels``

(the filename may vary on non-unix platforms: check the pip documentation
for details)

This instructs all ``pip install`` commands to look in your local directory
for compiled wheels, in addition to asking PyPI and the normal wheel cache.

Before you get shipwrecked (or leave the internet for a while), do this from
your tahoe source tree (or any python source tree that you want to hack on):

* ``pip wheel -w ~/.pip/wheels .``

That command will require network and time: it will download and compile
whatever is necessary right away. Schedule your shipwreck for *after* it
completes.

Specifically, it will get wheels for everything that the current project
(".", i.e. tahoe) needs, and write them to the ``~/.pip/wheels`` directory.
It will query PyPI to learn the current version of every dependency, then
acquire wheels from the first source that has one:

* copy from our ``~/.pip/wheels`` directory
* copy from the local wheel cache (see below for where this lives)
* download a wheel from PyPI
* build a wheel from a tarball (cached or downloaded)

Later, on the plane, do this:

* ``virtualenv --no-download ve``
* ``. ve/bin/activate``
* ``pip install --no-index --editable .``

That tells virtualenv/pip to not try to contact PyPI, and your ``pip.conf``
"find-links" tells them to use the wheels in ``~/.pip/wheels/`` instead.

How This Works
==============

The pip wheel cache
-------------------

Modern versions of pip and setuptools will, by default, cache both their
HTTP downloads and their generated wheels. When pip is asked to install a
package, it will first check with PyPI. If the PyPI index says it needs to
download a newer version, but it can find a copy of the tarball/zipball/wheel
in the HTTP cache, it will not actually download anything. Then it tries to
build a wheel: if it already has one in the wheel cache (downloaded or built
earlier), it will not actually build anything.
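For reference, the ``find-links`` setting described in the setup steps above
amounts to a ``~/.pip/pip.conf`` along these lines (a sketch only; the file's
name and location may differ on non-unix platforms, as noted earlier)::

    # ~/.pip/pip.conf
    [global]
    find-links = ~/.pip/wheels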
If it cannot contact PyPI, it will fail. The ``--no-index`` above is to tell it to skip the PyPI step, but that leaves it with no source of packages. The ``find-links`` setting is what provides an alternate source of packages. The HTTP and wheel caches are not single flat directories: they use a hierarchy of subdirectories, named after a hash of the URL or name of the object being stored (this is to avoid filesystem limitations on the size of a directory). As a result, the wheel cache is not suitable for use as a ``find-links`` target (but see below). There is a command named ``pip wheel`` which only creates wheels (and stores them in ``--wheel-dir=``, which defaults to the current directory). This command does not populate the wheel cache: it reads from (and writes to) the HTTP cache, and reads from the wheel cache, but will only save the generated wheels into the directory you specify with ``--wheel-dir=``. Where Does The Cache Live? -------------------------- Pip's cache location depends upon the platform. On linux, it defaults to ~/.cache/pip/ (both http/ and wheels/). On OS-X (homebrew), it uses ~/Library/Caches/pip/ . On Windows, try ~\AppData\Local\pip\cache . The location can be overridden by ``pip.conf``. Look for the "wheel-dir", "cache-dir", and "find-links" options. How Can I Tell If It's Using The Cache? --------------------------------------- When "pip install" has to download a source tarball (and build a wheel), it will say things like:: Collecting zfec Downloading zfec-1.4.24.tar.gz (175kB) Building wheels for collected packages: zfec Running setup.py bdist_wheel for zfec ... done Stored in directory: $CACHEDIR Successfully built zfec Installing collected packages: zfec Successfully installed zfec-1.4.24 When "pip install" can use a cached downloaded tarball, but does not have a cached wheel, it will say:: Collecting zfec Using cached zfec-1.4.24.tar.gz Building wheels for collected packages: zfec Running setup.py bdist_wheel for zfec ... done Stored in directory: $CACHEDIR Successfully built zfec Installing collected packages: zfec Successfully installed zfec-1.4.24 When "pip install" can use a cached wheel, it will just say:: Collecting zfec Installed collected packages: zfec Successfully installed zfec-1.4.24 Many packages publish pre-built wheels next to their source tarballs. This is common for non-platform-specific (pure-python) packages. It is also common for them to provide pre-compiled windows and OS-X wheel, so users do not have to have a compiler installed (pre-compiled Linux wheels are not common, because there are too many platform variations). When "pip install" can use a downloaded wheel like this, it will say:: Collecting six Downloading six-1.10.0-py2.py3-none-any.whl Installing collected packages: six Successfully installed six-1.10.0 Note that older versions of pip do not always use wheels, or the cache. Pip 8.0.0 or newer should be ok. The version of setuptools may also be significant. tahoe_lafs-1.20.0/docs/build/build-on-linux.rst0000644000000000000000000000546213615410400016255 0ustar00**************************** Building Tahoe-LAFS on Linux **************************** Tahoe-LAFS has made packages available for installing on many linux and BSD distributions. Debian and Ubuntu users can use ``apt-get install tahoe-lafs``. 
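For example, on a Debian or Ubuntu machine, installing the packaged release
and checking that it works might look like this (a sketch; you may need root
privileges, and the packaged version may lag behind the newest release)::

    % sudo apt-get install tahoe-lafs
    % tahoe --version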
If you are working on a Linux distribution which does not have Tahoe-LAFS or are looking to hack on the source code, you can build Tahoe-LAFS yourself: Prerequisites ============= Make sure the following are installed: * **Python 3's latest version**: Check for the version by running ``python --version``. * **pip**: Most python installations already include ``pip``. However, if your installation does not, see `pip installation `_. * **virtualenv**: Use ``pip`` to install virtualenv:: pip install --user virtualenv * **C compiler and libraries**: * ``python-dev``: Python development headers. * ``libffi-dev``: Foreign Functions Interface library. * ``libssl-dev``: SSL library, Tahoe-LAFS needs OpenSSL version 1.1.1c or greater. .. note:: If you are working on Debian or Ubuntu, you can install the necessary libraries using ``apt-get``:: apt-get install python-dev libffi-dev libssl-dev On an RPM-based system such as Fedora, you can install the necessary libraries using ``yum`` or ``rpm``. However, the packages may be named differently. Install the Latest Tahoe-LAFS Release ===================================== If you are looking to hack on the source code or run pre-release code, we recommend you install Tahoe-LAFS directly from source by creating a ``virtualenv`` instance: 1. Clone the Tahoe-LAFS repository:: git clone https://github.com/tahoe-lafs/tahoe-lafs.git 2. Move into the tahoe-lafs directory:: cd tahoe-lafs 3. Create a fresh virtualenv for your Tahoe-LAFS install:: virtualenv venv .. note:: venv is the name of the virtual environment in this example. Use any name for your environment. 4. Upgrade ``pip`` and ``setuptools`` on the newly created virtual environment:: venv/bin/pip install -U pip setuptools 5. If you'd like to modify the Tahoe source code, you need to install Tahoe-LAFS with the ``--editable`` flag with the ``test`` extra:: venv/bin/pip install --editable .[test] .. note:: Tahoe-LAFS provides extra functionality when requested explicitly at installation using the "extras" feature of setuptools. To learn more about the extras which Tahoe supports, see Tahoe extras. 6. Verify installation by checking for the version:: venv/bin/tahoe --version If you do not want to use the full path, i.e., ``venv/bin/tahoe`` everytime you want to run tahoe, you can activate the ``virtualenv``:: . venv/bin/activate This will generate a subshell with a ``$PATH`` that includes the ``venv/bin/`` directory. tahoe_lafs-1.20.0/docs/build/build-on-windows.rst0000644000000000000000000000315713615410400016607 0ustar00****************************** Building Tahoe-LAFS on Windows ****************************** If you are looking to hack on the source code or run pre-release code, we recommend you create a virtualenv instance and install Tahoe-LAFS into that: 1. Make sure you have Powershell installed. See `PowerShell installation `_. 2. Install the latest version of Python 3. Download the .exe file at the `python website `_. 3. Open the installer by double-clicking it. Select the **Add Python to PATH** check-box, then click **Install Now**. 4. Start PowerShell and enter the following command to verify python installation:: python --version 5. Use ``pip`` to install ``virtualenv``:: pip install --user virtualenv 6. Create a fresh virtualenv for your Tahoe-LAFS install using the following command:: virtualenv venv .. note:: venv is the name of the virtual environment in this example. Use any name for your environment. 7. 
Use pip to install Tahoe-LAFS in the virtualenv instance::

     venv\Scripts\pip install tahoe-lafs

8. Verify installation by checking for the version::

     venv\Scripts\tahoe --version

If you do not want to use the full path, i.e. ``venv\Scripts\tahoe`` every time you want to run tahoe, you can:

* Activate the virtualenv::

     . venv\Scripts\activate

  This will generate a subshell with a ``$PATH`` that includes the ``venv\Scripts\`` directory.

* Change your ``$PATH`` to include the ``venv\Scripts`` directory.
tahoe_lafs-1.20.0/docs/build/build-pyOpenSSL.rst0000644000000000000000000000754213615410400016341 0ustar00Building pyOpenSSL on Windows
=============================

This document details the steps to build a pyOpenSSL egg with an embedded OpenSSL library, for use by Tahoe-LAFS on Windows. The instructions were tried on Windows 7 64-bit and Windows XP 32-bit. They should work on other versions of Windows, maybe with minor variations.

Download and install Microsoft Visual C++ compiler for Python 2.7
------------------------------------------------------------------

For reasons detailed in `the Python documentation`_, Python extension modules need to be built using a compiler compatible with the same version of Visual C++ that was used to build Python itself. Until recently, this meant downloading Microsoft Visual Studio 2008 Express Edition and Windows SDK 3.5. The recent release of the Microsoft Visual C++ compiler for Python 2.7 made things a lot simpler.

So, the first step is to download and install the C++ compiler from Microsoft using `this link`_. Find the location where it installed the ``vcvarsall.bat`` file; depending on the version of Windows it could be either ``"%USERPROFILE%\AppData\Local\Programs\Common\Microsoft\Visual C++ for Python\9.0"`` or ``"%CommonProgramFiles%\Microsoft\Visual C++ for Python\9.0"``, for example. We'll call this ``%VCDIR%`` below.

.. _the Python documentation: https://docs.python.org/2/extending/windows.html
.. _this link: https://www.microsoft.com/en-us/download/details.aspx?id=44266

Download and install Perl
-------------------------

Download and install ActiveState Perl:

* go to `the ActiveState Perl download page`_.
* identify the correct link and manually change it from http to https.

.. _the ActiveState Perl download page: https://www.activestate.com/activeperl/downloads

Download and install the latest OpenSSL version
-----------------------------------------------

* Download the latest OpenSSL from `the OpenSSL source download page`_ and untar it. At the time of writing, the latest version was OpenSSL 1.0.1m.

* Set up the build environment. For 64-bit Windows::

    "%VCDIR%\vcvarsall.bat" amd64

  or for 32-bit Windows::

    "%VCDIR%\vcvarsall.bat" x86

* Go to the untarred OpenSSL source base directory. For 64-bit Windows, run::

    mkdir c:\dist
    perl Configure VC-WIN64A --prefix=c:\dist\openssl no-asm enable-tlsext
    ms\do_win64a.bat
    nmake -f ms\ntdll.mak
    nmake -f ms\ntdll.mak install

  or for 32-bit Windows, run::

    mkdir c:\dist
    perl Configure VC-WIN32 --prefix=c:\dist\openssl no-asm enable-tlsext
    ms\do_ms.bat
    nmake -f ms\ntdll.mak
    nmake -f ms\ntdll.mak install

To check that it is working, run ``c:\dist\openssl\bin\openssl version``.

.. _the OpenSSL source download page: https://www.openssl.org/source/

Building PyOpenSSL
------------------

* Download and untar pyOpenSSL 0.13.1 (see `ticket #2221`_ for why we currently use this version). The MD5 hash of pyOpenSSL-0.13.1.tar.gz is e27a3b76734c39ea03952ca94cc56715.
* Set up the build environment by running ``vcvarsall.bat`` as for building OpenSSL above. * Set OpenSSL ``LIB``, ``INCLUDE`` and ``PATH``:: set LIB=c:\dist\openssl\lib;%LIB% set INCLUDE=c:\dist\openssl\include;%INCLUDE% set PATH=c:\dist\openssl\bin;%PATH% * A workaround is needed to ensure that the setuptools ``bdist_egg`` command is available. Edit pyOpenSSL's ``setup.py`` around line 13 as follows:: < from distutils.core import Extension, setup --- > from setuptools import setup > from distutils.core import Extension * Run ``python setup.py bdist_egg`` The generated egg will be in the ``dist`` directory. It is a good idea to check that Tahoe-LAFS is able to use it before uploading the egg to tahoe-lafs.org. This can be done by putting it in the ``tahoe-deps`` directory of a Tahoe-LAFS checkout or release, then running ``python setup.py test``. .. _ticket #2221: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2221 tahoe_lafs-1.20.0/docs/frontends/CLI.rst0000644000000000000000000005621413615410400014722 0ustar00.. -*- coding: utf-8-with-signature -*- =========================== The Tahoe-LAFS CLI commands =========================== 1. `Overview`_ 2. `CLI Command Overview`_ 1. `Unicode Support`_ 3. `Node Management`_ 4. `File Store Manipulation`_ 1. `Starting Directories`_ 2. `Command Syntax Summary`_ 3. `Command Examples`_ 5. `Storage Grid Maintenance`_ 6. `Debugging`_ Overview ======== Tahoe-LAFS provides a single executable named "``tahoe``", which can be used to create and manage client/server nodes, manipulate the file store, and perform several debugging/maintenance tasks. This executable is installed into your virtualenv when you run ``pip install tahoe-lafs``. CLI Command Overview ==================== The "``tahoe``" tool provides access to three categories of commands. * node management: create a client/server node, start/stop/restart it * file store manipulation: list files, upload, download, unlink, rename * debugging: unpack cap-strings, examine share files To get a list of all commands, just run "``tahoe``" with no additional arguments. "``tahoe --help``" might also provide something useful. Running "``tahoe --version``" will display a list of version strings, starting with the "allmydata" module (which contains the majority of the Tahoe-LAFS functionality) and including versions for a number of dependent libraries, like Twisted, Foolscap, cryptography, and zfec. "``tahoe --version-and-path``" will also show the path from which each library was imported. On Unix systems, the shell expands filename wildcards (``'*'`` and ``'?'``) before the program is able to read them, which may produce unexpected results for many ``tahoe`` comands. We recommend, if you use wildcards, to start the path with "``./``", for example "``tahoe cp -r ./* somewhere:``". This prevents the expanded filename from being interpreted as an option or as an alias, allowing filenames that start with a dash or contain colons to be handled correctly. On Windows, a single letter followed by a colon is treated as a drive specification rather than an alias (and is invalid unless a local path is allowed in that context). Wildcards cannot be used to specify multiple filenames to ``tahoe`` on Windows. Unicode Support --------------- As of Tahoe-LAFS v1.7.0 (v1.8.0 on Windows), the ``tahoe`` tool supports non-ASCII characters in command lines and output. On Unix, the command-line arguments are assumed to use the character encoding specified by the current locale (usually given by the ``LANG`` environment variable). 
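For example, to make sure non-ASCII arguments are interpreted as UTF-8, you
could select a UTF-8 locale before invoking the tool (a sketch; the locale
name ``en_US.UTF-8`` is only an assumption and will vary from system to
system, and the filename is purely illustrative)::

    % export LANG=en_US.UTF-8
    % tahoe cp ./résumé.txt tahoe: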
If a name to be output contains control characters or characters that cannot be represented in the encoding used on your terminal, it will be quoted. The quoting scheme used is similar to `POSIX shell quoting`_: in a "double-quoted" string, backslashes introduce escape sequences (like those in Python strings), but in a 'single-quoted' string all characters stand for themselves. This quoting is only used for output, on all operating systems. Your shell interprets any quoting or escapes used on the command line.

.. _`POSIX shell quoting`: http://pubs.opengroup.org/onlinepubs/009695399/utilities/xcu_chap02.html

Node Management
===============

"``tahoe create-node [NODEDIR]``" is the basic make-a-new-node command. It creates a new directory and populates it with files that will allow the "``tahoe run``" and related commands to use it later on. ``tahoe create-node`` creates nodes that have client functionality (upload/download files), web API services (controlled by the '[node]web.port' configuration), and storage services (unless ``--no-storage`` is specified).

NODEDIR defaults to ``~/.tahoe/``, and newly-created nodes default to publishing a web server on port 3456 (limited to the loopback interface, at 127.0.0.1, to restrict access to other programs on the same host). All of the other "``tahoe``" subcommands use corresponding defaults.

"``tahoe create-client [NODEDIR]``" creates a node with no storage service. That is, it behaves like "``tahoe create-node --no-storage [NODEDIR]``". (This is a change from versions prior to v1.6.0.)

"``tahoe create-introducer [NODEDIR]``" is used to create the Introducer node. This node provides introduction services and nothing else. When started, this node will produce a ``private/introducer.furl`` file, which should be published to all clients.

Running Nodes
-------------

No matter what kind of node you created, the correct way to run it is to use the ``tahoe run`` command. "``tahoe run [NODEDIR]``" will start a previously-created node in the foreground. This command functions the same way on all platforms and logs to stdout. If you want to run the process as a daemon, it is recommended that you use your favourite daemonization tool.

File Store Manipulation
=======================

These commands let you examine a Tahoe-LAFS file store, providing basic list/upload/download/unlink/rename/mkdir functionality. They can be used as primitives by other scripts. Most of these commands are fairly thin wrappers around web-API calls, which are described in :doc:`webapi`.

By default, all file store manipulation commands look in ``~/.tahoe/`` to figure out which Tahoe-LAFS node they should use. When the CLI command makes web-API calls, it will use ``~/.tahoe/node.url`` for this purpose: a running Tahoe-LAFS node that provides a web-API port will write its URL into this file. If you want to use a node on some other host, just create ``~/.tahoe/`` and copy that node's web-API URL into this file, and the CLI commands will contact that node instead of a local one.

These commands also use a table of "aliases" to figure out which directory they ought to use as a starting point. This is explained in more detail below.

Starting Directories
--------------------

As described in :doc:`../architecture`, the Tahoe-LAFS distributed file store consists of a collection of directories and files, each of which has a "read-cap" or a "write-cap" (also known as a URI).
Each directory is simply a table that maps a name to a child file or directory, and this table is turned into a string and stored in a mutable file. The whole set of directory and file "nodes" are connected together into a directed graph. To use this collection of files and directories, you need to choose a starting point: some specific directory that we will refer to as a "starting directory". For a given starting directory, the "``ls [STARTING_DIR]``" command would list the contents of this directory, the "``ls [STARTING_DIR]/dir1``" command would look inside this directory for a child named "``dir1``" and list its contents, "``ls [STARTING_DIR]/dir1/subdir2``" would look two levels deep, etc. Note that there is no real global "root" directory, but instead each starting directory provides a different, possibly overlapping perspective on the graph of files and directories. Each Tahoe-LAFS node remembers a list of starting points, called "aliases", which are short Unicode strings that stand in for a directory read- or write- cap. They are stored (encoded as UTF-8) in the file ``NODEDIR/private/aliases`` . If you use the command line "``tahoe ls``" without any "[STARTING_DIR]" argument, then it will use the default alias, which is ``tahoe:``, therefore "``tahoe ls``" has the same effect as "``tahoe ls tahoe:``". The same goes for the other commands that can reasonably use a default alias: ``get``, ``put``, ``mkdir``, ``mv``, and ``rm``. For backwards compatibility with Tahoe-LAFS v1.0, if the ``tahoe:`` alias is not found in ``~/.tahoe/private/aliases``, the CLI will use the contents of ``~/.tahoe/private/root_dir.cap`` instead. Tahoe-LAFS v1.0 had only a single starting point, and stored it in this ``root_dir.cap`` file, so v1.1 and later will use it if necessary. However, once you've set a ``tahoe:`` alias with "``tahoe set-alias``", that will override anything in the old ``root_dir.cap`` file. The Tahoe-LAFS CLI commands use a similar path syntax to ``scp`` and ``rsync`` -- an optional ``ALIAS:`` prefix, followed by the pathname or filename. Some commands (like "``tahoe cp``") use the lack of an alias to mean that you want to refer to a local file, instead of something from the Tahoe-LAFS file store. Another way to indicate this is to start the pathname with "./", "~/", "~username/", or "/". On Windows, aliases cannot be a single character, so that it is possible to distinguish a path relative to an alias from a path starting with a local drive specifier. When you're dealing a single starting directory, the ``tahoe:`` alias is all you need. But when you want to refer to something that isn't yet attached to the graph rooted at that starting directory, you need to refer to it by its capability. The way to do that is either to use its capability directory as an argument on the command line, or to add an alias to it, with the "``tahoe add-alias``" command. Once you've added an alias, you can use that alias as an argument to commands. The best way to get started with Tahoe-LAFS is to create a node, start it, then use the following command to create a new directory and set it as your ``tahoe:`` alias:: tahoe create-alias tahoe After that you can use "``tahoe ls tahoe:``" and "``tahoe cp local.txt tahoe:``", and both will refer to the directory that you've just created. 
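Putting this together, a first session might look something like the following (a sketch; command output is omitted, and ``local.txt`` is just an example filename)::

    % tahoe create-alias tahoe
    % tahoe cp local.txt tahoe:
    % tahoe ls tahoe:

After the ``cp``, the ``ls`` should show ``local.txt`` in the directory that the ``tahoe:`` alias points to.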
SECURITY NOTE: For users of shared systems `````````````````````````````````````````` Another way to achieve the same effect as the above "``tahoe create-alias``" command is:: tahoe add-alias tahoe `tahoe mkdir` However, command-line arguments are visible to other users (through the ``ps`` command or ``/proc`` filesystem, or the Windows Process Explorer tool), so if you are using a Tahoe-LAFS node on a shared host, your login neighbors will be able to see (and capture) any directory caps that you set up with the "``tahoe add-alias``" command. The "``tahoe create-alias``" command avoids this problem by creating a new directory and putting the cap into your aliases file for you. Alternatively, you can edit the ``NODEDIR/private/aliases`` file directly, by adding a line like this:: fun: URI:DIR2:ovjy4yhylqlfoqg2vcze36dhde:4d4f47qko2xm5g7osgo2yyidi5m4muyo2vjjy53q4vjju2u55mfa By entering the dircap through the editor, the command-line arguments are bypassed, and other users will not be able to see them. Once you've added the alias, no other secrets are passed through the command line, so this vulnerability becomes less significant: they can still see your filenames and other arguments you type there, but not the caps that Tahoe-LAFS uses to permit access to your files and directories. Command Syntax Summary ---------------------- ``tahoe add-alias ALIAS[:] DIRCAP`` ``tahoe create-alias ALIAS[:]`` ``tahoe list-aliases`` ``tahoe mkdir`` ``tahoe mkdir PATH`` ``tahoe ls [PATH]`` ``tahoe webopen [PATH]`` ``tahoe put [--mutable] [FROMLOCAL|-]`` ``tahoe put [--mutable] FROMLOCAL|- TOPATH`` ``tahoe put [FROMLOCAL|-] mutable-file-writecap`` ``tahoe get FROMPATH [TOLOCAL|-]`` ``tahoe cp [-r] FROMPATH TOPATH`` ``tahoe rm PATH`` ``tahoe mv FROMPATH TOPATH`` ``tahoe ln FROMPATH TOPATH`` ``tahoe backup FROMLOCAL TOPATH`` In these summaries, ``PATH``, ``TOPATH`` or ``FROMPATH`` can be one of: * ``[SUBDIRS/]FILENAME`` for a path relative to the default ``tahoe:`` alias; * ``ALIAS:[SUBDIRS/]FILENAME`` for a path relative to another alias; * ``DIRCAP/[SUBDIRS/]FILENAME`` or ``DIRCAP:./[SUBDIRS/]FILENAME`` for a path relative to a directory cap. See `CLI Command Overview`_ above for information on using wildcards with local paths, and different treatment of colons between Unix and Windows. ``FROMLOCAL`` or ``TOLOCAL`` is a path in the local filesystem. Command Examples ---------------- ``tahoe add-alias ALIAS[:] DIRCAP`` An example would be:: tahoe add-alias fun URI:DIR2:ovjy4yhylqlfoqg2vcze36dhde:4d4f47qko2xm5g7osgo2yyidi5m4muyo2vjjy53q4vjju2u55mfa This creates an alias ``fun:`` and configures it to use the given directory cap. Once this is done, "``tahoe ls fun:``" will list the contents of this directory. Use "``tahoe add-alias tahoe DIRCAP``" to set the contents of the default ``tahoe:`` alias. Since Tahoe-LAFS v1.8.2, the alias name can be given with or without the trailing colon. On Windows, the alias should not be a single character, because it would be confused with the drive letter of a local path. ``tahoe create-alias fun`` This combines "``tahoe mkdir``" and "``tahoe add-alias``" into a single step. ``tahoe list-aliases`` This displays a table of all configured aliases. ``tahoe mkdir`` This creates a new empty unlinked directory, and prints its write-cap to stdout. The new directory is not attached to anything else. ``tahoe mkdir subdir`` ``tahoe mkdir /subdir`` This creates a new empty directory and attaches it below the root directory of the default ``tahoe:`` alias with the name "``subdir``". 
``tahoe ls`` ``tahoe ls /`` ``tahoe ls tahoe:`` ``tahoe ls tahoe:/`` All four list the root directory of the default ``tahoe:`` alias. ``tahoe ls subdir`` This lists a subdirectory of your file store. ``tahoe webopen`` ``tahoe webopen tahoe:`` ``tahoe webopen tahoe:subdir/`` ``tahoe webopen subdir/`` This uses the python 'webbrowser' module to cause a local web browser to open to the web page for the given directory. This page offers interfaces to add, download, rename, and unlink files and subdirectories in that directory. If no alias or path is given, this command opens the root directory of the default ``tahoe:`` alias. ``tahoe put file.txt`` ``tahoe put ./file.txt`` ``tahoe put /tmp/file.txt`` ``tahoe put ~/file.txt`` These upload the local file into the grid, and prints the new read-cap to stdout. The uploaded file is not attached to any directory. All one-argument forms of "``tahoe put``" perform an unlinked upload. ``tahoe put -`` ``tahoe put`` These also perform an unlinked upload, but the data to be uploaded is taken from stdin. ``tahoe put file.txt uploaded.txt`` ``tahoe put file.txt tahoe:uploaded.txt`` These upload the local file and add it to your ``tahoe:`` root with the name "``uploaded.txt``". ``tahoe put file.txt subdir/foo.txt`` ``tahoe put - subdir/foo.txt`` ``tahoe put file.txt tahoe:subdir/foo.txt`` ``tahoe put file.txt DIRCAP/foo.txt`` ``tahoe put file.txt DIRCAP/subdir/foo.txt`` These upload the named file and attach them to a subdirectory of the given root directory, under the name "``foo.txt``". When a directory write-cap is given, you can use either ``/`` (as shown above) or ``:./`` to separate it from the following path. When the source file is named "``-``", the contents are taken from stdin. ``tahoe put file.txt --mutable`` Create a new (SDMF) mutable file, fill it with the contents of ``file.txt``, and print the new write-cap to stdout. ``tahoe put file.txt MUTABLE-FILE-WRITECAP`` Replace the contents of the given mutable file with the contents of ``file.txt`` and print the same write-cap to stdout. ``tahoe cp file.txt tahoe:uploaded.txt`` ``tahoe cp file.txt tahoe:`` ``tahoe cp file.txt tahoe:/`` ``tahoe cp ./file.txt tahoe:`` These upload the local file and add it to your ``tahoe:`` root with the name "``uploaded.txt``". ``tahoe cp tahoe:uploaded.txt downloaded.txt`` ``tahoe cp tahoe:uploaded.txt ./downloaded.txt`` ``tahoe cp tahoe:uploaded.txt /tmp/downloaded.txt`` ``tahoe cp tahoe:uploaded.txt ~/downloaded.txt`` This downloads the named file from your ``tahoe:`` root, and puts the result on your local filesystem. ``tahoe cp tahoe:uploaded.txt fun:stuff.txt`` This copies a file from your ``tahoe:`` root to a different directory, set up earlier with "``tahoe add-alias fun DIRCAP``" or "``tahoe create-alias fun``". ``tahoe cp -r ~/my_dir/ tahoe:`` This copies the folder ``~/my_dir/`` and all its children to the grid, creating the new folder ``tahoe:my_dir``. Note that the trailing slash is not required: all source arguments which are directories will be copied into new subdirectories of the target. The behavior of ``tahoe cp``, like the regular UNIX ``/bin/cp``, is subtly different depending upon the exact form of the arguments. In particular: * Trailing slashes indicate directories, but are not required. * If the target object does not already exist: * and if the source is a single file, it will be copied into the target; * otherwise, the target will be created as a directory. * If there are multiple sources, the target must be a directory. 
* If the target is a pre-existing file, the source must be a single file. * If the target is a directory, each source must be a named file, a named directory, or an unnamed directory. It is not possible to copy an unnamed file (e.g. a raw filecap) into a directory, as there is no way to know what the new file should be named. ``tahoe unlink uploaded.txt`` ``tahoe unlink tahoe:uploaded.txt`` This unlinks a file from your ``tahoe:`` root (that is, causes there to no longer be an entry ``uploaded.txt`` in the root directory that points to it). Note that this does not delete the file from the grid. For backward compatibility, ``tahoe rm`` is accepted as a synonym for ``tahoe unlink``. ``tahoe mv uploaded.txt renamed.txt`` ``tahoe mv tahoe:uploaded.txt tahoe:renamed.txt`` These rename a file within your ``tahoe:`` root directory. ``tahoe mv uploaded.txt fun:`` ``tahoe mv tahoe:uploaded.txt fun:`` ``tahoe mv tahoe:uploaded.txt fun:uploaded.txt`` These move a file from your ``tahoe:`` root directory to the directory set up earlier with "``tahoe add-alias fun DIRCAP``" or "``tahoe create-alias fun``". ``tahoe backup ~ work:backups`` This command performs a versioned backup of every file and directory underneath your "``~``" home directory, placing an immutable timestamped snapshot in e.g. ``work:backups/Archives/2009-02-06_04:00:05Z/`` (note that the timestamp is in UTC, hence the "Z" suffix), and a link to the latest snapshot in work:backups/Latest/ . This command uses a small SQLite database known as the "backupdb", stored in ``~/.tahoe/private/backupdb.sqlite``, to remember which local files have been backed up already, and will avoid uploading files that have already been backed up (except occasionally that will randomly upload them again if it has been awhile since had last been uploaded, just to make sure that the copy of it on the server is still good). It compares timestamps and filesizes when making this comparison. It also re-uses existing directories which have identical contents. This lets it run faster and reduces the number of directories created. If you reconfigure your client node to switch to a different grid, you should delete the stale backupdb.sqlite file, to force "``tahoe backup``" to upload all files to the new grid. The fact that "tahoe backup" checks timestamps on your local files and skips ones that don't appear to have been changed is one of the major differences between "tahoe backup" and "tahoe cp -r". The other major difference is that "tahoe backup" keeps links to all of the versions that have been uploaded to the grid, so you can navigate among old versions stored in the grid. In contrast, "tahoe cp -r" unlinks the previous version from the grid directory and links the new version into place, so unless you have a link to the older version stored somewhere else, you'll never be able to get back to it. ``tahoe backup --exclude=*~ ~ work:backups`` Same as above, but this time the backup process will ignore any filename that will end with '~'. ``--exclude`` will accept any standard Unix shell-style wildcards, as implemented by the `Python fnmatch module `__. You may give multiple ``--exclude`` options. Please pay attention that the pattern will be matched against any level of the directory tree; it's still impossible to specify absolute path exclusions. ``tahoe backup --exclude-from-utf-8=/path/to/filename ~ work:backups`` ``--exclude-from-utf-8`` is similar to ``--exclude``, but reads exclusion patterns from a UTF-8-encoded ``/path/to/filename``, one per line. 
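For instance, such a pattern file might contain entries like the following (hypothetical patterns, shown only as an illustration; each line is a standard shell-style wildcard)::

    *~
    *.tmp
    *.bak

It would then be passed to the command exactly as shown above, i.e. ``tahoe backup --exclude-from-utf-8=/path/to/filename ~ work:backups``.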
``tahoe backup --exclude-vcs ~ work:backups`` This command will ignore any file or directory name known to be used by version control systems to store metadata. The excluded names are: * CVS * RCS * SCCS * .git * .gitignore * .cvsignore * .svn * .arch-ids * {arch} * =RELEASE-ID * =meta-update * =update * .bzr * .bzrignore * .bzrtags * .hg * .hgignore * _darcs Storage Grid Maintenance ======================== ``tahoe manifest tahoe:`` ``tahoe manifest --storage-index tahoe:`` ``tahoe manifest --verify-cap tahoe:`` ``tahoe manifest --repair-cap tahoe:`` ``tahoe manifest --raw tahoe:`` This performs a recursive walk of the given directory, visiting every file and directory that can be reached from that point. It then emits one line to stdout for each object it encounters. The default behavior is to print the access cap string (like ``URI:CHK:..`` or ``URI:DIR2:..``), followed by a space, followed by the full path name. If ``--storage-index`` is added, each line will instead contain the object's storage index. This (string) value is useful to determine which share files (on the server) are associated with this directory tree. The ``--verify-cap`` and ``--repair-cap`` options are similar, but emit a verify-cap and repair-cap, respectively. If ``--raw`` is provided instead, the output will be a JSON-encoded dictionary that includes keys for pathnames, storage index strings, and cap strings. The last line of the ``--raw`` output will be a JSON encoded deep-stats dictionary. ``tahoe stats tahoe:`` This performs a recursive walk of the given directory, visiting every file and directory that can be reached from that point. It gathers statistics on the sizes of the objects it encounters, and prints a summary to stdout. Debugging ========= For a list of all debugging commands, use "``tahoe debug``". For more detailed help on any of these commands, use "``tahoe debug COMMAND --help``". "``tahoe debug find-shares STORAGEINDEX NODEDIRS..``" will look through one or more storage nodes for the share files that are providing storage for the given storage index. "``tahoe debug catalog-shares NODEDIRS..``" will look through one or more storage nodes and locate every single share they contain. It produces a report on stdout with one line per share, describing what kind of share it is, the storage index, the size of the file is used for, etc. It may be useful to concatenate these reports from all storage hosts and use it to look for anomalies. "``tahoe debug dump-share SHAREFILE``" will take the name of a single share file (as found by "``tahoe find-shares``") and print a summary of its contents to stdout. This includes a list of leases, summaries of the hash tree, and information from the UEB (URI Extension Block). For mutable file shares, it will describe which version (seqnum and root-hash) is being stored in this share. "``tahoe debug dump-cap CAP``" will take any Tahoe-LAFS URI and unpack it into separate pieces. The most useful aspect of this command is to reveal the storage index for any given URI. This can be used to locate the share files that are holding the encoded+encrypted data for this file. "``tahoe debug corrupt-share SHAREFILE``" will flip a bit in the given sharefile. This can be used to test the client-side verification/repair code. Obviously, this command should not be used during normal operation. tahoe_lafs-1.20.0/docs/frontends/FTP-and-SFTP.rst0000644000000000000000000001702713615410400016255 0ustar00.. 
-*- coding: utf-8-with-signature -*- ======================== Tahoe-LAFS SFTP Frontend ======================== 1. `SFTP Background`_ 2. `Tahoe-LAFS Support`_ 3. `Creating an Account File`_ 4. `Configuring SFTP Access`_ 5. `Dependencies`_ 6. `Immutable and Mutable Files`_ 7. `Known Issues`_ SFTP Background =============== FTP is the venerable internet file-transfer protocol, first developed in 1971. The FTP server usually listens on port 21. A separate connection is used for the actual data transfers, either in the same direction as the initial client-to-server connection (for PORT mode), or in the reverse direction (for PASV) mode. Connections are unencrypted, so passwords, file names, and file contents are visible to eavesdroppers. SFTP is the modern replacement, developed as part of the SSH "secure shell" protocol, and runs as a subchannel of the regular SSH connection. The SSH server usually listens on port 22. All connections are encrypted. Both FTP and SFTP were developed assuming a UNIX-like server, with accounts and passwords, octal file modes (user/group/other, read/write/execute), and ctime/mtime timestamps. Previous versions of Tahoe-LAFS supported FTP, but now only the superior SFTP frontend is supported. See `Known Issues`_, below, for details on the limitations of SFTP. Tahoe-LAFS Support ================== All Tahoe-LAFS client nodes can run a frontend SFTP server, allowing regular SFTP clients (like ``/usr/bin/sftp``, the ``sshfs`` FUSE plugin, and many others) to access the file store. Since Tahoe-LAFS does not use user accounts or passwords, the SFTP servers must be configured with a way to first authenticate a user (confirm that a prospective client has a legitimate claim to whatever authorities we might grant a particular user), and second to decide what directory cap should be used as the root directory for a log-in by the authenticated user. As of Tahoe-LAFS v1.17, RSA/DSA public key authentication is the only supported mechanism. Tahoe-LAFS provides two mechanisms to perform this user-to-cap mapping. The first (recommended) is a simple flat file with one account per line. The second is an HTTP-based login mechanism. Creating an Account File ======================== To use the first form, create a file (for example ``BASEDIR/private/accounts``) in which each non-comment/non-blank line is a space-separated line of (USERNAME, KEY-TYPE, PUBLIC-KEY, ROOTCAP), like so:: % cat BASEDIR/private/accounts # This is a public key line: username keytype pubkey cap # (Tahoe-LAFS v1.11 or later) carol ssh-rsa AAAA... URI:DIR2:ovjy4yhylqlfoqg2vcze36dhde:4d4f47qko2xm5g7osgo2yyidi5m4muyo2vjjy53q4vjju2u55mfa The key type may be either "ssh-rsa" or "ssh-dsa". Now add an ``accounts.file`` directive to your ``tahoe.cfg`` file, as described in the next sections. Configuring SFTP Access ======================= The Tahoe-LAFS SFTP server requires a host keypair, just like the regular SSH server. It is important to give each server a distinct keypair, to prevent one server from masquerading as different one. The first time a client program talks to a given server, it will store the host key it receives, and will complain if a subsequent connection uses a different key. This reduces the opportunity for man-in-the-middle attacks to just the first connection. Exercise caution when connecting to the SFTP server remotely. The AES implementation used by the SFTP code does not have defenses against timing attacks. 
The code for encrypting the SFTP connection was not written by the Tahoe-LAFS team, and we have not reviewed it as carefully as we have reviewed the code for encrypting files and directories in Tahoe-LAFS itself. (See `Twisted ticket #4633`_ for a possible fix to this issue.) .. _Twisted ticket #4633: https://twistedmatrix.com/trac/ticket/4633 If you can connect to the SFTP server (which is provided by the Tahoe-LAFS gateway) only from a client on the same host, then you would be safe from any problem with the SFTP connection security. The examples given below enforce this policy by including ":interface=127.0.0.1" in the "port" option, which causes the server to only accept connections from localhost. You will use directives in the tahoe.cfg file to tell the SFTP code where to find these keys. To create one, use the ``ssh-keygen`` tool (which comes with the standard OpenSSH client distribution):: % cd BASEDIR % ssh-keygen -f private/ssh_host_rsa_key The server private key file must not have a passphrase. Then, to enable the SFTP server with an accounts file, add the following lines to the BASEDIR/tahoe.cfg file:: [sftpd] enabled = true port = tcp:8022:interface=127.0.0.1 host_pubkey_file = private/ssh_host_rsa_key.pub host_privkey_file = private/ssh_host_rsa_key accounts.file = private/accounts The SFTP server will listen on the given port number and on the loopback interface only. The "accounts.file" pathname will be interpreted relative to the node's BASEDIR. Or, to use an account server instead, do this:: [sftpd] enabled = true port = tcp:8022:interface=127.0.0.1 host_pubkey_file = private/ssh_host_rsa_key.pub host_privkey_file = private/ssh_host_rsa_key accounts.url = https://example.com/login You can provide both accounts.file and accounts.url, although it probably isn't very useful except for testing. For further information on SFTP compatibility and known issues with various clients and with the sshfs filesystem, see wiki:SftpFrontend_ .. _wiki:SftpFrontend: https://tahoe-lafs.org/trac/tahoe-lafs/wiki/SftpFrontend Dependencies ============ The Tahoe-LAFS SFTP server requires the Twisted "Conch" component (a "conch" is a twisted shell, get it?). Many Linux distributions package the Conch code separately: debian puts it in the "python-twisted-conch" package. Immutable and Mutable Files =========================== All files created via SFTP are immutable files. However, files can only be created in writeable directories, which allows the directory entry to be relinked to a different file. Normally, when the path of an immutable file is opened for writing by SFTP, the directory entry is relinked to another file with the newly written contents when the file handle is closed. The old file is still present on the grid, and any other caps to it will remain valid. (See :doc:`../garbage-collection` for how to reclaim the space used by files that are no longer needed.) The 'no-write' metadata field of a directory entry can override this behaviour. If the 'no-write' field holds a true value, then a permission error will occur when trying to write to the file, even if it is in a writeable directory. This does not prevent the directory entry from being unlinked or replaced. When using sshfs, the 'no-write' field can be set by clearing the 'w' bits in the Unix permissions, for example using the command ``chmod 444 path/to/file``. Note that this does not mean that arbitrary combinations of Unix permissions are supported. 
If the 'w' bits are cleared on a link to a mutable file or directory, that link will become read-only. If SFTP is used to write to an existing mutable file, it will publish a new version when the file handle is closed. Known Issues ============ Known Issues in the SFTP Frontend --------------------------------- Upload errors may not be reported when writing files using SFTP via sshfs (`ticket #1059`_). Non-ASCII filenames are supported with SFTP only if the client encodes filenames as UTF-8 (`ticket #1089`_). See also wiki:SftpFrontend_. .. _ticket #1059: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1059 .. _ticket #1089: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1089 tahoe_lafs-1.20.0/docs/frontends/download-status.rst0000644000000000000000000001227413615410400017441 0ustar00.. -*- coding: utf-8-with-signature -*- =============== Download status =============== Introduction ============ The WUI will display the "status" of uploads and downloads. The Welcome Page has a link entitled "Recent Uploads and Downloads" which goes to this URL: http://$GATEWAY/status Each entry in the list of recent operations has a "status" link which will take you to a page describing that operation. For immutable downloads, the page has a lot of information, and this document is to explain what it all means. It was written by Brian Warner, who wrote the v1.8.0 downloader code and the code which generates this status report about the v1.8.0 downloader's behavior. Brian posted it to the trac: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1169#comment:1 Then Zooko lightly edited it while copying it into the docs/ directory. What's involved in a download? ============================== Downloads are triggered by read() calls, each with a starting offset (defaults to 0) and a length (defaults to the whole file). A regular web-API GET request will result in a whole-file read() call. Each read() call turns into an ordered sequence of get_segment() calls. A whole-file read will fetch all segments, in order, but partial reads or multiple simultaneous reads will result in random-access of segments. Segment reads always return ciphertext: the layer above that (in read()) is responsible for decryption. Before we can satisfy any segment reads, we need to find some shares. ("DYHB" is an abbreviation for "Do You Have Block", and is the message we send to storage servers to ask them if they have any shares for us. The name is historical, from Mojo Nation/Mnet/Mountain View, but nicely distinctive. Tahoe-LAFS's actual message name is remote_get_buckets().). Responses come back eventually, or don't. Once we get enough positive DYHB responses, we have enough shares to start downloading. We send "block requests" for various pieces of the share. Responses come back eventually, or don't. When we get enough block-request responses for a given segment, we can decode the data and satisfy the segment read. When the segment read completes, some or all of the segment data is used to satisfy the read() call (if the read call started or ended in the middle of a segment, we'll only use part of the data, otherwise we'll use all of it). Data on the download-status page ================================ DYHB Requests ------------- This shows every Do-You-Have-Block query sent to storage servers and their results. Each line shows the following: * the serverid to which the request was sent * the time at which the request was sent. 
Note that all timestamps are relative to the start of the first read() call and indicated with a "+" sign * the time at which the response was received (if ever) * the share numbers that the server has, if any * the elapsed time taken by the request Also, each line is colored according to the serverid. This color is also used in the "Requests" section below. Read Events ----------- This shows all the FileNode read() calls and their overall results. Each line shows: * the range of the file that was requested (as [OFFSET:+LENGTH]). A whole-file GET will start at 0 and read the entire file. * the time at which the read() was made * the time at which the request finished, either because the last byte of data was returned to the read() caller, or because they cancelled the read by calling stopProducing (i.e. closing the HTTP connection) * the number of bytes returned to the caller so far * the time spent on the read, so far * the total time spent in AES decryption * total time spend paused by the client (pauseProducing), generally because the HTTP connection filled up, which most streaming media players will do to limit how much data they have to buffer * effective speed of the read(), not including paused time Segment Events -------------- This shows each get_segment() call and its resolution. This table is not well organized, and my post-1.8.0 work will clean it up a lot. In its present form, it records "request" and "delivery" events separately, indicated by the "type" column. Each request shows the segment number being requested and the time at which the get_segment() call was made. Each delivery shows: * segment number * range of file data (as [OFFSET:+SIZE]) delivered * elapsed time spent doing ZFEC decoding * overall elapsed time fetching the segment * effective speed of the segment fetch Requests -------- This shows every block-request sent to the storage servers. Each line shows: * the server to which the request was sent * which share number it is referencing * the portion of the share data being requested (as [OFFSET:+SIZE]) * the time the request was sent * the time the response was received (if ever) * the amount of data that was received (which might be less than SIZE if we tried to read off the end of the share) * the elapsed time for the request (RTT=Round-Trip-Time) Also note that each Request line is colored according to the serverid it was sent to. And all timestamps are shown relative to the start of the first read() call: for example the first DYHB message was sent at +0.001393s about 1.4 milliseconds after the read() call started everything off. tahoe_lafs-1.20.0/docs/frontends/webapi.rst0000644000000000000000000032610613615410400015562 0ustar00.. -*- coding: utf-8-with-signature -*- ========================== The Tahoe REST-ful Web API ========================== 1. `Enabling the web-API port`_ 2. `Basic Concepts: GET, PUT, DELETE, POST`_ 3. `URLs`_ 1. `Child Lookup`_ 4. `Slow Operations, Progress, and Cancelling`_ 5. `Programmatic Operations`_ 1. `Reading a file`_ 2. `Writing/Uploading a File`_ 3. `Creating a New Directory`_ 4. `Getting Information About a File Or Directory (as JSON)`_ 5. `Attaching an Existing File or Directory by its read- or write-cap`_ 6. `Adding Multiple Files or Directories to a Parent Directory at Once`_ 7. `Unlinking a File or Directory`_ 6. `Browser Operations: Human-Oriented Interfaces`_ 1. `Viewing a Directory (as HTML)`_ 2. `Viewing/Downloading a File`_ 3. `Getting Information About a File Or Directory (as HTML)`_ 4. `Creating a Directory`_ 5. 
`Uploading a File`_ 6. `Attaching an Existing File Or Directory (by URI)`_ 7. `Unlinking a Child`_ 8. `Renaming a Child`_ 9. `Relinking ("Moving") a Child`_ 10. `Other Utilities`_ 11. `Debugging and Testing Features`_ 7. `Other Useful Pages`_ 8. `Static Files in /public_html`_ 9. `Safety and Security Issues -- Names vs. URIs`_ 10. `Concurrency Issues`_ 11. `Access Blacklist`_ Enabling the web-API port ========================= Every Tahoe node is capable of running a built-in HTTP server. To enable this, just write a port number into the "[node]web.port" line of your node's tahoe.cfg file. For example, writing "web.port = 3456" into the "[node]" section of $NODEDIR/tahoe.cfg will cause the node to run a webserver on port 3456. This string is actually a Twisted "strports" specification, meaning you can get more control over the interface to which the server binds by supplying additional arguments. For more details, see the documentation on `twisted.application.strports`_. Writing "tcp:3456:interface=127.0.0.1" into the web.port line does the same but binds to the loopback interface, ensuring that only the programs on the local host can connect. Using "ssl:3456:privateKey=mykey.pem:certKey=cert.pem" runs an SSL server. This webport can be set when the node is created by passing a --webport option to the 'tahoe create-node' command. By default, the node listens on port 3456, on the loopback (127.0.0.1) interface. .. _twisted.application.strports: https://twistedmatrix.com/documents/current/api/twisted.application.strports.html Basic Concepts: GET, PUT, DELETE, POST ====================================== As described in :doc:`../architecture`, each file and directory in a Tahoe-LAFS file store is referenced by an identifier that combines the designation of the object with the authority to do something with it (such as read or modify the contents). This identifier is called a "read-cap" or "write-cap", depending upon whether it enables read-only or read-write access. These "caps" are also referred to as URIs (which may be confusing because they are not currently RFC3986_-compliant URIs). The Tahoe web-based API is "REST-ful", meaning it implements the concepts of "REpresentational State Transfer": the original scheme by which the World Wide Web was intended to work. Each object (file or directory) is referenced by a URL that includes the read- or write- cap. HTTP methods (GET, PUT, and DELETE) are used to manipulate these objects. You can think of the URL as a noun, and the method as a verb. In REST, the GET method is used to retrieve information about an object, or to retrieve some representation of the object itself. When the object is a file, the basic GET method will simply return the contents of that file. Other variations (generally implemented by adding query parameters to the URL) will return information about the object, such as metadata. GET operations are required to have no side-effects. PUT is used to upload new objects into the file store, or to replace an existing link or the contents of a mutable file. DELETE is used to unlink objects from directories. Both PUT and DELETE are required to be idempotent: performing the same operation multiple times must have the same side-effects as only performing it once. POST is used for more complicated actions that cannot be expressed as a GET, PUT, or DELETE. POST operations can be thought of as a method call: sending some message to the object referenced by the URL. 
In Tahoe, POST is also used for operations that must be triggered by an HTML form (including upload and unlinking), because otherwise a regular web browser has no way to accomplish these tasks. In general, everything that can be done with a PUT or DELETE can also be done with a POST. Tahoe-LAFS' web API is designed for two different kinds of consumer. The first is a program that needs to manipulate the file store. Such programs are expected to use the RESTful interface described above. The second is a human using a standard web browser to work with the file store. This user is presented with a series of HTML pages with links to download files, and forms that use POST actions to upload, rename, and unlink files. When an error occurs, the HTTP response code will be set to an appropriate 400-series code (like 404 Not Found for an unknown childname, or 400 Bad Request when the parameters to a web-API operation are invalid), and the HTTP response body will usually contain a few lines of explanation as to the cause of the error and possible responses. Unusual exceptions may result in a 500 Internal Server Error as a catch-all, with a default response body containing a Nevow-generated HTML-ized representation of the Python exception stack trace that caused the problem. CLI programs which want to copy the response body to stderr should provide an "Accept: text/plain" header to their requests to get a plain text stack trace instead. If the Accept header contains ``*/*``, or ``text/*``, or text/html (or if there is no Accept header), HTML tracebacks will be generated. .. _RFC3986: https://tools.ietf.org/html/rfc3986 URLs ==== Tahoe uses a variety of read- and write- caps to identify files and directories. The most common of these is the "immutable file read-cap", which is used for most uploaded files. These read-caps look like the following:: URI:CHK:ime6pvkaxuetdfah2p2f35pe54:4btz54xk3tew6nd4y2ojpxj4m6wxjqqlwnztgre6gnjgtucd5r4a:3:10:202 The next most common is a "directory write-cap", which provides both read and write access to a directory, and look like this:: URI:DIR2:djrdkfawoqihigoett4g6auz6a:jx5mplfpwexnoqff7y5e4zjus4lidm76dcuarpct7cckorh2dpgq There are also "directory read-caps", which start with "URI:DIR2-RO:", and give read-only access to a directory. Finally there are also mutable file read- and write- caps, which start with "URI:SSK", and give access to mutable files. (Later versions of Tahoe will make these strings shorter, and will remove the unfortunate colons, which must be escaped when these caps are embedded in URLs.) To refer to any Tahoe object through the web API, you simply need to combine a prefix (which indicates the HTTP server to use) with the cap (which indicates which object inside that server to access). Since the default Tahoe webport is 3456, the most common prefix is one that will use a local node listening on this port:: http://127.0.0.1:3456/uri/ + $CAP So, to access the directory named above, the URL would be:: http://127.0.0.1:3456/uri/URI%3ADIR2%3Adjrdkfawoqihigoett4g6auz6a%3Ajx5mplfpwexnoqff7y5e4zjus4lidm76dcuarpct7cckorh2dpgq/ (note that the colons in the directory-cap are url-encoded into "%3A" sequences). 
Likewise, to access the file named above, use:: http://127.0.0.1:3456/uri/URI%3ACHK%3Aime6pvkaxuetdfah2p2f35pe54%3A4btz54xk3tew6nd4y2ojpxj4m6wxjqqlwnztgre6gnjgtucd5r4a%3A3%3A10%3A202 In the rest of this document, we'll use "$DIRCAP" as shorthand for a read-cap or write-cap that refers to a directory, and "$FILECAP" to abbreviate a cap that refers to a file (whether mutable or immutable). So those URLs above can be abbreviated as:: http://127.0.0.1:3456/uri/$DIRCAP/ http://127.0.0.1:3456/uri/$FILECAP The operation summaries below will abbreviate these further, by eliding the server prefix. They will be displayed like this:: /uri/$DIRCAP/ /uri/$FILECAP /cap can be used as a synonym for /uri. If interoperability with older web-API servers is required, /uri should be used. Child Lookup ------------ Tahoe directories contain named child entries, just like directories in a regular local filesystem. These child entries, called "dirnodes", consist of a name, metadata, a write slot, and a read slot. The write and read slots normally contain a write-cap and read-cap referring to the same object, which can be either a file or a subdirectory. The write slot may be empty (actually, both may be empty, but that is unusual). If you have a Tahoe URL that refers to a directory, and want to reference a named child inside it, just append the child name to the URL. For example, if our sample directory contains a file named "welcome.txt", we can refer to that file with:: http://127.0.0.1:3456/uri/$DIRCAP/welcome.txt (or http://127.0.0.1:3456/uri/URI%3ADIR2%3Adjrdkfawoqihigoett4g6auz6a%3Ajx5mplfpwexnoqff7y5e4zjus4lidm76dcuarpct7cckorh2dpgq/welcome.txt) Multiple levels of subdirectories can be handled this way:: http://127.0.0.1:3456/uri/$DIRCAP/tahoe-source/docs/architecture.rst In this document, when we need to refer to a URL that references a file using this child-of-some-directory format, we'll use the following string:: /uri/$DIRCAP/[SUBDIRS../]FILENAME The "[SUBDIRS../]" part means that there are zero or more (optional) subdirectory names in the middle of the URL. The "FILENAME" at the end means that this whole URL refers to a file of some sort, rather than to a directory. When we need to refer specifically to a directory in this way, we'll write:: /uri/$DIRCAP/[SUBDIRS../]SUBDIR Note that all components of pathnames in URLs are required to be UTF-8 encoded, so "resume.doc" (with an acute accent on both E's) would be accessed with:: http://127.0.0.1:3456/uri/$DIRCAP/r%C3%A9sum%C3%A9.doc Also note that the filenames inside upload POST forms are interpreted using whatever character set was provided in the conventional '_charset' field, and defaults to UTF-8 if not otherwise specified. The JSON representation of each directory contains native Unicode strings. Tahoe directories are specified to contain Unicode filenames, and cannot contain binary strings that are not representable as such. All Tahoe operations that refer to existing files or directories must include a suitable read- or write- cap in the URL: the web-API server won't add one for you. If you don't know the cap, you can't access the file. This allows the security properties of Tahoe caps to be extended across the web-API interface. Slow Operations, Progress, and Cancelling ========================================= Certain operations can be expected to take a long time. 
The "t=deep-check", described below, will recursively visit every file and directory reachable from a given starting point, which can take minutes or even hours for extremely large directory structures. A single long-running HTTP request is a fragile thing: proxies, NAT boxes, browsers, and users may all grow impatient with waiting and give up on the connection. For this reason, long-running operations have an "operation handle", which can be used to poll for status/progress messages while the operation proceeds. This handle can also be used to cancel the operation. These handles are created by the client, and passed in as a an "ophandle=" query argument to the POST or PUT request which starts the operation. The following operations can then be used to retrieve status: ``GET /operations/$HANDLE?output=HTML (with or without t=status)`` ``GET /operations/$HANDLE?output=JSON (same)`` These two retrieve the current status of the given operation. Each operation presents a different sort of information, but in general the page retrieved will indicate: * whether the operation is complete, or if it is still running * how much of the operation is complete, and how much is left, if possible Note that the final status output can be quite large: a deep-manifest of a directory structure with 300k directories and 200k unique files is about 275MB of JSON, and might take two minutes to generate. For this reason, the full status is not provided until the operation has completed. The HTML form will include a meta-refresh tag, which will cause a regular web browser to reload the status page about 60 seconds later. This tag will be removed once the operation has completed. There may be more status information available under /operations/$HANDLE/$ETC : i.e., the handle forms the root of a URL space. ``POST /operations/$HANDLE?t=cancel`` This terminates the operation, and returns an HTML page explaining what was cancelled. If the operation handle has already expired (see below), this POST will return a 404, which indicates that the operation is no longer running (either it was completed or terminated). The response body will be the same as a GET /operations/$HANDLE on this operation handle, and the handle will be expired immediately afterwards. The operation handle will eventually expire, to avoid consuming an unbounded amount of memory. The handle's time-to-live can be reset at any time, by passing a retain-for= argument (with a count of seconds) to either the initial POST that starts the operation, or the subsequent GET request which asks about the operation. For example, if a 'GET /operations/$HANDLE?output=JSON&retain-for=600' query is performed, the handle will remain active for 600 seconds (10 minutes) after the GET was received. In addition, if the GET includes a release-after-complete=True argument, and the operation has completed, the operation handle will be released immediately. If a retain-for= argument is not used, the default handle lifetimes are: * handles will remain valid at least until their operation finishes * uncollected handles for finished operations (i.e. handles for operations that have finished but for which the GET page has not been accessed since completion) will remain valid for four days, or for the total time consumed by the operation, whichever is greater. * collected handles (i.e. the GET page has been retrieved at least once since the operation completed) will remain valid for one day. 
Many "slow" operations can begin to use unacceptable amounts of memory when operating on large directory structures. The memory usage increases when the ophandle is polled, as the results must be copied into a JSON string, sent over the wire, then parsed by a client. So, as an alternative, many "slow" operations have streaming equivalents. These equivalents do not use operation handles. Instead, they emit line-oriented status results immediately. Client code can cancel the operation by simply closing the HTTP connection. Programmatic Operations ======================= Now that we know how to build URLs that refer to files and directories in a Tahoe-LAFS file store, what sorts of operations can we do with those URLs? This section contains a catalog of GET, PUT, DELETE, and POST operations that can be performed on these URLs. This set of operations are aimed at programs that use HTTP to communicate with a Tahoe node. A later section describes operations that are intended for web browsers. Reading a File -------------- ``GET /uri/$FILECAP`` ``GET /uri/$DIRCAP/[SUBDIRS../]FILENAME`` This will retrieve the contents of the given file. The HTTP response body will contain the sequence of bytes that make up the file. The "Range:" header can be used to restrict which portions of the file are returned (see RFC 2616 section 14.35.1 "Byte Ranges"), however Tahoe only supports a single "bytes" range and never provides a ``multipart/byteranges`` response. An attempt to begin a read past the end of the file will provoke a 416 Requested Range Not Satisfiable error, but normal overruns (reads which start at the beginning or middle and go beyond the end) are simply truncated. To view files in a web browser, you may want more control over the Content-Type and Content-Disposition headers. Please see the next section "Browser Operations", for details on how to modify these URLs for that purpose. Writing/Uploading a File ------------------------ ``PUT /uri/$FILECAP`` ``PUT /uri/$DIRCAP/[SUBDIRS../]FILENAME`` Upload a file, using the data from the HTTP request body, and add whatever child links and subdirectories are necessary to make the file available at the given location. Once this operation succeeds, a GET on the same URL will retrieve the same contents that were just uploaded. This will create any necessary intermediate subdirectories. To use the /uri/$FILECAP form, $FILECAP must be a write-cap for a mutable file. In the /uri/$DIRCAP/[SUBDIRS../]FILENAME form, if the target file is a writeable mutable file, that file's contents will be overwritten in-place. If it is a read-cap for a mutable file, an error will occur. If it is an immutable file, the old file will be discarded, and a new one will be put in its place. If the target file is a writable mutable file, you may also specify an "offset" parameter -- a byte offset that determines where in the mutable file the data from the HTTP request body is placed. This operation is relatively efficient for MDMF mutable files, and is relatively inefficient (but still supported) for SDMF mutable files. If no offset parameter is specified, then the entire file is replaced with the data from the HTTP request body. For an immutable file, the "offset" parameter is not valid. When creating a new file, you can control the type of file created by specifying a format= argument in the query string. format=MDMF creates an MDMF mutable file. format=SDMF creates an SDMF mutable file. format=CHK creates an immutable file. The value of the format argument is case-insensitive. 
If no format is specified, the newly-created file will be immutable (but see below). For compatibility with previous versions of Tahoe-LAFS, the web-API will also accept a mutable=true argument in the query string. If mutable=true is given, then the new file will be mutable, and its format will be the default mutable file format, as configured by the [client]mutable.format option of tahoe.cfg on the Tahoe-LAFS node hosting the webapi server. Use of mutable=true is discouraged; new code should use format= instead of mutable=true (unless it needs to be compatible with web-API servers older than v1.9.0). If neither format= nor mutable=true are given, the newly-created file will be immutable. This returns the file-cap of the resulting file. If a new file was created by this method, the HTTP response code (as dictated by rfc2616) will be set to 201 CREATED. If an existing file was replaced or modified, the response code will be 200 OK. Note that the 'curl -T localfile http://127.0.0.1:3456/uri/$DIRCAP/foo.txt' command can be used to invoke this operation. ``PUT /uri`` This uploads a file, and produces a file-cap for the contents, but does not attach the file into the file store. No directories will be modified by this operation. The file-cap is returned as the body of the HTTP response. This method accepts format= and mutable=true as query string arguments, and interprets those arguments in the same way as the linked forms of PUT described immediately above. Creating a New Directory ------------------------ ``POST /uri?t=mkdir`` ``PUT /uri?t=mkdir`` Create a new empty directory and return its write-cap as the HTTP response body. This does not make the newly created directory visible from the file store. The "PUT" operation is provided for backwards compatibility: new code should use POST. This supports a format= argument in the query string. The format= argument, if specified, controls the format of the directory. format=MDMF indicates that the directory should be stored as an MDMF file; format=SDMF indicates that the directory should be stored as an SDMF file. The value of the format= argument is case-insensitive. If no format= argument is given, the directory's format is determined by the default mutable file format, as configured on the Tahoe-LAFS node responding to the request. In addition, an optional "private-key=" argument is supported which, if given, specifies the underlying signing key to be used when creating the directory. This value must be a DER-encoded 2048-bit RSA private key in urlsafe base64 encoding. (To convert an existing PEM-encoded RSA key file into the format required, the following commands may be used -- assuming a modern UNIX-like environment with common tools already installed: ``openssl rsa -in key.pem -outform der | base64 -w 0 -i - | tr '+/' '-_'``) Because this key can be used to derive the write capability for the associated directory, additional care should be taken to ensure that the key is unique, that it is kept confidential, and that it was derived from an appropriate (high-entropy) source of randomness. If this argument is omitted (the default behavior), Tahoe-LAFS will generate an appropriate signing key using the underlying operating system's source of entropy. ``POST /uri?t=mkdir-with-children`` Create a new directory, populated with a set of child nodes, and return its write-cap as the HTTP response body. The new directory is not attached to any other directory: the returned write-cap is the only reference to it. 
The format of the directory can be controlled with the format= argument in the query string and a signing key can be specified with the private-key= argument, as described above. Initial children are provided as the body of the POST form (this is more efficient than doing separate mkdir and set_children operations). If the body is empty, the new directory will be empty. If not empty, the body will be interpreted as a UTF-8 JSON-encoded dictionary of children with which the new directory should be populated, using the same format as would be returned in the 'children' value of the t=json GET request, described below. Each dictionary key should be a child name, and each value should be a list of [TYPE, PROPDICT], where PROPDICT contains "rw_uri", "ro_uri", and "metadata" keys (all others are ignored). For example, the PUT request body could be:: { "Fran\u00e7ais": [ "filenode", { "ro_uri": "URI:CHK:...", "metadata": { "ctime": 1202777696.7564139, "mtime": 1202777696.7564139, "tahoe": { "linkcrtime": 1202777696.7564139, "linkmotime": 1202777696.7564139 } } } ], "subdir": [ "dirnode", { "rw_uri": "URI:DIR2:...", "ro_uri": "URI:DIR2-RO:...", "metadata": { "ctime": 1202778102.7589991, "mtime": 1202778111.2160511, "tahoe": { "linkcrtime": 1202777696.7564139, "linkmotime": 1202777696.7564139 } } } ] } For forward-compatibility, a mutable directory can also contain caps in a format that is unknown to the web-API server. When such caps are retrieved from a mutable directory in a "ro_uri" field, they will be prefixed with the string "ro.", indicating that they must not be decoded without checking that they are read-only. The "ro." prefix must not be stripped off without performing this check. (Future versions of the web-API server will perform it where necessary.) If both the "rw_uri" and "ro_uri" fields are present in a given PROPDICT, and the web-API server recognizes the rw_uri as a write cap, then it will reset the ro_uri to the corresponding read cap and discard the original contents of ro_uri (in order to ensure that the two caps correspond to the same object and that the ro_uri is in fact read-only). However this may not happen for caps in a format unknown to the web-API server. Therefore, when writing a directory the web-API client should ensure that the contents of "rw_uri" and "ro_uri" for a given PROPDICT are a consistent (write cap, read cap) pair if possible. If the web-API client only has one cap and does not know whether it is a write cap or read cap, then it is acceptable to set "rw_uri" to that cap and omit "ro_uri". The client must not put a write cap into a "ro_uri" field. The metadata may have a "no-write" field. If this is set to true in the metadata of a link, it will not be possible to open that link for writing via the SFTP frontend; see :doc:`FTP-and-SFTP` for details. Also, if the "no-write" field is set to true in the metadata of a link to a mutable child, it will cause the link to be diminished to read-only. Note that the web-API-using client application must not provide the "Content-Type: multipart/form-data" header that usually accompanies HTML form submissions, since the body is not formatted this way. Doing so will cause a server error as the lower-level code misparses the request body. Child file names should each be expressed as a Unicode string, then used as keys of the dictionary. The dictionary should then be converted into JSON, and the resulting string encoded into UTF-8. This UTF-8 bytestring should then be used as the POST body. 
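As an illustrative sketch, the request could be issued with curl, where ``children.json`` is a hypothetical local file holding the UTF-8 JSON dictionary described above (the Content-Type override is a precaution so that curl's default form encoding is not applied to the body)::

  curl -X POST -H "Content-Type: text/plain" --data-binary @children.json \
       "http://127.0.0.1:3456/uri?t=mkdir-with-children"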
``POST /uri?t=mkdir-immutable`` Like t=mkdir-with-children above, but the new directory will be deep-immutable. This means that the directory itself is immutable, and that it can only contain objects that are treated as being deep-immutable, like immutable files, literal files, and deep-immutable directories. For forward-compatibility, a deep-immutable directory can also contain caps in a format that is unknown to the web-API server. When such caps are retrieved from a deep-immutable directory in a "ro_uri" field, they will be prefixed with the string "imm.", indicating that they must not be decoded without checking that they are immutable. The "imm." prefix must not be stripped off without performing this check. (Future versions of the web-API server will perform it where necessary.) The cap for each child may be given either in the "rw_uri" or "ro_uri" field of the PROPDICT (not both). If a cap is given in the "rw_uri" field, then the web-API server will check that it is an immutable read-cap of a *known* format, and give an error if it is not. If a cap is given in the "ro_uri" field, then the web-API server will still check whether known caps are immutable, but for unknown caps it will simply assume that the cap can be stored, as described above. Note that an attacker would be able to store any cap in an immutable directory, so this check when creating the directory is only to help non-malicious clients to avoid accidentally giving away more authority than intended. A non-empty request body is mandatory, since after the directory is created, it will not be possible to add more children to it. ``POST /uri/$DIRCAP/[SUBDIRS../]SUBDIR?t=mkdir`` ``PUT /uri/$DIRCAP/[SUBDIRS../]SUBDIR?t=mkdir`` Create new directories as necessary to make sure that the named target ($DIRCAP/SUBDIRS../SUBDIR) is a directory. This will create additional intermediate mutable directories as necessary. If the named target directory already exists, this will make no changes to it. If the final directory is created, it will be empty. This accepts a format= argument in the query string, which controls the format of the named target directory, if it does not already exist. format= is interpreted in the same way as in the POST /uri?t=mkdir form. Note that format= only controls the format of the named target directory; intermediate directories, if created, are created based on the default mutable type, as configured on the Tahoe-LAFS server responding to the request. This operation will return an error if a blocking file is present at any of the parent names, preventing the server from creating the necessary parent directory; or if it would require changing an immutable directory. The write-cap of the new directory will be returned as the HTTP response body. ``POST /uri/$DIRCAP/[SUBDIRS../]SUBDIR?t=mkdir-with-children`` Like /uri?t=mkdir-with-children, but the final directory is created as a child of an existing mutable directory. This will create additional intermediate mutable directories as necessary. If the final directory is created, it will be populated with initial children from the POST request body, as described above. This accepts a format= argument in the query string, which controls the format of the target directory, if the target directory is created as part of the operation. format= is interpreted in the same way as in the POST/ uri?t=mkdir-with-children operation. 
Note that format= only controls the format of the named target directory; intermediate directories, if created, are created using the default mutable type setting, as configured on the Tahoe-LAFS server responding to the request. This operation will return an error if a blocking file is present at any of the parent names, preventing the server from creating the necessary parent directory; or if it would require changing an immutable directory; or if the immediate parent directory already has a child named SUBDIR. ``POST /uri/$DIRCAP/[SUBDIRS../]SUBDIR?t=mkdir-immutable`` Like /uri?t=mkdir-immutable, but the final directory is created as a child of an existing mutable directory. The final directory will be deep-immutable, and will be populated with the children specified as a JSON dictionary in the POST request body. In Tahoe 1.6 this operation creates intermediate mutable directories if necessary, but that behaviour should not be relied on; see ticket #920. This operation will return an error if the parent directory is immutable, or already has a child named SUBDIR. ``POST /uri/$DIRCAP/[SUBDIRS../]?t=mkdir&name=NAME`` Create a new empty mutable directory and attach it to the given existing directory. This will create additional intermediate directories as necessary. This accepts a format= argument in the query string, which controls the format of the named target directory, if it does not already exist. format= is interpreted in the same way as in the POST /uri?t=mkdir form. Note that format= only controls the format of the named target directory; intermediate directories, if created, are created based on the default mutable type, as configured on the Tahoe-LAFS server responding to the request. This operation will return an error if a blocking file is present at any of the parent names, preventing the server from creating the necessary parent directory, or if it would require changing any immutable directory. The URL of this operation points to the parent of the bottommost new directory, whereas the /uri/$DIRCAP/[SUBDIRS../]SUBDIR?t=mkdir operation above has a URL that points directly to the bottommost new directory. ``POST /uri/$DIRCAP/[SUBDIRS../]?t=mkdir-with-children&name=NAME`` Like /uri/$DIRCAP/[SUBDIRS../]?t=mkdir&name=NAME, but the new directory will be populated with initial children via the POST request body. This command will create additional intermediate mutable directories as necessary. This accepts a format= argument in the query string, which controls the format of the target directory, if the target directory is created as part of the operation. format= is interpreted in the same way as in the POST /uri?t=mkdir-with-children operation. Note that format= only controls the format of the named target directory; intermediate directories, if created, are created using the default mutable type setting, as configured on the Tahoe-LAFS server responding to the request. This operation will return an error if a blocking file is present at any of the parent names, preventing the server from creating the necessary parent directory; or if it would require changing an immutable directory; or if the immediate parent directory already has a child named NAME. Note that the name= argument must be passed as a queryarg, because the POST request body is used for the initial children JSON. ``POST /uri/$DIRCAP/[SUBDIRS../]?t=mkdir-immutable&name=NAME`` Like /uri/$DIRCAP/[SUBDIRS../]?t=mkdir-with-children&name=NAME, but the final directory will be deep-immutable.
The children are specified as a JSON dictionary in the POST request body. Again, the name= argument must be passed as a queryarg. In Tahoe 1.6 this operation creates intermediate mutable directories if necessary, but that behaviour should not be relied on; see ticket #920. This operation will return an error if the parent directory is immutable, or already has a child named NAME. Getting Information About a File Or Directory (as JSON) ------------------------------------------------------- ``GET /uri/$FILECAP?t=json`` ``GET /uri/$DIRCAP?t=json`` ``GET /uri/$DIRCAP/[SUBDIRS../]SUBDIR?t=json`` ``GET /uri/$DIRCAP/[SUBDIRS../]FILENAME?t=json`` This returns a machine-parseable JSON-encoded description of the given object. The JSON always contains a list, and the first element of the list is always a flag that indicates whether the referenced object is a file or a directory. If it is a capability to a file, then the information includes file size and URI, like this:: GET /uri/$FILECAP?t=json : [ "filenode", { "ro_uri": file_uri, "verify_uri": verify_uri, "size": bytes, "mutable": false, "format": "CHK" } ] If it is a capability to a directory followed by a path from that directory to a file, then the information also includes metadata from the link to the file in the parent directory, like this:: GET /uri/$DIRCAP/[SUBDIRS../]FILENAME?t=json [ "filenode", { "ro_uri": file_uri, "verify_uri": verify_uri, "size": bytes, "mutable": false, "format": "CHK", "metadata": { "ctime": 1202777696.7564139, "mtime": 1202777696.7564139, "tahoe": { "linkcrtime": 1202777696.7564139, "linkmotime": 1202777696.7564139 } } } ] If it is a directory, then it includes information about the children of this directory, as a mapping from child name to a set of data about the child (the same data that would appear in a corresponding GET?t=json of the child itself). The child entries also include metadata about each child, including link-creation- and link-change- timestamps. The output looks like this:: GET /uri/$DIRCAP?t=json : GET /uri/$DIRCAP/[SUBDIRS../]SUBDIR?t=json : [ "dirnode", { "rw_uri": read_write_uri, "ro_uri": read_only_uri, "verify_uri": verify_uri, "mutable": true, "format": "SDMF", "children": { "foo.txt": [ "filenode", { "ro_uri": uri, "size": bytes, "metadata": { "ctime": 1202777696.7564139, "mtime": 1202777696.7564139, "tahoe": { "linkcrtime": 1202777696.7564139, "linkmotime": 1202777696.7564139 } } } ], "subdir": [ "dirnode", { "rw_uri": rwuri, "ro_uri": rouri, "metadata": { "ctime": 1202778102.7589991, "mtime": 1202778111.2160511, "tahoe": { "linkcrtime": 1202777696.7564139, "linkmotime": 1202777696.7564139 } } } ] } } ] In the above example, note how 'children' is a dictionary in which the keys are child names and the values depend upon whether the child is a file or a directory. The value is mostly the same as the JSON representation of the child object (except that directories do not recurse -- the "children" entry of the child is omitted, and the directory view includes the metadata that is stored on the directory edge). The rw_uri field will be present in the information about a directory if and only if you have read-write access to that directory. The verify_uri field will be present if and only if the object has a verify-cap (non-distributed LIT files do not have verify-caps). 
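A quick way to inspect an object from the command line is to fetch this JSON directly; for example (a sketch; the cap and port are placeholders)::

  curl "http://127.0.0.1:3456/uri/$DIRCAP?t=json"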
If the cap is of an unknown format, then the file size and verify_uri will not be available:: GET /uri/$UNKNOWNCAP?t=json : [ "unknown", { "ro_uri": unknown_read_uri } ] GET /uri/$DIRCAP/[SUBDIRS../]UNKNOWNCHILDNAME?t=json : [ "unknown", { "rw_uri": unknown_write_uri, "ro_uri": unknown_read_uri, "mutable": true, "metadata": { "ctime": 1202777696.7564139, "mtime": 1202777696.7564139, "tahoe": { "linkcrtime": 1202777696.7564139, "linkmotime": 1202777696.7564139 } } } ] As in the case of file nodes, the metadata will only be present when the capability is to a directory followed by a path. The "mutable" field is also not always present; when it is absent, the mutability of the object is not known. About the metadata `````````````````` The value of the 'tahoe':'linkmotime' key is updated whenever a link to a child is set. The value of the 'tahoe':'linkcrtime' key is updated whenever a link to a child is created -- i.e. when there was not previously a link under that name. Note however, that if the edge in the Tahoe-LAFS file store points to a mutable file and the contents of that mutable file is changed, then the 'tahoe':'linkmotime' value on that edge will *not* be updated, since the edge itself wasn't updated -- only the mutable file was. The timestamps are represented as a number of seconds since the UNIX epoch (1970-01-01 00:00:00 UTC), with leap seconds not being counted in the long term. In Tahoe earlier than v1.4.0, 'mtime' and 'ctime' keys were populated instead of the 'tahoe':'linkmotime' and 'tahoe':'linkcrtime' keys. Starting in Tahoe v1.4.0, the 'linkmotime'/'linkcrtime' keys in the 'tahoe' sub-dict are populated. However, prior to Tahoe v1.7beta, a bug caused the 'tahoe' sub-dict to be deleted by web-API requests in which new metadata is specified, and not to be added to existing child links that lack it. From Tahoe v1.7.0 onward, the 'mtime' and 'ctime' fields are no longer populated or updated (see ticket #924), except by "tahoe backup" as explained below. For backward compatibility, when an existing link is updated and 'tahoe':'linkcrtime' is not present in the previous metadata but 'ctime' is, the old value of 'ctime' is used as the new value of 'tahoe':'linkcrtime'. The reason we added the new fields in Tahoe v1.4.0 is that there is a "set_children" API (described below) which you can use to overwrite the values of the 'mtime'/'ctime' pair, and this API is used by the "tahoe backup" command (in Tahoe v1.3.0 and later) to set the 'mtime' and 'ctime' values when backing up files from a local filesystem into the Tahoe-LAFS file store. As of Tahoe v1.4.0, the set_children API cannot be used to set anything under the 'tahoe' key of the metadata dict -- if you include 'tahoe' keys in your 'metadata' arguments then it will silently ignore those keys. Therefore, if the 'tahoe' sub-dict is present, you can rely on the 'linkcrtime' and 'linkmotime' values therein to have the semantics described above. (This is assuming that only official Tahoe clients have been used to write those links, and that their system clocks were set to what you expected -- there is nothing preventing someone from editing their Tahoe client or writing their own Tahoe client which would overwrite those values however they like, and there is nothing to constrain their system clock from taking any value.) 
When an edge is created or updated by "tahoe backup", the 'mtime' and 'ctime' keys on that edge are set as follows: * 'mtime' is set to the timestamp read from the local filesystem for the "mtime" of the local file in question, which means the last time the contents of that file were changed. * On Windows, 'ctime' is set to the creation timestamp for the file read from the local filesystem. On other platforms, 'ctime' is set to the UNIX "ctime" of the local file, which means the last time that either the contents or the metadata of the local file was changed. There are several ways that the 'ctime' field could be confusing: 1. You might be confused about whether it reflects the time of the creation of a link in the Tahoe-LAFS file store (by a version of Tahoe < v1.7.0) or a timestamp copied in by "tahoe backup" from a local filesystem. 2. You might be confused about whether it is a copy of the file creation time (if "tahoe backup" was run on a Windows system) or of the last contents-or-metadata change (if "tahoe backup" was run on a different operating system). 3. You might be confused by the fact that changing the contents of a mutable file in Tahoe doesn't have any effect on any links pointing at that file in any directories, although "tahoe backup" sets the link 'ctime'/'mtime' to reflect timestamps about the local file corresponding to the Tahoe file to which the link points. 4. Also, quite apart from Tahoe, you might be confused about the meaning of the "ctime" in UNIX local filesystems, which people sometimes think means file creation time, but which actually means, in UNIX local filesystems, the most recent time that the file contents or the file metadata (such as owner, permission bits, extended attributes, etc.) has changed. Note that although "ctime" does not mean file creation time in UNIX, links created by a version of Tahoe prior to v1.7.0, and never written by "tahoe backup", will have 'ctime' set to the link creation time. Attaching an Existing File or Directory by its read- or write-cap ----------------------------------------------------------------- ``PUT /uri/$DIRCAP/[SUBDIRS../]CHILDNAME?t=uri`` This attaches a child object (either a file or directory) to a specified location in the Tahoe-LAFS file store. The child object is referenced by its read- or write- cap, as provided in the HTTP request body. This will create intermediate directories as necessary. This is similar to a UNIX hardlink: by referencing a previously-uploaded file (or previously-created directory) instead of uploading/creating a new one, you can create two references to the same object. The read- or write- cap of the child is provided in the body of the HTTP request, and this same cap is returned in the response body. The default behavior is to overwrite any existing object at the same location. To prevent this (and make the operation return an error instead of overwriting), add a "replace=false" argument, as "?t=uri&replace=false". With replace=false, this operation will return an HTTP 409 "Conflict" error if there is already an object at the given location, rather than overwriting the existing object. To allow the operation to overwrite a file, but return an error when trying to overwrite a directory, use "replace=only-files" (this behavior is closer to the traditional UNIX "mv" command). Note that "true", "t", and "1" are all synonyms for "True", and "false", "f", and "0" are synonyms for "False", and the parameter is case-insensitive. 
Note that this operation does not take its child cap in the form of separate "rw_uri" and "ro_uri" fields. Therefore, it cannot accept a child cap in a format unknown to the web-API server, unless its URI starts with "ro." or "imm.". This restriction is necessary because the server is not able to attenuate an unknown write cap to a read cap. Unknown URIs starting with "ro." or "imm.", on the other hand, are assumed to represent read caps. The client should not prefix a write cap with "ro." or "imm." and pass it to this operation, since that would result in granting the cap's write authority to holders of the directory read cap. Adding Multiple Files or Directories to a Parent Directory at Once ------------------------------------------------------------------ ``POST /uri/$DIRCAP/[SUBDIRS..]?t=set_children`` ``POST /uri/$DIRCAP/[SUBDIRS..]?t=set-children`` (Tahoe >= v1.6) This command adds multiple children to a directory in a single operation. It reads the request body and interprets it as a JSON-encoded description of the child names and read/write-caps that should be added. The body should be a JSON-encoded dictionary, in the same format as the "children" value returned by the "GET /uri/$DIRCAP?t=json" operation described above. In this format, each key is a child name, and the corresponding value is a tuple of (type, childinfo). "type" is ignored, and "childinfo" is a dictionary that contains "rw_uri", "ro_uri", and "metadata" keys. You can take the output of "GET /uri/$DIRCAP1?t=json" and use it as the input to "POST /uri/$DIRCAP2?t=set_children" to make DIR2 look very much like DIR1 (except for any existing children of DIR2 that were not overwritten, and any existing "tahoe" metadata keys as described below). When the set_children request contains a child name that already exists in the target directory, this command defaults to overwriting that child with the new value (both child cap and metadata, but if the JSON data does not contain a "metadata" key, the old child's metadata is preserved). The command takes a boolean "overwrite=" query argument to control this behavior. If you use "?t=set_children&overwrite=false", then an attempt to replace an existing child will instead cause an error. Any "tahoe" key in the new child's "metadata" value is ignored. Any existing "tahoe" metadata is preserved. The metadata["tahoe"] value is reserved for metadata generated by the tahoe node itself. The only two keys currently placed here are "linkcrtime" and "linkmotime". For details, see the section above entitled "Getting Information About a File Or Directory (as JSON)", in the "About the metadata" subsection. Note that this command was introduced with the name "set_children", which uses an underscore rather than a hyphen as other multi-word command names do. The variant with a hyphen is now accepted, but clients that desire backward compatibility should continue to use "set_children". Unlinking a File or Directory ----------------------------- ``DELETE /uri/$DIRCAP/[SUBDIRS../]CHILDNAME`` This removes the given name from its parent directory. CHILDNAME is the name to be removed, and $DIRCAP/SUBDIRS.. indicates the directory that will be modified. Note that this does not actually delete the file or directory that the name points to from the tahoe grid -- it only unlinks the named reference from this directory. If there are other names in this directory or in other directories that point to the resource, then it will remain accessible through those paths.
Even if all names pointing to this object are removed from their parent directories, then someone with possession of its read-cap can continue to access the object through that cap. The object will only become completely unreachable once 1: there are no reachable directories that reference it, and 2: nobody is holding a read- or write- cap to the object. (This behavior is very similar to the way hardlinks and anonymous files work in traditional UNIX filesystems). This operation will not modify more than a single directory. Intermediate directories which were implicitly created by PUT or POST methods will *not* be automatically removed by DELETE. This method returns the file- or directory- cap of the object that was just removed. Browser Operations: Human-oriented interfaces ============================================= This section describes the HTTP operations that provide support for humans running a web browser. Most of these operations use HTML forms that use POST to drive the Tahoe-LAFS node. This section is intended for HTML authors who want to write web pages containing user interfaces for manipulating the Tahoe-LAFS file store. Note that for all POST operations, the arguments listed can be provided either as URL query arguments or as form body fields. URL query arguments are separated from the main URL by "?", and from each other by "&". For example, "POST /uri/$DIRCAP?t=upload&mutable=true". Form body fields are usually specified by using ``<input>`` elements. For clarity, the descriptions below display the most significant arguments as URL query args. Viewing a Directory (as HTML) ----------------------------- ``GET /uri/$DIRCAP/[SUBDIRS../]`` This returns an HTML page, intended to be displayed to a human by a web browser, which contains HREF links to all files and directories reachable from this directory. These HREF links do not have a t= argument, meaning that a human who follows them will get pages also meant for a human. It also contains forms to upload new files, and to unlink files and directories from their parent directory. Those forms use POST methods to do their job. Viewing/Downloading a File -------------------------- ``GET /uri/$FILECAP`` ``GET /uri/$DIRCAP/[SUBDIRS../]FILENAME`` ``GET /named/$FILECAP/FILENAME`` These will retrieve the contents of the given file. The HTTP response body will contain the sequence of bytes that make up the file. The ``/named/`` form is an alternative to ``/uri/$FILECAP`` which makes it easier to get the correct filename. The Tahoe server will provide the contents of the given file, with a Content-Type header derived from the given filename. This form is used to get browsers to use the "Save Link As" feature correctly, and also helps command-line tools like "wget" and "curl" use the right filename. Note that this form can *only* be used with file caps; it is an error to use a directory cap after the /named/ prefix. URLs may also use /file/$FILECAP/FILENAME as a synonym for /named/$FILECAP/FILENAME. The use of "/file/" is deprecated in favor of "/named/" and support for "/file/" will be removed in a future release of Tahoe-LAFS. If you use the first form (``/uri/$FILECAP``) and want the HTTP response to include a useful Content-Type header, add a "filename=foo" query argument, like "GET /uri/$FILECAP?filename=foo.jpg". The bare "GET /uri/$FILECAP" does not give the Tahoe node enough information to determine a Content-Type (since LAFS immutable files are merely sequences of bytes, not typed and named file objects).
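For example, either of the following sketches (the cap, filename, and port are placeholders) should yield a response with a useful Content-Type::

  curl -o photo.jpg "http://127.0.0.1:3456/uri/$FILECAP?filename=photo.jpg"
  curl -O "http://127.0.0.1:3456/named/$FILECAP/photo.jpg"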
If the URL has both filename= and "save=true" in the query arguments, then the server to add a "Content-Disposition: attachment" header, along with a filename= parameter. When a user clicks on such a link, most browsers will offer to let the user save the file instead of displaying it inline (indeed, most browsers will refuse to display it inline). "true", "t", "1", and other case-insensitive equivalents are all treated the same. Character-set handling in URLs and HTTP headers is a :ref:`dubious art`. For maximum compatibility, Tahoe simply copies the bytes from the filename= argument into the Content-Disposition header's filename= parameter, without trying to interpret them in any particular way. Getting Information About a File Or Directory (as HTML) ------------------------------------------------------- ``GET /uri/$FILECAP?t=info`` ``GET /uri/$DIRCAP/?t=info`` ``GET /uri/$DIRCAP/[SUBDIRS../]SUBDIR/?t=info`` ``GET /uri/$DIRCAP/[SUBDIRS../]FILENAME?t=info`` This returns a human-oriented HTML page with more detail about the selected file or directory object. This page contains the following items: * object size * storage index * JSON representation * raw contents (text/plain) * access caps (URIs): verify-cap, read-cap, write-cap (for mutable objects) * check/verify/repair form * deep-check/deep-size/deep-stats/manifest (for directories) * replace-contents form (for mutable files) Creating a Directory -------------------- ``POST /uri?t=mkdir`` This creates a new empty directory, but does not attach it to any other directory in the Tahoe-LAFS file store. If a "redirect_to_result=true" argument is provided, then the HTTP response will cause the web browser to be redirected to a /uri/$DIRCAP page that gives access to the newly-created directory. If you bookmark this page, you'll be able to get back to the directory again in the future. This is the recommended way to start working with a Tahoe server: create a new unlinked directory (using redirect_to_result=true), then bookmark the resulting /uri/$DIRCAP page. There is a "create directory" button on the Welcome page to invoke this action. This accepts a format= argument in the query string. Refer to the documentation of the PUT /uri?t=mkdir operation in `Creating A New Directory`_ for information on the behavior of the format= argument. If "redirect_to_result=true" is not provided (or is given a value of "false"), then the HTTP response body will simply be the write-cap of the new directory. ``POST /uri/$DIRCAP/[SUBDIRS../]?t=mkdir&name=CHILDNAME`` This creates a new empty directory as a child of the designated SUBDIR. This will create additional intermediate directories as necessary. This accepts a format= argument in the query string. Refer to the documentation of POST /uri/$DIRCAP/[SUBDIRS../]?t=mkdir&name=CHILDNAME in `Creating a New Directory`_ for information on the behavior of the format= argument. If a "when_done=URL" argument is provided, the HTTP response will cause the web browser to redirect to the given URL. This provides a convenient way to return the browser to the directory that was just modified. Without a when_done= argument, the HTTP response will simply contain the write-cap of the directory that was just created. Uploading a File ---------------- ``POST /uri?t=upload`` This uploads a file, and produces a file-cap for the contents, but does not attach the file to any directory in the Tahoe-LAFS file store. That is, no directories will be modified by this operation. 
The file must be provided as the "file" field of an HTML encoded form body, produced in response to an HTML form like this::
If a "when_done=URL" argument is provided, the response body will cause the browser to redirect to the given URL. If the when_done= URL has the string "%(uri)s" in it, that string will be replaced by a URL-escaped form of the newly created file-cap. (Note that without this substitution, there is no way to access the file that was just uploaded). The default (in the absence of when_done=) is to return an HTML page that describes the results of the upload. This page will contain information about which storage servers were used for the upload, how long each operation took, etc. This accepts format= and mutable=true query string arguments. Refer to `Writing/Uploading a File`_ for information on the behavior of format= and mutable=true. ``POST /uri/$DIRCAP/[SUBDIRS../]?t=upload`` This uploads a file, and attaches it as a new child of the given directory, which must be mutable. The file must be provided as the "file" field of an HTML-encoded form body, produced in response to an HTML form like this::
A "name=" argument can be provided to specify the new child's name, otherwise it will be taken from the "filename" field of the upload form (most web browsers will copy the last component of the original file's pathname into this field). To avoid confusion, name= is not allowed to contain a slash. If there is already a child with that name, and it is a mutable file, then its contents are replaced with the data being uploaded. If it is not a mutable file, the default behavior is to remove the existing child before creating a new one. To prevent this (and make the operation return an error instead of overwriting the old child), add a "replace=false" argument, as "?t=upload&replace=false". With replace=false, this operation will return an HTTP 409 "Conflict" error if there is already an object at the given location, rather than overwriting the existing object. Note that "true", "t", and "1" are all synonyms for "True", and "false", "f", and "0" are synonyms for "False". the parameter is case-insensitive. This will create additional intermediate directories as necessary, although since it is expected to be triggered by a form that was retrieved by "GET /uri/$DIRCAP/[SUBDIRS../]", it is likely that the parent directory will already exist. This accepts format= and mutable=true query string arguments. Refer to `Writing/Uploading a File`_ for information on the behavior of format= and mutable=true. If a "when_done=URL" argument is provided, the HTTP response will cause the web browser to redirect to the given URL. This provides a convenient way to return the browser to the directory that was just modified. Without a when_done= argument, the HTTP response will simply contain the file-cap of the file that was just uploaded (a write-cap for mutable files, or a read-cap for immutable files). ``POST /uri/$DIRCAP/[SUBDIRS../]FILENAME?t=upload`` This also uploads a file and attaches it as a new child of the given directory, which must be mutable. It is a slight variant of the previous operation, as the URL refers to the target file rather than the parent directory. It is otherwise identical: this accepts mutable= and when_done= arguments too. ``POST /uri/$FILECAP?t=upload`` This modifies the contents of an existing mutable file in-place. An error is signalled if $FILECAP does not refer to a mutable file. It behaves just like the "PUT /uri/$FILECAP" form, but uses a POST for the benefit of HTML forms in a web browser. Attaching An Existing File Or Directory (by URI) ------------------------------------------------ ``POST /uri/$DIRCAP/[SUBDIRS../]?t=uri&name=CHILDNAME&uri=CHILDCAP`` This attaches a given read- or write- cap "CHILDCAP" to the designated directory, with a specified child name. This behaves much like the PUT t=uri operation, and is a lot like a UNIX hardlink. It is subject to the same restrictions as that operation on the use of cap formats unknown to the web-API server. This will create additional intermediate directories as necessary, although since it is expected to be triggered by a form that was retrieved by "GET /uri/$DIRCAP/[SUBDIRS../]", it is likely that the parent directory will already exist. This accepts the same replace= argument as POST t=upload. Unlinking a Child ----------------- ``POST /uri/$DIRCAP/[SUBDIRS../]?t=delete&name=CHILDNAME`` ``POST /uri/$DIRCAP/[SUBDIRS../]?t=unlink&name=CHILDNAME`` (Tahoe >= v1.9) This instructs the node to remove a child object (file or subdirectory) from the given directory, which must be mutable. 
Note that the entire subtree is unlinked from the parent. Unlike deleting a subdirectory in a UNIX local filesystem, the subtree need not be empty; if it isn't, then other references into the subtree will see that the child subdirectories are not modified by this operation. Only the link from the given directory to its child is severed. In Tahoe-LAFS v1.9.0 and later, t=unlink can be used as a synonym for t=delete. If interoperability with older web-API servers is required, t=delete should be used. Renaming a Child ---------------- ``POST /uri/$DIRCAP/[SUBDIRS../]?t=rename&from_name=OLD&to_name=NEW`` This instructs the node to rename a child of the given directory, which must be mutable. This has a similar effect to removing the child, then adding the same child-cap under the new name, except that it preserves metadata. This operation cannot move the child to a different directory. The default behavior is to overwrite any existing link at the destination (replace=true). To prevent this (and make the operation return an error instead of overwriting), add a "replace=false" argument. With replace=false, this operation will return an HTTP 409 "Conflict" error if the destination is not the same link as the source and there is already a link at the destination, rather than overwriting the existing link. To allow the operation to overwrite a link to a file, but return an HTTP 409 error when trying to overwrite a link to a directory, use "replace=only-files" (this behavior is closer to the traditional UNIX "mv" command). Note that "true", "t", and "1" are all synonyms for "True"; "false", "f", and "0" are synonyms for "False"; and the parameter is case-insensitive. Relinking ("Moving") a Child ---------------------------- ``POST /uri/$DIRCAP/[SUBDIRS../]?t=relink&from_name=OLD&to_dir=$NEWDIRCAP/[NEWSUBDIRS../]&to_name=NEW`` ``[&replace=true|false|only-files]`` (Tahoe >= v1.10) This instructs the node to move a child of the given source directory, into a different directory and/or to a different name. The command is named ``relink`` because what it does is add a new link to the child from the new location, then remove the old link. Nothing is actually "moved": the child is still reachable through any path from which it was formerly reachable, and the storage space occupied by its ciphertext is not affected. The source and destination directories must be writeable. If ``to_dir`` is not present, the child link is renamed within the same directory. If ``to_name`` is not present then it defaults to ``from_name``. If the destination link (directory and name) is the same as the source link, the operation has no effect. Metadata from the source directory entry is preserved. Multiple levels of descent in the source and destination paths are supported. This operation will return an HTTP 404 "Not Found" error if ``$DIRCAP/[SUBDIRS../]``, the child being moved, or the destination directory does not exist. It will return an HTTP 400 "Bad Request" error if any entry in the source or destination paths is not a directory. The default behavior is to overwrite any existing link at the destination (replace=true). To prevent this (and make the operation return an error instead of overwriting), add a "replace=false" argument. With replace=false, this operation will return an HTTP 409 "Conflict" error if the destination is not the same link as the source and there is already a link at the destination, rather than overwriting the existing link. 
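
As a sketch of how this looks from a script (again assuming the third-party ``requests`` library, a gateway on the default web port, and placeholder caps; the ``replace=only-files`` variant is described next)::

   import requests

   GATEWAY = "http://127.0.0.1:3456"
   SRC_DIRCAP = "URI:DIR2:ssss:tttt"   # placeholder source dircap
   DST_DIRCAP = "URI:DIR2:uuuu:vvvv"   # placeholder destination dircap

   r = requests.post(f"{GATEWAY}/uri/{SRC_DIRCAP}/",
                     params={"t": "relink",
                             "from_name": "draft.txt",
                             "to_dir": DST_DIRCAP,
                             "to_name": "final.txt",
                             "replace": "false"})
   if r.status_code == 409:
       print("destination already has a child named final.txt")
   elif r.status_code == 404:
       print("source child or destination directory not found")
   else:
       r.raise_for_status()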
To allow the operation to overwrite a link to a file, but return an HTTP 409 error when trying to overwrite a link to a directory, use "replace=only-files" (this behavior is closer to the traditional UNIX "mv" command). Note that "true", "t", and "1" are all synonyms for "True"; "false", "f", and "0" are synonyms for "False"; and the parameter is case-insensitive. When relinking into a different directory, for safety, the child link is not removed from the old directory until it has been successfully added to the new directory. This implies that in case of a crash or failure, the link to the child will not be lost, but it could be linked at both the old and new locations. The source link should not be the same as any link (directory and child name) in the ``to_dir`` path. This restriction is not enforced, but it may be enforced in a future version. If it were violated then the result would be to create a cycle in the directory structure that is not necessarily reachable from the root of the destination path (``$NEWDIRCAP``), which could result in data loss, as described in ticket `#943`_. .. _`#943`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/943 Other Utilities --------------- ``GET /uri?uri=$CAP`` This causes a redirect to /uri/$CAP, and retains any additional query arguments (like filename= or save=). This is for the convenience of web forms which allow the user to paste in a read- or write- cap (obtained through some out-of-band channel, like IM or email). Note that this form merely redirects to the specific file or directory indicated by the $CAP: unlike the GET /uri/$DIRCAP form, you cannot traverse to children by appending additional path segments to the URL. ``GET /uri/$DIRCAP/[SUBDIRS../]?t=rename-form&name=$CHILDNAME`` This provides a useful facility to browser-based user interfaces. It returns a page containing a form targetting the "POST $DIRCAP t=rename" functionality described above, with the provided $CHILDNAME present in the 'from_name' field of that form. I.e. this presents a form offering to rename $CHILDNAME, requesting the new name, and submitting POST rename. This same URL format can also be used with "move-form" with the expected results. ``GET /uri/$DIRCAP/[SUBDIRS../]CHILDNAME?t=uri`` This returns the file- or directory- cap for the specified object. ``GET /uri/$DIRCAP/[SUBDIRS../]CHILDNAME?t=readonly-uri`` This returns a read-only file- or directory- cap for the specified object. If the object is an immutable file, this will return the same value as t=uri. Debugging and Testing Features ------------------------------ These URLs are less-likely to be helpful to the casual Tahoe user, and are mainly intended for developers. ``POST $URL?t=check`` This triggers the FileChecker to determine the current "health" of the given file or directory, by counting how many shares are available. The page that is returned will display the results. This can be used as a "show me detailed information about this file" page. If a verify=true argument is provided, the node will perform a more intensive check, downloading and verifying every single bit of every share. If an add-lease=true argument is provided, the node will also add (or renew) a lease to every share it encounters. Each lease will keep the share alive for a certain period of time (one month by default). Once the last lease expires or is explicitly cancelled, the storage server is allowed to delete the share. 
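
Invoking a check from a script is straightforward. The sketch below is illustrative only: it assumes the third-party ``requests`` library and a placeholder cap, and uses the output=JSON form described next::

   import requests

   GATEWAY = "http://127.0.0.1:3456"
   CAP = "URI:CHK:aaaa:bbbb:3:10:1234"   # placeholder filecap

   r = requests.post(f"{GATEWAY}/uri/{CAP}",
                     params={"t": "check", "verify": "true",
                             "add-lease": "true", "output": "JSON"})
   r.raise_for_status()
   print("healthy:", r.json()["results"]["healthy"])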
If an output=JSON argument is provided, the response will be machine-readable JSON instead of human-oriented HTML. The data is a dictionary with the following keys:: storage-index: a base32-encoded string with the object's storage index, or an empty string for LIT files summary: a string, with a one-line summary of the stats of the file results: a dictionary that describes the state of the file. For LIT files, this dictionary has only the 'healthy' key, which will always be True. For distributed files, this dictionary has the following keys: count-happiness: the servers-of-happiness level of the file, as defined in docs/specifications/servers-of-happiness. count-shares-good: the number of good shares that were found count-shares-needed: 'k', the number of shares required for recovery count-shares-expected: 'N', the number of total shares generated count-good-share-hosts: the number of distinct storage servers with good shares. Note that a high value does not necessarily imply good share distribution, because some of these servers may only hold duplicate shares. count-wrong-shares: for mutable files, the number of shares for versions other than the 'best' one (highest sequence number, highest roothash). These are either old, or created by an uncoordinated or not fully successful write. count-recoverable-versions: for mutable files, the number of recoverable versions of the file. For a healthy file, this will equal 1. count-unrecoverable-versions: for mutable files, the number of unrecoverable versions of the file. For a healthy file, this will be 0. count-corrupt-shares: the number of shares with integrity failures list-corrupt-shares: a list of "share locators", one for each share that was found to be corrupt. Each share locator is a list of (serverid, storage_index, sharenum). servers-responding: list of base32-encoded storage server identifiers, one for each server which responded to the share query. healthy: (bool) True if the file is completely healthy, False otherwise. Healthy files have at least N good shares. Overlapping shares do not currently cause a file to be marked unhealthy. If there are at least N good shares, then corrupt shares do not cause the file to be marked unhealthy, although the corrupt shares will be listed in the results (list-corrupt-shares) and should be manually removed to avoid wasting time in subsequent downloads (as the downloader rediscovers the corruption and uses alternate shares). Future compatibility: the meaning of this field may change to reflect whether the servers-of-happiness criterion is met (see ticket #614). sharemap: dict mapping share identifier to list of serverids (base32-encoded strings). This indicates which servers are holding which shares. For immutable files, the shareid is an integer (the share number, from 0 to N-1). For mutable files, it is a string of the form 'seq%d-%s-sh%d', containing the sequence number, the roothash, and the share number. Before Tahoe-LAFS v1.11, the ``results`` dictionary also had a ``needs-rebalancing`` field, but that has been removed since it was computed incorrectly. ``POST $URL?t=start-deep-check`` (must add &ophandle=XYZ) This initiates a recursive walk of all files and directories reachable from the target, performing a check on each one just like t=check. The result page will contain a summary of the results, including details on any file/directory that was not fully healthy. t=start-deep-check can only be invoked on a directory. An error (400 BAD_REQUEST) will be signalled if it is invoked on a file.
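
A deep-check is typically driven by starting the operation with an ophandle and then polling the corresponding /operations page, as described in the following paragraphs. A rough sketch, assuming the third-party ``requests`` library, a placeholder dircap, and an arbitrary client-chosen handle string::

   import time
   import requests

   GATEWAY = "http://127.0.0.1:3456"
   DIRCAP = "URI:DIR2:xxxx:yyyy"   # placeholder dircap
   HANDLE = "deep-check-1"         # arbitrary client-chosen ophandle

   requests.post(f"{GATEWAY}/uri/{DIRCAP}/",
                 params={"t": "start-deep-check", "ophandle": HANDLE})

   while True:
       status = requests.get(f"{GATEWAY}/operations/{HANDLE}",
                             params={"output": "JSON"}).json()
       if status.get("finished"):
           break
       time.sleep(10)

   print("objects checked:", status["count-objects-checked"])
   print("objects unhealthy:", status["count-objects-unhealthy"])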
The recursive walker will deal with loops safely. This accepts the same verify= and add-lease= arguments as t=check. Since this operation can take a long time (perhaps a second per object), the ophandle= argument is required (see "Slow Operations, Progress, and Cancelling" above). The response to this POST will be a redirect to the corresponding /operations/$HANDLE page (with output=HTML or output=JSON to match the output= argument given to the POST). The deep-check operation will continue to run in the background, and the /operations page should be used to find out when the operation is done. Detailed check results for non-healthy files and directories will be available under /operations/$HANDLE/$STORAGEINDEX, and the HTML status will contain links to these detailed results. The HTML /operations/$HANDLE page for incomplete operations will contain a meta-refresh tag, set to 60 seconds, so that a browser which uses deep-check will automatically poll until the operation has completed. The JSON page (/operations/$HANDLE?output=JSON) will contain a machine-readable JSON dictionary with the following keys:: finished: a boolean, True if the operation is complete, else False. Some of the remaining keys may not be present until the operation is complete. root-storage-index: a base32-encoded string with the storage index of the starting point of the deep-check operation count-objects-checked: count of how many objects were checked. Note that non-distributed objects (i.e. small immutable LIT files) are not checked, since for these objects, the data is contained entirely in the URI. count-objects-healthy: how many of those objects were completely healthy count-objects-unhealthy: how many were damaged in some way count-corrupt-shares: how many shares were found to have corruption, summed over all objects examined list-corrupt-shares: a list of "share identifiers", one for each share that was found to be corrupt. Each share identifier is a list of (serverid, storage_index, sharenum). list-unhealthy-files: a list of (pathname, check-results) tuples, for each file that was not fully healthy. 'pathname' is a list of strings (which can be joined by "/" characters to turn it into a single string), relative to the directory on which deep-check was invoked. The 'check-results' field is the same as that returned by t=check&output=JSON, described above. stats: a dictionary with the same keys as the t=start-deep-stats command (described below) ``POST $URL?t=stream-deep-check`` This initiates a recursive walk of all files and directories reachable from the target, performing a check on each one just like t=check. For each unique object (duplicates are skipped), a single line of JSON is emitted to the HTTP response channel (or an error indication, see below). When the walk is complete, a final line of JSON is emitted which contains the accumulated file-size/count "deep-stats" data. This command takes the same arguments as t=start-deep-check. A CLI tool can split the response stream on newlines into "response units", and parse each response unit as JSON. Each such parsed unit will be a dictionary, and will contain at least the "type" key: a string, one of "file", "directory", or "stats".
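
For example, a client might consume the stream like this (a sketch assuming the third-party ``requests`` library and a placeholder dircap; the per-unit keys and the "ERROR:" convention are described below)::

   import json
   import requests

   GATEWAY = "http://127.0.0.1:3456"
   DIRCAP = "URI:DIR2:xxxx:yyyy"   # placeholder dircap

   with requests.post(f"{GATEWAY}/uri/{DIRCAP}/",
                      params={"t": "stream-deep-check"}, stream=True) as r:
       r.raise_for_status()
       for line in r.iter_lines(decode_unicode=True):
           if not line:
               continue
           if line.startswith("ERROR:"):
               print("traversal stopped:", line)
               break
           unit = json.loads(line)
           if unit["type"] == "stats":
               print("deep-stats:", unit)
           else:
               print(unit["type"], "/".join(unit["path"]))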
For all units that have a type of "file" or "directory", the dictionary will contain the following keys:: "path": a list of strings, with the path that is traversed to reach the object "cap": a write-cap URI for the file or directory, if available, else a read-cap URI "verifycap": a verify-cap URI for the file or directory "repaircap": an URI for the weakest cap that can still be used to repair the object "storage-index": a base32 storage index for the object "check-results": a copy of the dictionary which would be returned by t=check&output=json, with three top-level keys: "storage-index", "summary", and "results", and a variety of counts and sharemaps in the "results" value. Note that non-distributed files (i.e. LIT files) will have values of None for verifycap, repaircap, and storage-index, since these files can neither be verified nor repaired, and are not stored on the storage servers. Likewise the check-results dictionary will be limited: an empty string for storage-index, and a results dictionary with only the "healthy" key. The last unit in the stream will have a type of "stats", and will contain the keys described in the "start-deep-stats" operation, below. If any errors occur during the traversal (specifically if a directory is unrecoverable, such that further traversal is not possible), an error indication is written to the response body, instead of the usual line of JSON. This error indication line will begin with the string "ERROR:" (in all caps), and contain a summary of the error on the rest of the line. The remaining lines of the response body will be a python exception. The client application should look for the ERROR: and stop processing JSON as soon as it is seen. Note that neither a file being unrecoverable nor a directory merely being unhealthy will cause traversal to stop. The line just before the ERROR: will describe the directory that was untraversable, since the unit is emitted to the HTTP response body before the child is traversed. ``POST $URL?t=check&repair=true`` This performs a health check of the given file or directory, and if the checker determines that the object is not healthy (some shares are missing or corrupted), it will perform a "repair". During repair, any missing shares will be regenerated and uploaded to new servers. This accepts the same verify=true and add-lease= arguments as t=check. When an output=JSON argument is provided, the machine-readable JSON response will contain the following keys:: storage-index: a base32-encoded string with the objects's storage index, or an empty string for LIT files repair-attempted: (bool) True if repair was attempted repair-successful: (bool) True if repair was attempted and the file was fully healthy afterwards. False if no repair was attempted, or if a repair attempt failed. pre-repair-results: a dictionary that describes the state of the file before any repair was performed. This contains exactly the same keys as the 'results' value of the t=check response, described above. post-repair-results: a dictionary that describes the state of the file after any repair was performed. If no repair was performed, post-repair-results and pre-repair-results will be the same. This contains exactly the same keys as the 'results' value of the t=check response, described above. ``POST $URL?t=start-deep-check&repair=true`` (must add &ophandle=XYZ) This triggers a recursive walk of all files and directories, performing a t=check&repair=true on each one. 
Like t=start-deep-check without the repair= argument, this can only be invoked on a directory. An error (400 BAD_REQUEST) will be signalled if it is invoked on a file. The recursive walker will deal with loops safely. This accepts the same verify= and add-lease= arguments as t=start-deep-check. It uses the same ophandle= mechanism as start-deep-check. When an output=JSON argument is provided, the response will contain the following keys:: finished: (bool) True if the operation has completed, else False root-storage-index: a base32-encoded string with the storage index of the starting point of the deep-check operation count-objects-checked: count of how many objects were checked count-objects-healthy-pre-repair: how many of those objects were completely healthy, before any repair count-objects-unhealthy-pre-repair: how many were damaged in some way count-objects-healthy-post-repair: how many of those objects were completely healthy, after any repair count-objects-unhealthy-post-repair: how many were damaged in some way count-repairs-attempted: repairs were attempted on this many objects. count-repairs-successful: how many repairs resulted in healthy objects count-repairs-unsuccessful: how many repairs did not result in completely healthy objects count-corrupt-shares-pre-repair: how many shares were found to have corruption, summed over all objects examined, before any repair count-corrupt-shares-post-repair: how many shares were found to have corruption, summed over all objects examined, after any repair list-corrupt-shares: a list of "share identifiers", one for each share that was found to be corrupt (before any repair). Each share identifier is a list of (serverid, storage_index, sharenum). list-remaining-corrupt-shares: like list-corrupt-shares, but mutable shares that were successfully repaired are not included. These are shares that need manual processing. Since immutable shares cannot be modified by clients, all corruption in immutable shares will be listed here. list-unhealthy-files: a list of (pathname, check-results) tuples, for each file that was not fully healthy. 'pathname' is relative to the directory on which deep-check was invoked. The 'check-results' field is the same as that returned by t=check&repair=true&output=JSON, described above. stats: a dictionary with the same keys as the t=start-deep-stats command (described below) ``POST $URL?t=stream-deep-check&repair=true`` This triggers a recursive walk of all files and directories, performing a t=check&repair=true on each one. For each unique object (duplicates are skipped), a single line of JSON is emitted to the HTTP response channel (or an error indication). When the walk is complete, a final line of JSON is emitted which contains the accumulated file-size/count "deep-stats" data. This emits the same data as t=stream-deep-check (without the repair=true), except that the "check-results" field is replaced with a "check-and-repair-results" field, which contains the keys returned by t=check&repair=true&output=json (i.e. repair-attempted, repair-successful, pre-repair-results, and post-repair-results). The output does not contain the summary dictionary that is provided by t=start-deep-check&repair=true (the one with count-objects-checked and list-unhealthy-files), since the receiving client is expected to calculate those values itself from the stream of per-object check-and-repair-results.
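
A sketch of such a client-side summary, assuming the third-party ``requests`` library and a placeholder dircap::

   import json
   import requests

   GATEWAY = "http://127.0.0.1:3456"
   DIRCAP = "URI:DIR2:xxxx:yyyy"   # placeholder dircap

   checked = attempted = still_unhealthy = 0
   with requests.post(f"{GATEWAY}/uri/{DIRCAP}/",
                      params={"t": "stream-deep-check", "repair": "true"},
                      stream=True) as r:
       r.raise_for_status()
       for line in r.iter_lines(decode_unicode=True):
           if not line:
               continue
           if line.startswith("ERROR:"):
               break   # traversal has stopped; see the note below
           unit = json.loads(line)
           if unit["type"] not in ("file", "directory"):
               continue
           results = unit["check-and-repair-results"]
           checked += 1
           if results["repair-attempted"]:
               attempted += 1
           if not results["post-repair-results"]["healthy"]:
               still_unhealthy += 1

   print(checked, "objects checked,", attempted, "repairs attempted,",
         still_unhealthy, "still unhealthy")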
Note that the "ERROR:" indication will only be emitted if traversal stops, which will only occur if an unrecoverable directory is encountered. If a file or directory repair fails, the traversal will continue, and the repair failure will be indicated in the JSON data (in the "repair-successful" key). ``POST $DIRURL?t=start-manifest`` (must add &ophandle=XYZ) This operation generates a "manifest" of the given directory tree, mostly for debugging. This is a table of (path, filecap/dircap), for every object reachable from the starting directory. The path will be slash-joined, and the filecap/dircap will contain a link to the object in question. This page gives immediate access to every object in the file store subtree. This operation uses the same ophandle= mechanism as deep-check. The corresponding /operations/$HANDLE page has three different forms. The default is output=HTML. If output=text is added to the query args, the results will be a text/plain list. The first line is special: it is either "finished: yes" or "finished: no"; if the operation is not finished, you must periodically reload the page until it completes. The rest of the results are a plaintext list, with one file/dir per line, slash-separated, with the filecap/dircap separated by a space. If output=JSON is added to the query args, then the results will be a JSON-formatted dictionary with six keys. Note that because large directory structures can result in very large JSON results, the full results will not be available until the operation is complete (i.e. until output["finished"] is True):: finished (bool): if False then you must reload the page until True origin_si (base32 str): the storage index of the starting point manifest: list of (path, cap) tuples, where path is a list of strings. verifycaps: list of (printable) verify cap strings storage-index: list of (base32) storage index strings stats: a dictionary with the same keys as the t=start-deep-stats command (described below) ``POST $DIRURL?t=start-deep-size`` (must add &ophandle=XYZ) This operation generates a number (in bytes) containing the sum of the filesize of all directories and immutable files reachable from the given directory. This is a rough lower bound of the total space consumed by this subtree. It does not include space consumed by mutable files, nor does it take expansion or encoding overhead into account. Later versions of the code may improve this estimate upwards. The /operations/$HANDLE status output consists of two lines of text:: finished: yes size: 1234 ``POST $DIRURL?t=start-deep-stats`` (must add &ophandle=XYZ) This operation performs a recursive walk of all files and directories reachable from the given directory, and generates a collection of statistics about those objects. The result (obtained from the /operations/$OPHANDLE page) is a JSON-serialized dictionary with the following keys (note that some of these keys may be missing until 'finished' is True):: finished: (bool) True if the operation has finished, else False api-version: (int), the version number of the deep-stats API. It will be increased every time a backwards-incompatible change is introduced. Current version is 1.
count-immutable-files: count of how many CHK files are in the set count-mutable-files: same, for mutable files (does not include directories) count-literal-files: same, for LIT files (data contained inside the URI) count-files: sum of the above three count-directories: count of directories count-unknown: count of unrecognized objects (perhaps from the future) size-immutable-files: total bytes for all CHK files in the set, =deep-size size-mutable-files (TODO): same, for current version of all mutable files size-literal-files: same, for LIT files size-directories: size of directories (includes size-literal-files) size-files-histogram: list of (minsize, maxsize, count) buckets, with a histogram of filesizes, 5dB/bucket, for both literal and immutable files largest-directory: number of children in the largest directory largest-immutable-file: number of bytes in the largest CHK file size-mutable-files is not implemented, because it would require extra queries to each mutable file to get their size. This may be implemented in the future. Assuming no sharing, the basic space consumed by a single root directory is the sum of size-immutable-files, size-mutable-files, and size-directories. The actual disk space used by the shares is larger, because of the following sources of overhead:: integrity data expansion due to erasure coding share management data (leases) backend (ext3) minimum block size ``POST $URL?t=stream-manifest`` This operation performs a recursive walk of all files and directories reachable from the given starting point. For each such unique object (duplicates are skipped), a single line of JSON is emitted to the HTTP response channel (or an error indication, see below). When the walk is complete, a final line of JSON is emitted which contains the accumulated file-size/count "deep-stats" data. A CLI tool can split the response stream on newlines into "response units", and parse each response unit as JSON. Each such parsed unit will be a dictionary, and will contain at least the "type" key: a string, one of "file", "directory", or "stats". For all units that have a type of "file" or "directory", the dictionary will contain the following keys:: "path": a list of strings, with the path that is traversed to reach the object "cap": a write-cap URI for the file or directory, if available, else a read-cap URI "verifycap": a verify-cap URI for the file or directory "repaircap": an URI for the weakest cap that can still be used to repair the object "storage-index": a base32 storage index for the object Note that non-distributed files (i.e. LIT files) will have values of None for verifycap, repaircap, and storage-index, since these files can neither be verified nor repaired, and are not stored on the storage servers. The last unit in the stream will have a type of "stats", and will contain the keys described in the "start-deep-stats" operation, below. If any errors occur during the traversal (specifically if a directory is unrecoverable, such that further traversal is not possible), an error indication is written to the response body, instead of the usual line of JSON. This error indication line will begin with the string "ERROR:" (in all caps), and contain a summary of the error on the rest of the line. The remaining lines of the response body will be a python exception. The client application should look for the ERROR: and stop processing JSON as soon as it is seen. 
The line just before the ERROR: will describe the directory that was untraversable, since the manifest entry is emitted to the HTTP response body before the child is traversed. Other Useful Pages ================== The portion of the web namespace that begins with "/uri" (and "/named") is dedicated to giving users (both humans and programs) access to the Tahoe-LAFS file store. The rest of the namespace provides status information about the state of the Tahoe-LAFS node. ``GET /`` (the root page) This is the "Welcome Page", and contains a few distinct sections:: Node information: library versions, local nodeid, services being provided. File store access forms: create a new directory, view a file/directory by URI, upload a file (unlinked), download a file by URI. Grid status: introducer information, helper information, connected storage servers. ``GET /?t=json`` (the json welcome page) This is the "json Welcome Page", and contains connectivity status of the introducer(s) and storage server(s), here's an example:: { "introducers": { "statuses": [] }, "servers": [{ "nodeid": "other_nodeid", "available_space": 123456, "nickname": "George \u263b", "version": "1.0", "connection_status": "summary", "last_received_data": 1487811257 }] } The above json ``introducers`` section includes a list of introducer connectivity status messages. The above json ``servers`` section is an array with map elements. Each map has the following properties: 1. ``nodeid`` - an identifier derived from the node's public key 2. ``available_space`` - the available space in bytes expressed as an integer 3. ``nickname`` - the storage server nickname 4. ``version`` - the storage server Tahoe-LAFS version 5. ``connection_status`` - connectivity status 6. ``last_received_data`` - the time when data was last received, expressed in seconds since epoch ``GET /status/`` This page lists all active uploads and downloads, and contains a short list of recent upload/download operations. Each operation has a link to a page that describes file sizes, servers that were involved, and the time consumed in each phase of the operation. A GET of /status/?t=json will contain a machine-readable subset of the same data. It returns a JSON-encoded dictionary. The only key defined at this time is "active", with a value that is a list of operation dictionaries, one for each active operation. Once an operation is completed, it will no longer appear in data["active"] . Each op-dict contains a "type" key, one of "upload", "download", "mapupdate", "publish", or "retrieve" (the first two are for immutable files, while the latter three are for mutable files and directories). The "upload" op-dict will contain the following keys:: type (string): "upload" storage-index-string (string): a base32-encoded storage index total-size (int): total size of the file status (string): current status of the operation progress-hash (float): 1.0 when the file has been hashed progress-ciphertext (float): 1.0 when the file has been encrypted. progress-encode-push (float): 1.0 when the file has been encoded and pushed to the storage servers. For helper uploads, the ciphertext value climbs to 1.0 first, then encoding starts. For unassisted uploads, ciphertext and encode-push progress will climb at the same pace. 
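
For example, a front-end could list the active uploads like this (a sketch assuming the third-party ``requests`` library and a gateway on the default web port)::

   import requests

   GATEWAY = "http://127.0.0.1:3456"

   data = requests.get(f"{GATEWAY}/status/", params={"t": "json"}).json()
   for op in data["active"]:
       if op["type"] != "upload":
           continue
       print("upload", op["storage-index-string"],
             "status:", op["status"],
             "pushed: %d%%" % (100 * op["progress-encode-push"]))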
The "download" op-dict will contain the following keys:: type (string): "download" storage-index-string (string): a base32-encoded storage index total-size (int): total size of the file status (string): current status of the operation progress (float): 1.0 when the file has been fully downloaded Front-ends which want to report progress information are advised to simply average together all the progress-* indicators. A slightly more accurate value can be found by ignoring the progress-hash value (since the current implementation hashes synchronously, so clients will probably never see progress-hash!=1.0). ``GET /helper_status/`` If the node is running a helper (i.e. if [helper]enabled is set to True in tahoe.cfg), then this page will provide a list of all the helper operations currently in progress. If "?t=json" is added to the URL, it will return a JSON-formatted list of helper statistics, which can then be used to produce graphs to indicate how busy the helper is. ``GET /statistics/`` This page provides "node statistics", which are collected from a variety of sources:: load_monitor: every second, the node schedules a timer for one second in the future, then measures how late the subsequent callback is. The "load_average" is this tardiness, measured in seconds, averaged over the last minute. It is an indication of a busy node, one which is doing more work than can be completed in a timely fashion. The "max_load" value is the highest value that has been seen in the last 60 seconds. cpu_monitor: every minute, the node uses time.clock() to measure how much CPU time it has used, and it uses this value to produce 1min/5min/15min moving averages. These values range from 0% (0.0) to 100% (1.0), and indicate what fraction of the CPU has been used by the Tahoe node. Not all operating systems provide meaningful data to time.clock(): they may report 100% CPU usage at all times. uploader: this counts how many immutable files (and bytes) have been uploaded since the node was started downloader: this counts how many immutable files have been downloaded since the node was started publishes: this counts how many mutable files (including directories) have been modified since the node was started retrieves: this counts how many mutable files (including directories) have been read since the node was started There are other statistics that are tracked by the node. The "raw stats" section shows a formatted dump of all of them. By adding "?t=json" to the URL, the node will return a JSON-formatted dictionary of stats values, which can be used by other tools to produce graphs of node behavior. The misc/munin/ directory in the source distribution provides some tools to produce these graphs. ``GET /`` (introducer status) For Introducer nodes, the welcome page displays information about both clients and servers which are connected to the introducer. Servers make "service announcements", and these are listed in a table. Clients will subscribe to hear about service announcements, and these subscriptions are listed in a separate table. Both tables contain information about what version of Tahoe is being run by the remote node, their advertised and outbound IP addresses, their nodeid and nickname, and how long they have been available. By adding "?t=json" to the URL, the node will return a JSON-formatted dictionary of stats values, which can be used to produce graphs of connected clients over time. 
This dictionary has the following keys:: ["subscription_summary"] : a dictionary mapping service name (like "storage") to an integer with the number of clients that have subscribed to hear about that service ["announcement_summary"] : a dictionary mapping service name to an integer with the number of servers which are announcing that service ["announcement_distinct_hosts"] : a dictionary mapping service name to an integer which represents the number of distinct hosts that are providing that service. If two servers have announced FURLs which use the same hostnames (but different ports and tubids), they are considered to be on the same host. Static Files in /public_html ============================ The web-API server will take any request for a URL that starts with /static and serve it from a configurable directory which defaults to $BASEDIR/public_html . This is configured by setting the "[node]web.static" value in $BASEDIR/tahoe.cfg . If this is left at the default value of "public_html", then http://127.0.0.1:3456/static/subdir/foo.html will be served with the contents of the file $BASEDIR/public_html/subdir/foo.html . This can be useful to serve a javascript application which provides a prettier front-end to the rest of the Tahoe web-API. Safety and Security Issues -- Names vs. URIs ============================================ Summary: use explicit file- and dir- caps whenever possible, to reduce the potential for surprises when the file store structure is changed. Tahoe-LAFS provides a mutable file store, but the ways that the store can change are limited. The only things that can change are: * the mapping from child names to child objects inside mutable directories (by adding a new child, removing an existing child, or changing an existing child to point to a different object) * the contents of mutable files Obviously if you query for information about the file store and then act to change it (such as by getting a listing of the contents of a mutable directory and then adding a file to the directory), then the store might have been changed after you queried it and before you acted upon it. However, if you use the URI instead of the pathname of an object when you act upon the object, then it will be the same object; only its contents can change (if it is mutable). If, on the other hand, you act upon the object using its pathname, then a different object might be in that place, which can result in more kinds of surprises. For example, suppose you are writing code which recursively downloads the contents of a directory. The first thing your code does is fetch the listing of the contents of the directory. For each child that it fetched, if that child is a file then it downloads the file, and if that child is a directory then it recurses into that directory. Now, if the download and the recurse actions are performed using the child's name, then the results might be wrong, because for example a child name that pointed to a subdirectory when you listed the directory might have been changed to point to a file (in which case your attempt to recurse into it would result in an error), or a child name that pointed to a file when you listed the directory might now point to a subdirectory (in which case your attempt to download the child would result in a file containing HTML text describing the subdirectory!). If your recursive algorithm uses the URI of the child instead of the name of the child, then those kinds of mistakes just can't happen. 
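
A minimal sketch of such a cap-based traversal is shown below. It assumes the GET ?t=json directory-listing format described earlier in this document and the third-party ``requests`` library, and omits error handling::

   import requests

   GATEWAY = "http://127.0.0.1:3456"

   def walk(dircap, path=()):
       nodetype, info = requests.get(f"{GATEWAY}/uri/{dircap}",
                                     params={"t": "json"}).json()
       for name, (childtype, child) in info["children"].items():
           childcap = child.get("rw_uri") or child["ro_uri"]
           if childtype == "dirnode":
               walk(childcap, path + (name,))    # recurse by cap, not by name
           else:
               data = requests.get(f"{GATEWAY}/uri/{childcap}").content
               print("/".join(path + (name,)), len(data), "bytes")

   walk("URI:DIR2:xxxx:yyyy")   # placeholder starting dircap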
Note that both the child's name and the child's URI are included in the results of listing the parent directory, so it isn't any harder to use the URI for this purpose. The read and write caps in a given directory node are separate URIs, and can't be assumed to point to the same object even if they were retrieved in the same operation (although the web-API server attempts to ensure this in most cases). If you need to rely on that property, you should explicitly verify it. More generally, you should not make assumptions about the internal consistency of the contents of mutable directories. As a result of the signatures on mutable object versions, it is guaranteed that a given version was written in a single update, but -- as in the case of a file -- the contents may have been chosen by a malicious writer in a way that is designed to confuse applications that rely on their consistency. In general, use names if you want "whatever object (whether file or directory) is found by following this name (or sequence of names) when my request reaches the server". Use URIs if you want "this particular object". Concurrency Issues ================== Tahoe uses both mutable and immutable files. Mutable files can be created explicitly by doing an upload with ?mutable=true added, or implicitly by creating a new directory (since a directory is just a special way to interpret a given mutable file). Mutable files suffer from the same consistency-vs-availability tradeoff that all distributed data storage systems face. It is not possible to simultaneously achieve perfect consistency and perfect availability in the face of network partitions (servers being unreachable or faulty). Tahoe tries to achieve a reasonable compromise, but there is a basic rule in place, known as the Prime Coordination Directive: "Don't Do That". What this means is that if write-access to a mutable file is available to several parties, then those parties are responsible for coordinating their activities to avoid multiple simultaneous updates. This could be achieved by having these parties talk to each other and using some sort of locking mechanism, or by serializing all changes through a single writer. The consequences of performing uncoordinated writes can vary. Some of the writers may lose their changes, as somebody else wins the race condition. In many cases the file will be left in an "unhealthy" state, meaning that there are not as many redundant shares as we would like (reducing the reliability of the file against server failures). In the worst case, the file can be left in such an unhealthy state that no version is recoverable, even the old ones. It is this small possibility of data loss that prompts us to issue the Prime Coordination Directive. Tahoe nodes implement internal serialization to make sure that a single Tahoe node cannot conflict with itself. For example, it is safe to issue two directory modification requests to a single tahoe node's web-API server at the same time, because the Tahoe node will internally delay one of them until after the other has finished being applied. (This feature was introduced in Tahoe-1.1; back with Tahoe-1.0 the web client was responsible for serializing web requests themselves). For more details, please see the "Consistency vs Availability" and "The Prime Coordination Directive" sections of :doc:`../specifications/mutable`. Access Blacklist ================ Gateway nodes may find it necessary to prohibit access to certain files. 
The web-API has a facility to block access to filecaps by their storage index, returning a 403 "Forbidden" error instead of the original file. This blacklist is recorded in $NODEDIR/access.blacklist, and contains one blocked file per line. Comment lines (starting with ``#``) are ignored. Each line consists of the storage-index (in the usual base32 format as displayed by the "More Info" page, or by the "tahoe debug dump-cap" command), followed by whitespace, followed by a reason string, which will be included in the 403 error message. This could hold a URL to a page that explains why the file is blocked, for example. So for example, if you found a need to block access to a file with filecap ``URI:CHK:n7r3m6wmomelk4sep3kw5cvduq:os7ijw5c3maek7pg65e5254k2fzjflavtpejjyhshpsxuqzhcwwq:3:20:14861``, you could do the following:: tahoe debug dump-cap URI:CHK:n7r3m6wmomelk4sep3kw5cvduq:os7ijw5c3maek7pg65e5254k2fzjflavtpejjyhshpsxuqzhcwwq:3:20:14861 -> storage index: whpepioyrnff7orecjolvbudeu echo "whpepioyrnff7orecjolvbudeu my puppy told me to" >>$NODEDIR/access.blacklist # ... restart the node to re-read configuration ... tahoe get URI:CHK:n7r3m6wmomelk4sep3kw5cvduq:os7ijw5c3maek7pg65e5254k2fzjflavtpejjyhshpsxuqzhcwwq:3:20:14861 -> error, 403 Access Prohibited: my puppy told me to The ``access.blacklist`` file will be checked each time a file or directory is accessed: the file's ``mtime`` is used to decide whether it need to be reloaded. Therefore no node restart is necessary when creating the initial blacklist, nor when adding second, third, or additional entries to the list. When modifying the file, be careful to update it atomically, otherwise a request may arrive while the file is only halfway written, and the partial file may be incorrectly parsed. The blacklist is applied to all access paths (including SFTP and CLI operations), not just the web-API. The blacklist also applies to directories. If a directory is blacklisted, the gateway will refuse access to both that directory and any child files/directories underneath it, when accessed via "DIRCAP/SUBDIR/FILENAME" -style URLs. Users who go directly to the child file/dir will bypass the blacklist. The node will log the SI of the file being blocked, and the reason code, into the ``logs/twistd.log`` file. URLs and HTTP and UTF-8 ======================= .. _urls-and-utf8: HTTP does not provide a mechanism to specify the character set used to encode non-ASCII names in URLs (`RFC3986#2.1`_). We prefer the convention that the ``filename=`` argument shall be a URL-escaped UTF-8 encoded Unicode string. For example, suppose we want to provoke the server into using a filename of "f i a n c e-acute e" (i.e. f i a n c U+00E9 e). The UTF-8 encoding of this is 0x66 0x69 0x61 0x6e 0x63 0xc3 0xa9 0x65 (or "fianc\\xC3\\xA9e", as python's ``repr()`` function would show). To encode this into a URL, the non-printable characters must be escaped with the urlencode ``%XX`` mechanism, giving us "fianc%C3%A9e". Thus, the first line of the HTTP request will be "``GET /uri/CAP...?save=true&filename=fianc%C3%A9e HTTP/1.1``". Not all browsers provide this: IE7 by default uses the Latin-1 encoding, which is "fianc%E9e" (although it has a configuration option to send URLs as UTF-8). The response header will need to indicate a non-ASCII filename. The actual mechanism to do this is not clear. 
For ASCII filenames, the response header would look like::

  Content-Disposition: attachment; filename="english.txt"

If Tahoe were to enforce the UTF-8 convention, it would need to decode the URL argument into a Unicode string, and then encode it back into a sequence of bytes when creating the response header. One possibility would be to use unencoded UTF-8. Developers suggest that IE7 might accept this::

  #1: Content-Disposition: attachment; filename="fianc\xC3\xA9e"

(note, the last four bytes of that line, not including the newline, are 0xC3 0xA9 0x65 0x22)

`RFC2231#4`_ (dated 1997): suggests that the following might work, and `some developers have reported`_ that it is supported by Firefox (but not IE7)::

  #2: Content-Disposition: attachment; filename*=utf-8''fianc%C3%A9e

My reading of `RFC2616#19.5.1`_ (which defines Content-Disposition) says that the filename= parameter is defined to be wrapped in quotes (presumably to allow spaces without breaking the parsing of subsequent parameters), which would give us::

  #3: Content-Disposition: attachment; filename*=utf-8''"fianc%C3%A9e"

However this is contrary to the examples in the email thread listed above. Developers report that IE7 (when it is configured for UTF-8 URL encoding, which is not the default in Asian countries), will accept::

  #4: Content-Disposition: attachment; filename=fianc%C3%A9e

However, for maximum compatibility, Tahoe simply copies bytes from the URL into the response header, rather than enforcing the UTF-8 convention. This means it does not try to decode the filename from the URL argument, nor does it encode the filename into the response header.

.. _RFC3986#2.1: https://tools.ietf.org/html/rfc3986#section-2.1
.. _RFC2231#4: https://tools.ietf.org/html/rfc2231#section-4
.. _some developers have reported: http://markmail.org/message/dsjyokgl7hv64ig3
.. _RFC2616#19.5.1: https://tools.ietf.org/html/rfc2616#section-19.5.1

tahoe_lafs-1.20.0/docs/historical/configuration.rst

.. -*- coding: utf-8-with-signature -*-

=======================
Old Configuration Files
=======================

Tahoe-LAFS releases before v1.3.0 had no ``tahoe.cfg`` file, and used distinct files for each item listed below. If Tahoe-LAFS v1.9.0 or above detects the old configuration files at start up it emits a warning and aborts the start up. (This was issue ticket #1385.)

=============================== ===================================== =================
Config setting                  File                                  Comment
=============================== ===================================== =================
``[node]nickname``              ``BASEDIR/nickname``
``[node]web.port``              ``BASEDIR/webport``
``[node]tub.port``              ``BASEDIR/client.port``               (for Clients, not Introducers)
``[node]tub.port``              ``BASEDIR/introducer.port``           (for Introducers, not Clients) (note that, unlike other keys, ``tahoe.cfg`` overrode this file from Tahoe-LAFS v1.3.0 up to and including Tahoe-LAFS v1.8.2)
``[node]tub.location``          ``BASEDIR/advertised_ip_addresses``
``[node]log_gatherer.furl``     ``BASEDIR/log_gatherer.furl``         (one per line)
``[node]timeout.keepalive``     ``BASEDIR/keepalive_timeout``
``[node]timeout.disconnect``    ``BASEDIR/disconnect_timeout``
``BASEDIR/introducer.furl``     ``BASEDIR/private/introducers.yaml``
``[client]helper.furl``         ``BASEDIR/helper.furl``
``[client]key_generator.furl``  ``BASEDIR/key_generator.furl``
``BASEDIR/stats_gatherer.furl``                                       Stats gatherer has been removed.
``[storage]enabled``            ``BASEDIR/no_storage``                (``False`` if ``no_storage`` exists)
``[storage]readonly``           ``BASEDIR/readonly_storage``          (``True`` if ``readonly_storage`` exists)
``[storage]sizelimit``          ``BASEDIR/sizelimit``
``[storage]debug_discard``      ``BASEDIR/debug_discard_storage``
``[helper]enabled``             ``BASEDIR/run_helper``                (``True`` if ``run_helper`` exists)
=============================== ===================================== =================

Note: the functionality of ``[node]ssh.port`` and ``[node]ssh.authorized_keys_file`` were previously (before Tahoe-LAFS v1.3.0) combined, controlled by the presence of a ``BASEDIR/authorized_keys.SSHPORT`` file, in which the suffix of the filename indicated which port the ssh server should listen on, and the contents of the file provided the ssh public keys to accept. Support for these files has been removed completely. To ``ssh`` into your Tahoe-LAFS node, add ``[node]ssh.port`` and ``[node]ssh.authorized_keys_file`` statements to your ``tahoe.cfg``.

Likewise, the functionality of ``[node]tub.location`` is a variant of the now (since Tahoe-LAFS v1.3.0) unsupported ``BASEDIR/advertised_ip_addresses``. The old file was additive (the addresses specified in ``advertised_ip_addresses`` were used in addition to any that were automatically discovered), whereas the new ``tahoe.cfg`` directive is not (``tub.location`` is used verbatim).

The stats gatherer has been broken at least since Tahoe-LAFS v1.13.0. The (broken) functionality of ``[client]stats_gatherer.furl`` (which was previously in ``BASEDIR/stats_gatherer.furl``), is scheduled to be completely removed after Tahoe-LAFS v1.15.0. After that point, if your configuration contains a ``[client]stats_gatherer.furl``, your node will refuse to start.

tahoe_lafs-1.20.0/docs/historical/historical_known_issues.txt

= Known Issues =

Below is a list of known issues in older releases of Tahoe-LAFS, and how to manage them. The current version of this file can be found at

https://tahoe-lafs.org/source/tahoe/trunk/docs/historical/historical_known_issues.txt

Issues in newer releases of Tahoe-LAFS can be found at:

https://tahoe-lafs.org/source/tahoe/trunk/docs/known_issues.rst

== issues in Tahoe v1.8.2, released 30-Jan-2011 ==

Unauthorized deletion of an immutable file by its storage index
---------------------------------------------------------------

Due to a flaw in the Tahoe-LAFS storage server software in v1.3.0 through v1.8.2, a person who knows the "storage index" that identifies an immutable file can cause the server to delete its shares of that file. If an attacker can cause enough shares to be deleted from enough storage servers, this deletes the file.

This vulnerability does not enable anyone to read file contents without authorization (confidentiality), nor to change the contents of a file (integrity).

A person could learn the storage index of a file in several ways:

1. By being granted the authority to read the immutable file—i.e. by being granted a read capability to the file. They can determine the file's storage index from its read capability.

2. By being granted a verify capability to the file. They can determine the file's storage index from its verify capability. This case probably doesn't happen often because users typically don't share verify caps.

3. By operating a storage server, and receiving a request from a client that has a read cap or a verify cap.
If the client attempts to upload, download, or verify the file with their storage server, even if it doesn't actually have the file, then they can learn the storage index of the file. 4. By gaining read access to an existing storage server's local filesystem, and inspecting the directory structure that it stores its shares in. They can thus learn the storage indexes of all files that the server is holding at least one share of. Normally only the operator of an existing storage server would be able to inspect its local filesystem, so this requires either being such an operator of an existing storage server, or somehow gaining the ability to inspect the local filesystem of an existing storage server. *how to manage it* Tahoe-LAFS version v1.8.3 or newer (except v1.9a1) no longer has this flaw; if you upgrade a storage server to a fixed release then that server is no longer vulnerable to this problem. Note that the issue is local to each storage server independently of other storage servers—when you upgrade a storage server then that particular storage server can no longer be tricked into deleting its shares of the target file. If you can't immediately upgrade your storage server to a version of Tahoe-LAFS that eliminates this vulnerability, then you could temporarily shut down your storage server. This would of course negatively impact availability—clients would not be able to upload or download shares to that particular storage server while it was shut down—but it would protect the shares already stored on that server from being deleted as long as the server is shut down. If the servers that store shares of your file are running a version of Tahoe-LAFS with this vulnerability, then you should think about whether someone can learn the storage indexes of your files by one of the methods described above. A person can not exploit this vulnerability unless they have received a read cap or verify cap, or they control a storage server that has been queried about this file by a client that has a read cap or a verify cap. Tahoe-LAFS does not currently have a mechanism to limit which storage servers can connect to your grid, but it does have a way to see which storage servers have been connected to the grid. The Introducer's front page in the Web User Interface has a list of all storage servers that the Introducer has ever seen and the first time and the most recent time that it saw them. Each Tahoe-LAFS gateway maintains a similar list on its front page in its Web User Interface, showing all of the storage servers that it learned about from the Introducer, when it first connected to that storage server, and when it most recently connected to that storage server. These lists are stored in memory and are reset to empty when the process is restarted. See ticket `#1528`_ for technical details. .. _#1528: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1528 == issues in Tahoe v1.1.0, released 2008-06-11 == (Tahoe v1.1.0 was superceded by v1.2.0 which was released 2008-07-21.) === more than one file can match an immutable file cap === In Tahoe v1.0 and v1.1, a flaw in the cryptographic integrity check makes it possible for the original uploader of an immutable file to produce more than one immutable file matching the same capability, so that different downloads using the same capability could result in different files. 
This flaw can be exploited only by the original uploader of an immutable file, which means that it is not a severe vulnerability: you can still rely on the integrity check to make sure that the file you download with a given capability is a file that the original uploader intended. The only issue is that you can't assume that every time you use the same capability to download a file you'll get the same file. ==== how to manage it ==== This was fixed in Tahoe v1.2.0, released 2008-07-21, under ticket #491. Upgrade to that release of Tahoe and then you can rely on the property that there is only one file that you can download using a given capability. If you are still using Tahoe v1.0 or v1.1, then remember that the original uploader could produce multiple files that match the same capability, so for example if someone gives you a capability, and you use it to download a file, and you give that capability to your friend, and he uses it to download a file, you and your friend could get different files. === server out of space when writing mutable file === If a v1.0 or v1.1 storage server runs out of disk space or is otherwise unable to write to its local filesystem, then problems can ensue. For immutable files, this will not lead to any problem (the attempt to upload that share to that server will fail, the partially uploaded share will be deleted from the storage server's "incoming shares" directory, and the client will move on to using another storage server instead). If the write was an attempt to modify an existing mutable file, however, a problem will result: when the attempt to write the new share fails (e.g. due to insufficient disk space), then it will be aborted and the old share will be left in place. If enough such old shares are left, then a subsequent read may get those old shares and see the file in its earlier state, which is a "rollback" failure. With the default parameters (3-of-10), six old shares will be enough to potentially lead to a rollback failure. ==== how to manage it ==== Make sure your Tahoe storage servers don't run out of disk space. This means refusing storage requests before the disk fills up. There are a couple of ways to do that with v1.1. First, there is a configuration option named "sizelimit" which will cause the storage server to do a "du" style recursive examination of its directories at startup, and then if the sum of the size of files found therein is greater than the "sizelimit" number, it will reject requests by clients to write new immutable shares. However, that can take a long time (something on the order of a minute of examination of the filesystem for each 10 GB of data stored in the Tahoe server), and the Tahoe server will be unavailable to clients during that time. Another option is to set the "readonly_storage" configuration option on the storage server before startup. This will cause the storage server to reject all requests to upload new immutable shares. Note that neither of these configurations affect mutable shares: even if sizelimit is configured and the storage server currently has greater space used than allowed, or even if readonly_storage is configured, servers will continue to accept new mutable shares and will continue to accept requests to overwrite existing mutable shares. 
Mutable files are typically used only for directories, and are usually much smaller than immutable files, so if you use one of these configurations to stop the influx of immutable files while there is still sufficient disk space to receive an influx of (much smaller) mutable files, you may be able to avoid the potential for "rollback" failure. A future version of Tahoe will include a fix for this issue. Here is [https://lists.tahoe-lafs.org/pipermail/tahoe-dev/2008-May/000628.html the mailing list discussion] about how that future version will work. === pyOpenSSL/Twisted defect causes false alarms in tests === The combination of Twisted v8.0 or Twisted v8.1 with pyOpenSSL v0.7 causes the Tahoe v1.1 unit tests to fail, even though the behavior of Tahoe itself which is being tested is correct. ==== how to manage it ==== If you are using Twisted v8.0 or Twisted v8.1 and pyOpenSSL v0.7, then please ignore ERROR "Reactor was unclean" in test_system and test_introducer. Upgrading to a newer version of Twisted or pyOpenSSL will cause those false alarms to stop happening (as will downgrading to an older version of either of those packages). == issues in Tahoe v1.0.0, released 2008-03-25 == (Tahoe v1.0 was superseded by v1.1 which was released 2008-06-11.) === server out of space when writing mutable file === In addition to the problems caused by insufficient disk space described above, v1.0 clients which are writing mutable files when the servers fail to write to their filesystem are likely to think the write succeeded, when it in fact failed. This can cause data loss. ==== how to manage it ==== Upgrade the client to v1.1, or make sure that servers are always able to write to their local filesystem (including that there is space available) as described in "server out of space when writing mutable file" above. === server out of space when writing immutable file === Tahoe v1.0 clients that are using v1.0 servers which are unable to write to their filesystem during an immutable upload will correctly detect the first failure, but if they retry the upload without restarting the client, or if another client attempts to upload the same file, the second upload may appear to succeed when it hasn't, which can lead to data loss. ==== how to manage it ==== Upgrading either or both of the client and the server to v1.1 will fix this issue. It can also be avoided by ensuring that the servers are always able to write to their local filesystem (including that there is space available) as described in "server out of space when writing mutable file" above. === large directories or mutable files of certain sizes === If a client attempts to upload a large mutable file with a size greater than about 3,139,000 bytes and less than or equal to 3,500,000 bytes then it will fail but appear to succeed, which can lead to data loss. (Mutable files larger than 3,500,000 bytes are refused outright.) The symptom of the failure is very high memory usage (3 GB of memory) and 100% CPU for about 5 minutes, before it appears to succeed, although it hasn't. Directories are stored in mutable files, and a directory of approximately 9000 entries may fall into this range of mutable file sizes (depending on the size of the filenames or other metadata associated with the entries). ==== how to manage it ==== This was fixed in v1.1, under ticket #379. If the client is upgraded to v1.1, then it will fail cleanly instead of falsely appearing to succeed when it tries to write a file whose size is in this range.
If the server is also upgraded to v1.1, then writes of mutable files whose size is in this range will succeed. (If the server is upgraded to v1.1 but the client is still v1.0 then the client will still suffer this failure.) === uploading files greater than 12 GiB === If a Tahoe v1.0 client uploads a file greater than 12 GiB in size, the file will be silently corrupted so that it is not retrievable, but the client will think that it succeeded. This is a "data loss" failure. ==== how to manage it ==== Don't upload files larger than 12 GiB. If you have previously uploaded files of that size, assume that they have been corrupted and are not retrievable from the Tahoe storage grid. Tahoe v1.1 clients will refuse to upload files larger than 12 GiB with a clean failure. A future release of Tahoe will remove this limitation so that larger files can be uploaded. tahoe_lafs-1.20.0/docs/historical/peer-selection-tahoe2.txt0000644000000000000000000000657013615410400020557 0ustar00= THIS PAGE DESCRIBES HISTORICAL DESIGN CHOICES. SEE docs/architecture.rst FOR CURRENT DOCUMENTATION = When a file is uploaded, the encoded shares are sent to other peers. But to which ones? The PeerSelection algorithm is used to make this choice. Early in 2007, we were planning to use the following "Tahoe Two" algorithm. By the time we released 0.2.0, we switched to "tahoe3", but when we released v0.6, we switched back (ticket #132). As in Tahoe Three, the verifierid is used to consistently-permute the set of all peers (by sorting the peers by HASH(verifierid+peerid)). Each file gets a different permutation, which (on average) will evenly distribute shares among the grid and avoid hotspots. With our basket of (usually 10) shares to distribute in hand, we start at the beginning of the list and ask each peer in turn if they are willing to hold on to one of our shares (the "lease request"). If they say yes, we remove that share from the basket and remember who agreed to host it. Then we go to the next peer in the list and ask them the same question about another share. If a peer says no, we remove them from the list. If a peer says that they already have one or more shares for this file, we remove those shares from the basket. If we reach the end of the list, we start again at the beginning. If we run out of peers before we run out of shares, we fail unless we've managed to place at least some number of the shares: the likely threshold is to attempt to place 10 shares (out of which we'll need 3 to recover the file), and be content if we can find homes for at least 7 of them. In small networks, this approach will loop around several times and place several shares with each node (e.g. in a 5-host network with plenty of space, each node will get 2 shares). In large networks with plenty of space, the shares will be placed with the first 10 peers in the permuted list. In large networks that are somewhat full, we'll need to traverse more of the list before we find homes for the shares. The average number of peers that we'll need to talk to is vaguely equal to 10 / (1-utilization), with a bunch of other terms that relate to the distribution of free space on the peers and the size of the shares being offered. Small files with small shares will fit anywhere, large files with large shares will only fit on certain peers, so the mesh may have free space but no holes large enough for a very large file, which might indicate that we should try again with a larger number of (smaller) shares. 
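To make the upload walk described above concrete, here is a minimal Python sketch of the permute-and-loop idea. It is only an illustration, not the real uploader: the hash is a stand-in for the one Tahoe actually uses, each peer is modelled simply as having room for a fixed number of shares, and the "peer already has shares of this file" and happiness details are simplified.

  from hashlib import sha256

  def permute_peers(verifierid, peerids):
      # consistently permute the peer set for this particular file
      return sorted(peerids, key=lambda peerid: sha256(verifierid + peerid).digest())

  def place_shares(verifierid, peer_capacity, total_shares=10, happy=7):
      basket = list(range(total_shares))            # share numbers still needing a home
      candidates = permute_peers(verifierid, list(peer_capacity))
      placements = {}                               # sharenum -> peerid
      while basket and candidates:
          for peerid in list(candidates):
              if not basket:
                  break
              if peer_capacity[peerid] > 0:         # stand-in for an accepted lease request
                  placements[basket.pop(0)] = peerid
                  peer_capacity[peerid] -= 1
              else:
                  candidates.remove(peerid)         # a full peer leaves the list
      if len(placements) < happy:
          raise RuntimeError("not enough homes for the shares")
      return placements

  # five peers with plenty of space: each ends up holding two shares
  peers = {bytes([n]) * 20: 2 for n in range(5)}
  print(place_shares(b"verifierid-of-this-file", peers))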
When it comes time to download, we compute a similar list of permuted peerids, and start asking for shares beginning with the start of the list. Each peer gives us a list of the shareids that they are holding. Eventually (depending upon how much churn the peerlist has experienced), we'll find holders for at least 3 shares, or we'll run out of peers. If the mesh is very large and we want to fail faster, we can establish an upper bound on how many peers we should talk to (perhaps by recording the permuted peerid of the last node to which we sent a share, or a count of the total number of peers we talked to during upload). I suspect that this approach handles churn more efficiently than tahoe3, but I haven't gotten my head around the math that could be used to show it. On the other hand, it takes a lot more round trips to find homes in small meshes (one per share, whereas tahoe three can do just one per node). tahoe_lafs-1.20.0/docs/historical/peer-selection-tahoe3.txt0000644000000000000000000001011513615410400020546 0ustar00= THIS PAGE DESCRIBES HISTORICAL ARCHITECTURE CHOICES: THE CURRENT CODE DOES NOT WORK AS DESCRIBED HERE. = When a file is uploaded, the encoded shares are sent to other peers. But to which ones? The PeerSelection algorithm is used to make this choice. In the old (May 2007) version, the verifierid is used to consistently-permute the set of all peers (by sorting the peers by HASH(verifierid+peerid)). Each file gets a different permutation, which (on average) will evenly distribute shares among the grid and avoid hotspots. This permutation places the peers around a 2^256^-sized ring, like the rim of a big clock. The 100-or-so shares are then placed around the same ring (at 0, 1/100*2^256^, 2/100*2^256^, ... 99/100*2^256^). Imagine that we start at 0 with an empty basket in hand and proceed clockwise. When we come to a share, we pick it up and put it in the basket. When we come to a peer, we ask that peer if they will give us a lease for every share in our basket. The peer will grant us leases for some of those shares and reject others (if they are full or almost full). If they reject all our requests, we remove them from the ring, because they are full and thus unhelpful. Each share they accept is removed from the basket. The remainder stay in the basket as we continue walking clockwise. We keep walking, accumulating shares and distributing them to peers, until either we find a home for all shares, or there are no peers left in the ring (because they are all full). If we run out of peers before we run out of shares, the upload may be considered a failure, depending upon how many shares we were able to place. The current parameters try to place 100 shares, of which 25 must be retrievable to recover the file, and the peer selection algorithm is happy if it was able to place at least 75 shares. These numbers are adjustable: 25-out-of-100 means an expansion factor of 4x (every file in the grid consumes four times as much space when totalled across all StorageServers), but is highly reliable (the actual reliability is a binomial distribution function of the expected availability of the individual peers, but in general it goes up very quickly with the expansion factor). If the file has been uploaded before (or if two uploads are happening at the same time), a peer might already have shares for the same file we are proposing to send to them. In this case, those shares are removed from the list and assumed to be available (or will be soon). 
This reduces the number of uploads that must be performed. When downloading a file, the current release just asks all known peers for any shares they might have, chooses the minimal necessary subset, then starts downloading and processing those shares. A later release will use the full algorithm to reduce the number of queries that must be sent out. This algorithm uses the same consistent-hashing permutation as on upload, but instead of one walker with one basket, we have 100 walkers (one per share). They each proceed clockwise in parallel until they find a peer, and put that one on the "A" list: out of all peers, this one is the most likely to be the same one to which the share was originally uploaded. The next peer that each walker encounters is put on the "B" list, etc. All the "A" list peers are asked for any shares they might have. If enough of them can provide a share, the download phase begins and those shares are retrieved and decoded. If not, the "B" list peers are contacted, etc. This routine will eventually find all the peers that have shares, and will find them quickly if there is significant overlap between the set of peers that were present when the file was uploaded and the set of peers that are present as it is downloaded (i.e. if the "peerlist stability" is high). Some limits may be imposed in large grids to avoid querying a million peers; this provides a tradeoff between the work spent to discover that a file is unrecoverable and the probability that a retrieval will fail when it could have succeeded if we had just tried a little bit harder. The appropriate value of this tradeoff will depend upon the size of the grid, and will change over time. tahoe_lafs-1.20.0/docs/historical/peer-selection.txt0000644000000000000000000000306413615410400017372 0ustar00When a file is uploaded, the encoded shares are sent to other peers. But to which ones? Likewise, when we want to download a file, which peers should we ask for shares? The "peer selection" algorithm is used to make these choices. During the first tahoe meeting (actually on the drive back home), we designed the now-abandoned "tahoe1" algorithm, which involved a "cabal" for each file, where the peers involved would check up on each other to make sure the data was still available. The big limitation was the expense of tracking which nodes were parts of which cabals. When we released 0.2.0, we used the "tahoe3" algorithm (see peer-selection-tahoe3.txt), but in v0.6 (ticket #132) we switched back to "tahoe2" (see peer-selection-tahoe2.txt, and the PEER SELECTION section of docs/architecture.rst), which uses a permuted peerid list and packs the shares into the first 10 or so members of this list. (It is named "tahoe2" because it was designed before "tahoe3" was.) In the future, we might move to an algorithm known as "denver airport", which uses Chord-like routing to minimize the number of active connections. Different peer selection algorithms result in different properties: * how well do we handle nodes leaving or joining the mesh (differences in the peer list)? * how many connections do we need to keep open? * how many nodes must we speak to when uploading a file? * if a file is unrecoverable, how long will it take for us to discover this fact? * how expensive is a file-checking operation? * how well can we accommodate changes to encoding parameters?
tahoe_lafs-1.20.0/docs/man/man1/tahoe.10000644000000000000000000001212713615410400014343 0ustar00.TH TAHOE 1 "July 2011" "Tahoe-LAFS \[em] tahoe command" "User Commands" .SH NAME .PP tahoe - Secure distributed file store. .SH SYNOPSIS .PP tahoe \f[I]COMMAND\f[] [\f[I]OPTION\f[]]... [\f[I]PARAMETER\f[]]... .SH GENERAL OPTIONS .TP .B \f[B]-q,\ --quiet\f[] Operate silently. .RS .RE .TP .B \f[B]-V,\ --version\f[] Display version numbers. .RS .RE .TP .B \f[B]--version-and-path\f[] Display version numbers and paths to their locations. .RS .RE .TP .B \f[B]-d,\ --node-directory\f[] Specify which Tahoe node directory should be used. (default for most commands: `$HOME/.tahoe') .RS .RE .SH COMMANDS .PP The \f[B]tahoe\f[] runner can be used for various tasks depending on the command used. .SS ADMINISTRATION .PP tahoe \f[I]COMMAND\f[] [\f[I]OPTION\f[]]... [\f[I]NODEDIR\f[]] .RS .SS COMMANDS .TP .B \f[B]create-node\f[] Create a node that acts as a client, server or both. .TP .B \f[B]create-client\f[] Create a client node (with storage initially disabled). .TP .B \f[B]create-introducer\f[] Create an introducer node. .SS OPTIONS .TP .B \f[B]-C,\ --basedir=\f[] Same as \[em]node-directory. .TP .B \f[B]-d,\ --node-directory=\f[] Specify which Tahoe node directory should be used. (default for \f[B]create-node\f[] and \f[B]create-client\f[]: `$HOME/.tahoe/'). .TP .B \f[B]-n,\ --nickname=\f[] Specify the nickname for this node (\f[B]create-node\f[] and \f[B]create-client\f[] only). .TP .B \f[B]-i,\ --introducer=\f[] Specify the introducer FURL to use (\f[B]create-node\f[] and \f[B]create-client\f[] only). .TP .B \f[B]-p,\ --webport=\f[] Specify which TCP port to run the HTTP interface on. Use `none' to disable. Default: `tcp:3456:interface=127.0.0.1' (\f[B]create-node\f[] and \f[B]create-client\f[] only). .TP .B \f[B]--no-storage\f[] Do not offer storage service to other nodes (\f[B]create-node\f[] only). .RE .SS CONTROLLING NODES .PP tahoe \f[I]COMMAND\f[] [\f[I]OPTION\f[]]... [\f[I]NODEDIR\f[]] .SS COMMANDS .TP .B \f[B]start\f[] Start a node (of any type). .RS .RE .TP .B \f[B]stop\f[] Stop a node. .RS .RE .TP .B \f[B]restart\f[] Restart a node. .RS .RE .TP .B \f[B]run\f[] Run a node synchronously. .RS .RE .SS OPTIONS .TP .B \f[B]-p,\ --profile\f[] Run under the Python profiler, putting results in `profiling_results.prof' (use with \f[B]start\f[] or \f[B]restart\f[] only). .RS .RE .TP .B \f[B]--syslog\f[] Tell the node to log to syslog, not a file (use with \f[B]start\f[] or \f[B]restart\f[] only). .RS .RE .TP .B \f[B]-C,\ --basedir=\f[] Same as \[em]node-directory. .RS .RE .TP .B \f[B]-d,\ --node-directory=\f[] Specify which Tahoe node directory should be used (default for commands other than \f[B]run\f[]: `$HOME/.tahoe/'). .RS .RE .TP .B \f[B]--help\f[] Display help and exit .RS .RE .SS USING THE FILE STORE .TP .B \f[B]mkdir\f[] Create a new directory. .RS .RE .TP .B \f[B]add-alias\f[] Add a new alias cap. .RS .RE .TP .B \f[B]create-alias\f[] Create a new alias cap. .RS .RE .TP .B \f[B]list-aliases\f[] List all alias caps. .RS .RE .TP .B \f[B]ls\f[] List a directory. .RS .RE .TP .B \f[B]get\f[] Retrieve a file from the grid. .RS .RE .TP .B \f[B]put\f[] Upload a file into the grid. .RS .RE .TP .B \f[B]cp\f[] Copy one or more files or directories. .RS .RE .TP .B \f[B]unlink\f[] Unlink a file or directory on the grid. .RS .RE .TP .B \f[B]rm\f[] Unlink a file or directory on the grid (same as \f[B]unlink\f[]). .RS .RE .TP .B \f[B]mv\f[] Move a file within the grid. 
.RS .RE .TP .B \f[B]ln\f[] Make an additional link to an existing file or directory. .RS .RE .TP .B \f[B]backup\f[] Make target dir look like local dir. .RS .RE .TP .B \f[B]webopen\f[] Open a web browser to a grid file or directory. .RS .RE .TP .B \f[B]manifest\f[] List all files/directories in a subtree. .RS .RE .TP .B \f[B]stats\f[] Print statistics about all files/directories in a subtree. .RS .RE .TP .B \f[B]check\f[] Check a single file or directory. .RS .RE .TP .B \f[B]deep-check\f[] Check all files/directories reachable from a starting point .RS .RE .SS OPTIONS .PP Please run `tahoe \f[I]COMMAND\f[] --help' for more details on each command. .SS DEBUGGING .PP tahoe debug \f[I]SUBCOMMAND\f[] [\f[I]OPTION\f[]]... [\f[I]PARAMETER\f[]]... .SS SUBCOMMANDS .TP .B \f[B]dump-share\f[] Unpack and display the contents of a share. .RS .RE .TP .B \f[B]dump-cap\f[] Unpack a read-cap or write-cap. .RS .RE .TP .B \f[B]find-shares\f[] Locate sharefiles in node directories. .RS .RE .TP .B \f[B]catalog-shares\f[] Describe all shares in node dirs. .RS .RE .TP .B \f[B]corrupt-share\f[] Corrupt a share by flipping a bit. .RS .RE .PP Please run e.g.\ `tahoe debug dump-share --help' for more details on each subcommand. .SH AUTHORS .PP Tahoe-LAFS has been written by Brian Warner, Zooko Wilcox-O'Hearn and dozens of others. This manpage was originally written by bertagaz. .SH REPORTING BUGS .PP Please see . .PP For known security issues see . .PP Tahoe-LAFS home page: .PP tahoe-dev mailing list: .SH COPYRIGHT .PP Copyright \@ 2006\[en]2013 The Tahoe-LAFS Software Foundation tahoe_lafs-1.20.0/docs/proposed/GridID.txt0000644000000000000000000002621513615410400015253 0ustar00= Grid Identifiers = What makes up a Tahoe "grid"? The rough answer is a fairly-stable set of Storage Servers. The read- and write- caps that point to files and directories are scoped to a particular set of servers. The Tahoe peer-selection and erasure-coding algorithms provide high availability as long as there is significant overlap between the servers that were used for upload and the servers that are available for subsequent download. When new peers are added, the shares will get spread out in the search space, so clients must work harder to download their files. When peers are removed, shares are lost, and file health is threatened. Repair bandwidth must be used to generate new shares, so cost increases with the rate of server departure. If servers leave the grid too quickly, repair may not be able to keep up, and files will be lost. So to get long-term stability, we need that peer set to remain fairly stable. A peer which joins the grid needs to stick around for a while. == Multiple Grids == The current Tahoe read-cap format doesn't admit the existence of multiple grids. In fact, the "URI:" prefix implies that these cap strings are universal: it suggests that this string (plus some protocol definition) is completely sufficient to recover the file. However, there are a variety of reasons why we may want to have more than one Tahoe grid in the world: * scaling: there are a variety of problems that are likely to be encountered as we attempt to grow a Tahoe grid from a few dozen servers to a few thousand, some of which are easier to deal with than others. Maintaining connections to servers and keeping up-to-date on the locations of servers is one issue. There are design improvements that can work around these, but they will take time, and we may not want to wait for that work to be done. 
Being able to deploy multiple grids may be the best way to get a large number of clients using tahoe at once. * managing quality of storage, storage allocation: the members of a friendnet may want to restrict access to storage space to just each other, and may want to run their grid without involving any external coordination * commercial goals: a company using Tahoe may want to restrict access to storage space to just their customers * protocol upgrades, development: new and experimental versions of the tahoe software may need to be deployed and analyzed in isolation from the grid that clients are using for active storage So if we define a grid to be a set of storage servers, then two distinct grids will have two distinct sets of storage servers. Clients are free to use whichever grid they like (and have permission to use), however each time they upload a file, they must choose a specific grid to put it in. Clients can upload the same file to multiple grids in two separate upload operations. == Grid IDs in URIs == Each URI needs to be scoped to a specific grid, to avoid confusion ("I looked for URI123 and it said File Not Found.. oh, which grid did you upload that into?"). To accomplish this, the URI will contain a "grid identifier" that references a specific Tahoe grid. The grid ID is shorthand for a relatively stable set of storage servers. To make the URIs actually Universal, there must be a way to get from the grid ID to the actual grid. This document defines a protocol by which a client that wants to download a file from a previously-unknown grid will be able to locate and connect to that grid. == Grid ID specification == The Grid ID is a string, using a fairly limited character set, alphanumerics plus possibly a few others. It can be very short: a gridid of just "0" can be used. The gridID will be copied into the cap string for every file that is uploaded to that grid, so there is pressure to keep them short. The cap format needs to be able to distinguish the gridID from the rest of the cap. This could be expressed in DNS-style dot notation, for example the directory write-cap with a write-key of "0ZrD.." that lives on gridID "foo" could be expressed as "D0ZrDNAHuxs0XhYJNmkdicBUFxsgiHzMdm.foo". * design goals: non-word-breaking, double-click-pasteable, maybe human-readable (do humans need to know which grid is being used? probably not). * does not need to be Secure (i.e. long and unguessable), but we must analyze the sorts of DoS attack that can result if it is not (and even if it is) * does not need to be human-memorable, although that may assist debugging and discussion ("my file is on grid 4, where is yours?") * *does* need to be unique, but the total number of grids is fairly small (counted in the hundreds or thousands rather than millions or billions) and we can afford to coordinate the use of short names. Folks who don't like coordination can pick a largeish random string. Each announcement that a Storage Server publishes (to introducers) will include its grid id. If a server participates in multiple grids, it will make multiple announcements, each with a single grid id. Clients will be able to ask an introducer for information about all storage servers that participate in a specific grid. Clients are likely to have a default grid id, to which they upload files. If a client is adding a file to a directory that lives in a different grid, they may upload the file to that other grid instead of their default.
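A client that receives a cap in the dotted form suggested above would first need to peel off the grid ID before it can do anything else. Here is a trivial Python sketch, assuming only what the notation above specifies (the grid ID is everything after the last dot):

  def split_gridid_cap(cap):
      # "D0ZrDNAHuxs0XhYJNmkdicBUFxsgiHzMdm.foo" -> (cap body, grid ID)
      body, gridid = cap.rsplit(".", 1)
      return body, gridid

  print(split_gridid_cap("D0ZrDNAHuxs0XhYJNmkdicBUFxsgiHzMdm.foo"))
  # ('D0ZrDNAHuxs0XhYJNmkdicBUFxsgiHzMdm', 'foo')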
== Getting from a Grid ID to a grid == When a client decides to download a file, it starts by unpacking the cap and extracting the grid ID. Then it attempts to connect to at least one introducer for that grid, by leveraging DNS: hash $GRIDID id (with some tag) to get a long base32-encoded string: $HASH GET http://tahoe-$HASH.com/introducer/gridid/$GRIDID the results should be a JSON-encoded list of introducer FURLs for extra redundancy, if that query fails, perform the following additional queries: GET http://tahoe-$HASH.net/introducer/gridid/$GRIDID GET http://tahoe-$HASH.org/introducer/gridid/$GRIDID GET http://tahoe-$HASH.tv/introducer/gridid/$GRIDID GET http://tahoe-$HASH.info/introducer/gridid/$GRIDID etc. GET http://grids.tahoe-lafs.org/introducer/gridid/$GRIDID The first few introducers should be able to announce other introducers, via the distributed gossip-based introduction scheme of #68. Properties: * claiming a grid ID is cheap: a single domain name registration (in an uncontested namespace), and a simple web server. allmydata.com can publish introducer FURLs for grids that don't want to register their own domain. * lookup is at least as robust as DNS. By using benevolent public services like tahoe-grids.allmydata.com, reliability can be increased further. The HTTP fetch can return a list of every known server node, all of which can act as introducers. * not secure: anyone who can interfere with DNS lookups (or claims tahoe-$HASH.com before you do) can cause clients to connect to their servers instead of yours. This admits a moderate DoS attack against download availability. Performing multiple queries (to .net, .org, etc) and merging the results may mitigate this (you'll get their servers *and* your servers; the download search will be slower but is still likely to succeed). It may admit an upload DoS attack as well, or an upload file-reliability attack (trick you into uploading to unreliable servers) depending upon how the "server selection policy" (see below) is implemented. Once the client is connected to an introducer, it will see if there is a Helper who is willing to assist with the upload or download. (For download, this might reduce the number of connections that the grid's storage servers must deal with). If not, ask the introducers for storage servers, and connect to them directly. == Controlling Access == The introducers are not used to enforce access control. Instead, a system of public keys are used. There are a few kinds of access control that we might want to implement: * protect storage space: only let authorized clients upload/consume storage * protect download bandwidth: only give shares to authorized clients * protect share reliability: only upload shares to "good" servers The first two are implemented by the server, to protect their resources. The last is implemented by the client, to avoid uploading shares to unreliable servers (specifically, to maximize the utility of the client's limited upload bandwidth: there's no problem with putting shares on unreliable peers per se, but it is a problem if doing so means the client won't put a share on a more reliable peer). The first limitation (protect storage space) will be implemented by public keys and signed "storage authority" certificates. The client will present some credentials to the storage server to convince it that the client deserves the space. 
When storage servers are in this mode, they will have a certificate that names a public key, and any credentials that can demonstrate a path from that key will be accepted. This scheme is described in docs/proposed/old-accounts-pubkey.txt . The second limitation is unexplored. The read-cap does not currently contain any notion of who must pay for the bandwidth incurred. The third limitation (only upload to "good" servers), when enabled, is implemented by a "server selection policy" on the client side, which defines which server credentials will be accepted. This is just like the first limitation in reverse. Before clients consider including a server in their peer selection algorithm, they check the credentials, and ignore any that do not meet them. This means that a client may not wish to upload anything to "foreign grids", because they have no promise of reliability. The reasons that a client might want to upload to a foreign grid need to be examined: reliability may not be important, or it might be good enough to upload the file to the client's "home grid" instead. The server selection policy is intended to be fairly open-ended: we can imagine a policy that says "upload to any server that has a good reputation among group X", or more complicated schemes that require less and less centralized management. One important and simple scheme is to simply have a list of acceptable keys: a friendnet with 5 members would include 5 such keys in each policy, enabling every member to use the services of the others, without having a single central manager with unilateral control over the definition of the group. == Closed Grids == To implement these access controls, each client needs to be configured with three things: * home grid ID (used to find introducers, helpers, storage servers) * storage authority (certificate to enable uploads) * server selection policy (identify good/reliable servers) If the server selection policy indicates centralized control (i.e. there is some single key X which is used to sign the credentials for all "good" servers), then this could be built in to the grid ID. By using the base32 hash of the pubkey as the grid ID, clients would only need to be configured with two things: the grid ID, and their storage authority. In this case, the introducer would provide the pubkey, and the client would compare the hashes to make sure they match. This is analogous to how a TubID is used in a FURL. Such grids would have significantly larger grid IDs, 24 characters or more. tahoe_lafs-1.20.0/docs/proposed/README.lossmodel0000644000000000000000000000222413615410400016262 0ustar00The lossmodel.lyx file is the source document for an in-progress paper that analyzes the probability of losing files stored in a Tahoe-LAFS file store under various scenarios. It describes: 1. How to estimate peer reliabilities, based on peer MTBF failure data. 2. How to compute file loss probabilities, based on a given set of shares stored on peers with estimated reliabilities. The peer reliabilities do not have to be uniform, and the model takes into account the file repair process. 3. How to estimate Tahoe parameters for k (shares needed), n (shares distributed) and A (repair interval) to achieve a file reliability target. 4. How to compute the estimated repair cost over time, discounted at a fixed rate, of maintaining a file for a time period T. 
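As a back-of-the-envelope illustration of item 2 above: if we assume, purely for the sake of a sketch, that each of the n shares independently survives the period of interest with the same probability p, then the file is lost exactly when fewer than k shares survive. The paper itself handles non-uniform peer reliabilities and the repair process; this uniform, repair-free simplification is not part of it.

  from math import comb

  def p_file_lost(k, n, p):
      # the file is lost when fewer than k of the n shares survive;
      # p is the assumed per-share survival probability
      return sum(comb(n, i) * p**i * (1 - p)**(n - i) for i in range(k))

  print(p_file_lost(3, 10, 0.9))   # Tahoe's default 3-of-10 encoding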
Future work will also address the latter three issues in the context of "non-aggressive" repair, where repair will only be performed if too many shares are lost, and it will also extend the repair cost estimation model to suggest cost functions appropriate for common network architectures. A PDF of the current version of the file may be downloaded from: http://willden.org/~shawn/lossmodel.pdftahoe_lafs-1.20.0/docs/proposed/accounting-overview.txt0000644000000000000000000010772113615410400020151 0ustar00 = Accounting = "Accounting" is the arena of the Tahoe system that concerns measuring, controlling, and enabling the ability to upload and download files, and to create new directories. In contrast with the capability-based access control model, which dictates how specific files and directories may or may not be manipulated, Accounting is concerned with resource consumption: how much disk space a given person/account/entity can use. Tahoe releases up to and including 1.4.1 have a nearly-unbounded resource usage model. Anybody who can talk to the Introducer gets to talk to all the Storage Servers, and anyone who can talk to a Storage Server gets to use as much disk space as they want (up to the reserved_space= limit imposed by the server, which affects all users equally). Not only is the per-user space usage unlimited, it is also unmeasured: the owner of the Storage Server has no way to find out how much space Alice or Bob is using. The goals of the Accounting system are thus: * allow the owner of a storage server to control who gets to use disk space, with separate limits per user * allow both the server owner and the user to measure how much space the user is consuming, in an efficient manner * provide grid-wide aggregation tools, so a set of cooperating server operators can easily measure how much a given user is consuming across all servers. This information should also be available to the user in question. For the purposes of this document, the terms "Account" and "User" are mostly interchangeable. The fundamental unit of Accounting is the "Account", in that usage and quota enforcement is performed separately for each account. These accounts might correspond to individual human users, or they might be shared among a group, or a user might have an arbitrary number of accounts. Accounting interacts with Garbage Collection. To protect their shares from GC, clients maintain limited-duration leases on those shares: when the last lease expires, the share is deleted. Each lease has a "label", which indicates the account or user which wants to keep the share alive. A given account's "usage" (their per-server aggregate usage) is simply the sum of the sizes of all shares on which they hold a lease. The storage server may limit the user to a fixed "quota" (an upper bound on their usage). To keep a file alive, the user must be willing to use up some of their quota. Note that a popular file might have leases from multiple users, in which case one user might take a chance and decline to add their own lease, saving some of their quota and hoping that the other leases continue to keep the file alive despite their personal unwillingness to contribute to the effort. One could imagine a "pro-rated quotas" scheme, in which a 10MB file with 5 leaseholders would deduct 2MB from each leaseholder's quota. We have decided to not implement pro-rated quotas, because such a scheme would make usage values hard to predict: a given account might suddenly go over quota solely because of a third party's actions. 
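A toy numerical sketch of that unpredictability, using the 10MB/five-leaseholder example above (the account names and the lease data structure are made up for the illustration):

  def usage_full(leases, account):
      # adopted rule: charge the full size of every share the account holds a lease on
      return sum(size for size, holders in leases if account in holders)

  def usage_prorated(leases, account):
      # rejected rule: split each share's size among its current leaseholders
      return sum(size / len(holders) for size, holders in leases if account in holders)

  leases = [(10_000_000, {"alice", "bob", "carol", "dave", "edna"})]
  print(usage_prorated(leases, "alice"))   # 2 MB while five accounts hold leases
  leases = [(10_000_000, {"alice"})]
  print(usage_prorated(leases, "alice"))   # jumps to 10 MB when the other four cancel
  print(usage_full(leases, "alice"))       # 10 MB in both cases, so usage stays predictable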
== Accounting Implementation == The implementation of these accounting features are tracked in this ticket: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/666 == Authority Flow == The authority to consume space on the storage server originates, of course, with the storage server operator. These operators start with complete control over their space, and delegate portions of it to others: either directly to clients who want to upload files, or to intermediaries who can then delegate attenuated authority onwards. The operators have various reasons for wanting to share their space: monetary consideration, expectations of in-kind exchange, or simple generosity. But the final authority always rests with the operator. The server operator grants limited authority over their space by configuring their server to accept requests that demonstrate knowledge of certain secrets. They then share those secrets with the client who intends to use this space, or with an intermediary who will generate still more secrets and share those with the client. Eventually, an upload or create-directory operation will be performed that needs this authority. Part of the operation will involve proving knowledge of the secret to the storage server, and the server will require this proof before accepting the uploaded share or adding a new lease. The authority is expressed as a string, containing cryptographically-signed messages and keys. The string also contains "restrictions", which are annotations that explain the limits imposed upon this authority, either by the original grantor (the storage server operator) or by one of the intermediaries. Authority can be reduced but not increased. Any holder of a given authority can delegate some or all of it to another party. The authority string may be short enough to include as an argument to a CLI command (--with-authority ABCDE), or it may be long enough that it must be stashed in a file and referenced in some other fashion (--with-authority-file ~/.my_authority). There are CLI tools to create brand new authority strings, to derive attenuated authorities from an existing one, and to explain the contents of an authority string. These authority strings can be shared with others just like filecaps and dircaps: knowledge of the authority string is both necessary and complete to wield the authority it represents. Web-API requests will include the authority necessary to complete the operation. When used by a CLI tool, the authority is likely to come from ~/.tahoe/private/authority (i.e. it is ambient to the user who has access to that node, just like aliases provide similar access to a specific "root directory"). When used by the browser-oriented WUI, the authority will [TODO] somehow be retained on each page in a way that minimizes the risk of CSRF attacks and allows safe sharing (cut-and-paste of a URL without sharing the storage authority too). The client node receiving the web-API request will extract the authority string from the request and use it to build the storage server messages that it sends to fulfill that request. == Definition Of Authority == The term "authority" is used here in the object-capability sense: it refers to the ability of some principal to cause some action to occur, whether because they can do it themselves, or because they can convince some other principal to do it for them. 
In Tahoe terms, "storage authority" is the ability to do one of the following actions: * upload a new share, thus consuming storage space * adding a new lease to a share, thus preventing space from being reclaimed * modify an existing mutable share, potentially increasing the space consumed The Accounting effort may involve other kinds of authority that get limited in a similar manner as storage authority, like the ability to download a share or query whether a given share is present: anything that may consume CPU time, disk bandwidth, or other limited resources. The authority to renew or cancel a lease may be controlled in a similar fashion. Storage authority, as granted from a server operator to a client, is not simply a binary "use space or not" grant. Instead, it is parameterized by a number of "restrictions". The most important of these restrictions (with respect to the goals of Accounting) is the "Account Label". === Account Labels === A Tahoe "Account" is defined by a variable-length sequence of small integers. (they are not required to be small, the actual limit is 2**64, but neither are they required to be unguessable). For the purposes of discussion, these lists will be expressed as period-joined strings: the two-element list (1,4) will be displayed here as "1.4". These accounts are arranged in a hierarchy: the account identifier 1.4 is considered to be a "parent" of 1.4.2 . There is no relationship between the values used by unrelated accounts: 1.4 is unrelated to 2.4, despite both coincidentally using a "4" in the second element. Each lease has a label, which contains the Account identifier. The storage server maintains an aggregate size count for each label prefix: when asked about account 1.4, it will report the amount of space used by shares labeled 1.4, 1.4.2, 1.4.7, 1.4.7.8, etc (but *not* 1 or 1.5). The "Account Label" restriction allows a client to apply any label it wants, as long as that label begins with a specific prefix. If account 1 is associated with Alice, then Alice will receive a storage authority string that contains a "must start with 1" restriction, enabling her to to use storage space but obligating her to lease her shares with a label that can be traced back to her. She can delegate part of her authority to others (perhaps with other non-label restrictions, such as a space restriction or time limit) with or without an additional label restriction. For example, she might delegate some of her authority to her friend Amy, with a 1.4 label restriction. Amy could then create labels with 1.4 or 1.4.7, but she could not create labels with the same 1 identifier that Alice can do, nor could she create labels with 1.5 (which Alice might have given to her other friend Annette). The storage server operator can ask about the usage of 1 to find out how much Alice is responsible for (which includes the space that she has delegated to Amy and Annette), and none of the A-users can avoid being counted in this total. But Alice can ask the storage server about the usage of 1.4 to find out how much Amy has taken advantage of her gift. Likewise, Alice has control over any lease with a label that begins with 1, so she can cancel Amy's leases and free the space they were consuming. If this seems surprising, consider that the storage server operator considered Alice to be responsible for that space anyways: with great responsibility (for space consumed) comes great power (to stop consuming that space). 
=== Server Space Restriction === The storage server's basic control over how space usage (apart from the binary use-it-or-not authority granted by handing out an authority string at all) is implemented by keeping track of the space used by any given account identifier. If account 1.4 sends a request to allocate a 1MB share, but that 1MB would bring the 1.4 usage over its quota, the request will be denied. For this to be useful, the storage server must give each usage-limited principal a separate account, and it needs to configure a size limit at the same time as the authority string is minted. For a friendnet, the CLI "add account" tool can do both at once: tahoe server add-account --quota 5GB Alice --> Please give the following authority string to "Alice", who should provide it to the "tahoe add-authority" command (authority string..) This command will allocate an account identifier, add Alice to the "pet name table" to associate it with the new account, and establish the 5GB sizelimit. Both the sizelimit and the petname can be changed later. Note that this restriction is independent for each server: some additional mechanism must be used to provide a grid-wide restriction. Also note that this restriction is not expressed in the authority string. It is purely local to the storage server. === Attenuated Server Space Restriction === TODO (or not) The server-side space restriction described above can only be applied by the storage server, and cannot be attenuated by other delegates. Alice might be allowed to use 5GB on this server, but she cannot use that restriction to delegate, say, just 1GB to Amy. Instead, Alice's sub-delegation should include a "server_size" restriction key, which contains a size limit. The storage server will only honor a request that uses this authority string if it does not cause the aggregate usage of this authority string's account prefix to rise above the given size limit. Note that this will not enforce the desired restriction if the size limits are not consistent across multiple delegated authorities for the same label. For example, if Amy ends up with two delagations, A1 (which gives her a size limit of 1GB) and A2 (which gives her 5GB), then she can consume 5GB despite the limit in A1. === Other Restrictions === Many storage authority restrictions are meant for internal use by tahoe tools as they delegate short-lived subauthorities to each other, and are not likely to be set by end users. * "SI": a storage index string. The authority can only be used to upload shares of a single file. * "serverid": a server identifier. The authority can only be used when talking to a specific server * "UEB_hash": a binary hash. The authority can only be used to upload shares of a single file, identified by its share's contents. (note: this restricton would require the server to parse the share and validate the hash) * "before": a timestamp. The authority is only valid until a specific time. Requires synchronized clocks or a better definition of "timestamp". * "delegate_to_furl": a string, used to acquire a FURL for an object that contains the attenuated authority. When it comes time to actually use the authority string to do something, this is the first step. * "delegate_to_key": an ECDSA pubkey, used to grant attenuated authority to a separate private key. 
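Putting a few of these fields together, a storage server's acceptance check might look roughly like the following sketch. The key names follow the restriction-dictionary fields used in this proposal; the request arguments and the usage-lookup callable are invented for the example, and a real server would of course validate the certificate chain and signature before getting this far:

  import time

  def allow_allocation(restrictions, label, size, usage_for_prefix):
      # account-label restriction: the lease label must extend the granted prefix
      prefix = tuple(restrictions.get("accountid", ()))
      if label[:len(prefix)] != prefix:
          return False
      # "before": the authority must not have expired
      if "before" in restrictions and time.time() >= restrictions["before"]:
          return False
      # "server-size": aggregate usage under the prefix must stay below the cap
      if "server-size" in restrictions:
          if usage_for_prefix(prefix) + size > restrictions["server-size"]:
              return False
      return True

  restrictions = {"accountid": (1, 4), "server-size": 2 * 10**9,
                  "before": time.time() + 3600}
  print(allow_allocation(restrictions, label=(1, 4, 7), size=10**6,
                         usage_for_prefix=lambda prefix: 5 * 10**8))   # True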
== User Experience == The process starts with Bob the storage server operator, who has just created a new Storage Server: tahoe create-node --> creates ~/.tahoe # edit ~/.tahoe/tahoe.cfg, add introducer.furl, configure storage, etc Now Bob decides that he wants to let his friend Alice use 5GB of space on his new server. tahoe server add-account --quota=5GB Alice --> Please give the following authority string to "Alice", who should provide it to the "tahoe add-authority" command (authority string XYZ..) Bob copies the new authority string into an email message and sends it to Alice. Meanwhile, Alice has created her own client, and attached it to the same Introducer as Bob. When she gets the email, she pastes the authority string into her local client: tahoe client add-authority (authority string XYZ..) --> new authority added: account (1) Now all CLI commands that Alice runs with her node will take advantage of Bob's space grant. Once Alice's node connects to Bob's, any upload which needs to send a share to Bob's server will search her list of authorities to find one that allows her to use Bob's server. When Alice uses her WUI, upload will be disabled until and unless she pastes one or more authority strings into a special "storage authority" box. TODO: Once pasted, we'll use some trick to keep the authority around in a convenient-yet-safe fashion. When Alice uses her javascript-based web drive, the javascript program will be launched with some trick to hand it the storage authorities, perhaps via a fragment identifier (http://server/path#fragment). If Alice decides that she wants Amy to have some space, she takes the authority string that Bob gave her and uses it to create one for Amy: tahoe authority dump (authority string XYZ..) --> explanation of what is in XYZ tahoe authority delegate --account 4,1 --space 2GB (authority string XYZ..) --> (new authority string ABC..) Alice sends the ABC string to Amy, who uses "tahoe client add-authority" to start using it. Later, Bob would like to find out how much space Alice is using. He brings up his node's Storage Server Web Status page. In addition to the overall usage numbers, the page will have a collapsible-treeview table with lines like: AccountID Usage TotalUsage Petname (1) 1.5GB 2.5GB Alice +(1,4) 1.0GB 1.0GB ? This indicates that Alice, as a whole, is using 2.5GB. It also indicates that Alice has delegated some space to a (1,4) account, and that delegation has used 1.0GB. Alice has used 1.5GB on her own, but is responsible for the full 2.5GB. If Alice tells Bob that the subaccount is for Amy, then Bob can assign a pet name for (1,4) with "tahoe server add-pet-name 1,4 Amy". Note that Bob is not aware of the 2GB limit that Alice has imposed upon Amy: the size restriction may have appeared on all the requests that have showed up thus far, but Bob has no way of being sure that a less-restrictive delgation hasn't been created, so his UI does not attempt to remember or present the restrictions it has seen before. === Friendnet === A "friendnet" is a set of nodes, each of which is both a storage server and a client, each operated by a separate person, all of which have granted storage rights to the others. The simplest way to get a friendnet started is to simply grant storage authority to everybody. "tahoe server enable-ambient-storage-authority" will configure the storage server to give space to anyone who asks. This behaves just like a 1.3.0 server, without accounting of any sort. 
The next step is to restrict server use to just the participants. "tahoe server disable-ambient-storage-authority" will undo the previous step, then there are two basic approaches: * "full mesh": each node grants authority directory to all the others. First, agree upon a userid number for each participant (the value doesn't matter, as long as it is unique). Each user should then use "tahoe server add-account" for all the accounts (including themselves, if they want some of their shares to land on their own machine), including a quota if they wish to restrict individuals: tahoe server add-account --account 1 --quota 5GB Alice --> authority string for Alice tahoe server add-account --account 2 --quota 5GB Bob --> authority string for Bob tahoe server add-account --account 3 --quota 5GB Carol --> authority string for Carol Then email Alice's string to Alice, Bob's string to Bob, etc. Once all users have used "tahoe client add-authority" on everything, each server will accept N distinct authorities, and each client will hold N distinct authorities. * "account manager": the group designates somebody to be the "AM", or "account manager". The AM generates a keypair and publishes the public key to all the participants, who create a local authority which delgates full storage rights to the corresponding private key. The AM then delegates account-restricted authority to each user, sending them their personal authority string: AM: tahoe authority create-authority --write-private-to=private.txt --> public.txt # email public.txt to all members AM: tahoe authority delegate --from-file=private.txt --account 1 --quota 5GB --> alice_authority.txt # email this to Alice tahoe authority delegate --from-file=private.txt --account 2 --quota 5GB --> bob_authority.txt # email this to Bob tahoe authority delegate --from-file=private.txt --account 3 --quota 5GB --> carol_authority.txt # email this to Carol ... Alice: # receives alice_authority.txt tahoe client add-authority --from-file=alice_authority.txt # receives public.txt tahoe server add-authorization --from-file=public.txt Bob: # receives bob_authority.txt tahoe client add-authority --from-file=bob_authority.txt # receives public.txt tahoe server add-authorization --from-file=public.txt Carol: # receives carol_authority.txt tahoe client add-authority --from-file=carol_authority.txt # receives public.txt tahoe server add-authorization --from-file=public.txt If the members want to see names next to their local usage totals, they can set local petnames for the accounts: tahoe server set-petname 1 Alice tahoe server set-petname 2 Bob tahoe server set-petname 3 Carol Alternatively, the AM could provide a usage aggregator, which will collect usage values from all the storage servers and show the totals in a single place, and add the petnames to that display instead. The AM gets more authority than anyone else (they can spoof everybody), but each server has just a single authorization instead of N, and each client has a single authority instead of N. 
When a new member joins the group, the amount of work that must be done is significantly less, and only two parties are involved instead of all N: AM: tahoe authority delegate --from-file=private.txt --account 4 --quota 5GB --> dave_authority.txt # email this to Dave Dave: # receives dave_authority.txt tahoe client add-authority --from-file=dave_authority.txt # receives public.txt tahoe server add-authorization --from-file=public.txt Another approach is to let everybody be the AM: instead of keeping the private.txt file secret, give it to all members of the group (but not to outsiders). This lets current members bring new members into the group without depending upon anybody else doing work. It also renders any notion of enforced quotas meaningless, so it is only appropriate for actual friends who are voluntarily refraining from spoofing each other. === Commercial Grid === A "commercial grid", like the one that allmydata.com manages as a for-profit service, is characterized by a large number of independent clients (who do not know each other), and by all of the storage servers being managed by a single entity. In this case, we use an Account Manager like above, to collapse the potential N*M explosion of authorities into something smaller. We also create a dummy "parent" account, and give all the real clients subaccounts under it, to give the operations personnel a convenient "total space used" number. Each time a new customer joins, the AM is directed to create a new authority for them, and the resulting string is provided to the customer's client node. AM: tahoe authority create-authority --account 1 \ --write-private-to=AM-private.txt --write-public-to=AM-public.txt Each time a new storage server is brought up: SERVER: tahoe server add-authorization --from-file=AM-public.txt Each time a new client joins: AM: N = next_account++ tahoe authority delegate --from-file=AM-private.txt --account 1,N --> new_client_authority.txt # give this to new client == Programmatic Interfaces == The storage authority can be passed as a string in a single serialized form, which is cut-and-pasteable and printable. It uses minimal punctuation, to make it possible to include it as a URL query argument or HTTP header field without requiring character-escaping. Before passing it over HTTP, however, note that revealing the authority string to someone is equivalent to irrevocably delegating all that authority to them. While this is appropriate when transferring authority from, say, a receptive storage server to your local agent, it is not appropriate when using a foreign tahoe node, or when asking a Helper to upload a specific file. Attenuations (see below) should be used to limit the delegated authority in these cases. In the programmatic web-API, any operation that consumes storage will accept a storage-authority= query argument, the value of which will be the printable form of an authority string. This includes all PUT operations, POST t=upload and t=mkdir, and anything which creates a new file, creates a directory (perhaps an intermediate one), or modifies a mutable file. Alternatively, the authority string can also be passed through an HTTP header. A single "X-Tahoe-Storage-Authority:" header can be used with the printable authority string. 
If the string is too large to fit in a single header, the application can provide a series of numbered "X-Tahoe-Storage-Authority-1:", "X-Tahoe-Storage-Authority-2:", etc, headers, and these will be sorted in alphabetical order (please use 08/09/10/11 rather than 8/9/10/11), stripped of leading and trailing whitespace, and concatenated. The HTTP header form can accomodate larger authority strings, since these strings can grow too large to pass as a query argument (especially when several delegations or attenuations are involved). However, depending upon the HTTP client library being used, passing extra HTTP headers may be more complicated than simply modifying the URL, and may be impossible in some cases (such as javascript running in a web browser). TODO: we may add a stored-token form of authority-passing to handle environments in which query-args won't work and headers are not available. This approach would use a special PUT which takes the authority string as the HTTP body, and remembers it on the server side in associated with a brief-but-unguessable token. Later operations would then use the authority by passing a --storage-authority-token=XYZ query argument. These authorities would expire after some period. == Quota Management, Aggregation, Reporting == The storage server will maintain enough information to efficiently compute usage totals for each account referenced in all of their leases, as well as all their parent accounts. This information is used for several purposes: * enforce server-space restrictions, by selectively rejecting storage requests which would cause the account-usage-total to rise above the limit specified in the enabling authorization string * report individual account usage to the account-holder (if a client can consume space under account A, they are also allowed to query usage for account A or a subaccount). * report individual account usage to the storage-server operator, possibly associated with a pet name * report usage for all accounts to the storage-server operator, possibly associated with a pet name, in the form of a large table * report usage for all accounts to an external aggregator The external aggregator would take usage information from all the storage servers in a single grid and sum them together, providing a grid-wide usage number for each account. This could be used by e.g. clients in a commercial grid to report overall-space-used to the end user. There will be web-API URLs available for all of these reports. TODO: storage servers might also have a mechanism to apply space-usage limits to specific account ids directly, rather than requiring that these be expressed only through authority-string limitation fields. This would let a storage server operator revoke their space-allocation after delivering the authority string. == Low-Level Formats == This section describes the low-level formats used by the Accounting process, beginning with the storage-authority data structure and working upwards. This section is organized to follow the storage authority, starting from the point of grant. The discussion will thus begin at the storage server (where the authority is first created), work back to the client (which receives the authority as a web-API argument), then follow the authority back to the servers as it is used to enable specific storage operations. It will then detail the accounting tables that the storage server is obligated to maintain, and describe the interfaces through which these tables are accessed by other parties. 
=== Storage Authority ===

==== Terminology ====

Storage Authority is represented as a chain of certificates and a private
key. Each certificate authorizes and restricts a specific private key. The
initial certificate in the chain derives its authority by being placed in
the storage server's tahoe.cfg file (i.e. by being authorized by the storage
server operator). All subsequent certificates are signed by the authorized
private key that was identified in the previous certificate: they derive
their authority by delegation. Each certificate has restrictions which limit
the authority being delegated.

 authority: ([cert[0], cert[1], cert[2] ...], privatekey)

The "restrictions dictionary" is a table which establishes an upper bound on
how this authority (or any attenuations thereof) may be used. It is
effectively a set of key-value pairs.

A "signing key" is an EC-DSA192 private key string, and is 12 bytes long. A
"verifying key" is an EC-DSA192 public key string, and is 24 bytes long. A
"key identifier" is a string which securely identifies a specific
signing/verifying keypair: for long RSA keys it would be a secure hash of
the public key, but since ECDSA192 keys are so short, we simply use the full
verifying key verbatim. A "key hint" is a variable-length prefix of the key
identifier, perhaps zero bytes long, used to help a recipient reduce the
number of verifying keys that it must search to find one that matches a
signed message.

==== Authority Chains ====

The authority chain consists of a list of certificates, each of which has a
serialized restrictions dictionary. Each dictionary will have a
"delegate-to-key" field, which delegates authority to a private key,
referenced with a key identifier. In addition, the non-initial certs are
signed, so they each contain a signature and a key hint:

 cert[0]: serialized(restrictions_dictionary)
 cert[1]: serialized(restrictions_dictionary), signature, keyhint
 cert[2]: serialized(restrictions_dictionary), signature, keyhint

In this example, suppose cert[0] contains a delegate-to-key field that
identifies a keypair sign_A/verify_A. In this case, cert[1] will have a
signature that was made with sign_A, and the keyhint in cert[1] will
reference verify_A.

 cert[0].restrictions[delegate-to-key] = A_keyid
 cert[1].signature = SIGN(sign_A, serialized(cert[0].restrictions))
 cert[1].keyhint = verify_A
 cert[1].restrictions[delegate-to-key] = B_keyid
 cert[2].signature = SIGN(sign_B, serialized(cert[1].restrictions))
 cert[2].keyhint = verify_B
 cert[2].restrictions[delegate-to-key] = C_keyid

In this example, the full storage authority consists of the cert[0,1,2]
chain and the sign_C private key: anyone who is in possession of both will
be able to exert this authority. To wield the authority, a client will
present the cert[0,1,2] chain and an action message signed by sign_C; the
server will validate the chain and the signature before performing the
requested action. The only circumstances that might prompt the client to
share the sign_C private key with another party (including the server) would
be if it wanted to irrevocably share its full authority with that party.
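The validation step can be sketched as follows. The cert objects, the
verify() function, and the covered_bytes() helper (which returns the bytes a
certificate's signature is expected to cover) are assumptions made for the
example; the concrete byte-level formats are specified in the sections below.

  def validate_authority(certs, authorized_roots, action_msg, action_sig,
                         verify, covered_bytes):
      # 'certs' are objects with .restrictions (a dict) and .signature fields.
      # verify(verifying_key, signature, message) -> bool and
      # covered_bytes(cert) -> bytes are assumed helpers: any ECDSA library
      # can supply verify(), and covered_bytes() applies the serialization
      # rules defined below.
      if covered_bytes(certs[0]) not in authorized_roots:
          return False        # cert[0] must be authorized via tahoe.cfg
      for i in range(1, len(certs)):
          signer = certs[i - 1].restrictions["delegate-to-key"]
          if not verify(signer, certs[i].signature, covered_bytes(certs[i])):
              return False    # a link in the delegation chain does not verify
      final_key = certs[-1].restrictions["delegate-to-key"]
      if not verify(final_key, action_sig, action_msg):
          return False        # the request was not signed by the delegated key
      # The caller must still compare every restrictions dictionary
      # (accountid, SI, before, server-size, ...) against the requested
      # operation before performing it.
      return True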
==== Restriction Dictionaries ====

Within a restriction dictionary, the following keys are defined. Their full
meanings are defined later.

 'accountid': an arbitrary-length sequence of integers >=0, restricting the
              accounts which can be manipulated or used in leases
 'SI': a storage index (binary string), controlling which file may be
       manipulated
 'serverid': binary string, limiting which server will accept requests
 'UEB-hash': binary string, limiting the content of the file being
             manipulated
 'before': timestamp (seconds since epoch), limits the lifetime of this
           authority
 'server-size': integer >0, maximum aggregate storage (in bytes) per account
 'delegate-to-key': binary string (DSA pubkey identifier)
 'furl-to': printable FURL string

==== Authority Serialization ====

There is only one form of serialization: a somewhat-compact URL-safe
cut-and-pasteable printable form. We are interested in minimizing the size
of the resulting authority, so rather than using a general-purpose (perhaps
JSON-based) serialization scheme, we use one that is specialized for this
task.

This URL-safe form will use minimal punctuation to avoid quoting issues when
used in a URL query argument. It would be nice to avoid word-breaking
characters that make cut-and-paste troublesome; however, this is more
difficult because most non-alphanumeric characters are word-breaking in at
least one application.

The serialized storage authority as a whole contains a single version
identifier and magic number at the beginning. None of the internal
components contain redundant version numbers: they are implied by the
container. If components are serialized independently for other reasons,
they may contain version identifiers in that form.

Signing keys (i.e. private keys) are URL-safe-serialized using Zooko's
base62 alphabet, which offers almost the same density as standard base64 but
without any non-URL-safe or word-breaking characters. Since we use
fixed-format keys (EC-DSA, 192bit, with SHA256), the private keys are
fixed-length (96 bits or 12 bytes), so there is no length indicator: all
URL-safe-serialized signing keys are 17 base62 characters long. The 192-bit
verifying keys (i.e. public keys) use the same approach: the URL-safe form
is 33 characters long.

An account-id sequence (a variable-length sequence of non-negative numbers)
is serialized by representing each number in decimal ASCII, then joining the
pieces with commas. The string is terminated by the first non-[0-9,]
character encountered, which will either be the key-identifier letter of the
next field, or the dictionary-terminating character at the end.

Any single integral decimal number (such as the "before" timestamp field, or
the "server-size" field) is serialized as a variable-length sequence of
ASCII decimal digits, terminated by any non-digit.

The restrictions dictionary is serialized as a concatenated series of
key-identifier-letter / value string pairs, ending with the marker "E.". The
URL-safe form uses a single printable letter to indicate which key is being
serialized. Each type of value string is serialized differently:

 "A": accountid: variable-length sequence of comma-joined numbers
 "I": storage index: fixed-length 26-character *base32*-encoded storage index
 "P": server id (peer id): fixed-length 32-character *base32* encoded
      serverid (matching the printable Tub.tubID string that Foolscap
      provides)
 "U": UEB hash: fixed-length 43-character base62 encoded UEB hash
 "B": before: variable-length sequence of decimal digits, seconds-since-epoch.
 "S": server-size: variable-length sequence of decimal digits, max size in
      bytes
 "D": delegate-to-key: ECDSA public key, 33 base62 characters.
"F": furl-to: variable-length FURL string, wrapped in a netstring: "%d:%s," % (len(FURL), FURL). Note that this is rarely pasted. "E.": end-of-dictionary marker The ECDSA signature is serialized as a variable number of base62 characters, terminated by a period. We expect the signature to be about 384 bits (48 bytes) long, or 65 base62 characters. A missing signature (such as for the initial cert) is represented as a single period. The key hint is serialized with a base62-encoded serialized hint string (a byte-quantized prefix of the serialized public key), terminated by a period. An empty hint would thus be serialized as a single period. For the current design, we expect the key hint to be empty. The full storage authority string consists of a certificate chain and a delegate private key. Given the single-certificate serialization scheme described above, the full authority is serialized as follows: * version prefix: depends upon the application, but for storage-authority chains this will be "sa0-", for Storage-Authority Version 0. * serialized certificates, concatenated together * serialized private key (to which the last certificate delegates authority) Note that this serialization form does not have an explicit terminator, so the environment must provide a length indicator or some other way to identify the end of the authority string. The benefit of this approach is that the full string will begin and end with alphanumeric characters, making cut-and-paste easier (increasing the size of the mouse target: anywhere within the final component will work). Also note that the period is a reserved delimiter: it cannot appear in the serialized restrictions dictionary. The parser can remove the version prefix, split the rest on periods, and expect to see 3*k+1 fields, consisting of k (restriction-dictionary,signature,keyhint) 3-tuples and a single private key at the end. Some examples: (example A) cert[0] delegates account 1,4 to (pubkey ZlFA / privkey 1f2S): sa0-A1,4D2lFA6LboL2xx0ldQH2K1TdSrwuqMMiME3E...1f2SI9UJPXvb7vdJ1 (example B) cert[0] delegates account 1,4 to ZlFA/1f2S cert[1] subdelegates 5GB and subaccount 1,4,7 to pubkey 0BPo/06rt: sa0-A1,4D2lFA6LboL2xx0ldQH2K1TdSrwuqMMiME3E...A1,4,7S5000000000D0BPoGxJ3M4KWrmdpLnknhJABrWip5e9kPE,7cyhQvv5axdeihmOzIHjs85TcUIYiWHdsxNz50GTerEOR5ucj2TITPXxyaCUli1oF...06rtcPQotR3q4f2cT == Problems == Problems which have thus far been identified with this approach: * allowing arbitrary subaccount generation will permit a DoS attack, in which an authorized uploader consumes lots of DB space by creating an unbounded number of randomly-generated subaccount identifiers. OTOH, they can already attach an unbounded number of leases to any file they like, consuming a lot of space. tahoe_lafs-1.20.0/docs/proposed/denver.txt0000644000000000000000000002435713615410400015441 0ustar00The "Denver Airport" Protocol (discussed whilst returning robk to DEN, 12/1/06) This is a scaling improvement on the "Select Peers" phase of Tahoe2. The problem it tries to address is the storage and maintenance of the 1M-long peer list, and the relative difficulty of gathering long-term reliability information on a useful numbers of those peers. In DEN, each node maintains a Chord-style set of connections to other nodes: log2(N) "finger" connections to distant peers (the first of which is halfway across the ring, the second is 1/4 across, then 1/8th, etc). These connections need to be kept alive with relatively short timeouts (5s?), so any breaks can be rejoined quickly. 
In addition to the finger connections, each node must also remain aware of K "successor" nodes (those which are immediately clockwise of the starting point). The node is not required to maintain connections to these, but it should remain informed about their contact information, so that it can create connections when necessary. We probably need a connection open to the immediate successor at all times. Since inbound connections exist too, each node has something like 2*log2(N) plus up to 2*K connections. Each node keeps history of uptime/availability of the nodes that it remains connected to. Each message that is sent to these peers includes an estimate of that peer's availability from the point of view of the outside world. The receiving node will average these reports together to determine what kind of reliability they should announce to anyone they accept leases for. This reliability is expressed as a percentage uptime: P=1.0 means the peer is available 24/7, P=0.0 means it is almost never reachable. When a node wishes to publish a file, it creates a list of (verifierid, sharenum) tuples, and computes a hash of each tuple. These hashes then represent starting points for the landlord search: starting_points = [(sharenum,sha(verifierid + str(sharenum))) for sharenum in range(256)] The node then constructs a reservation message that contains enough information for the potential landlord to evaluate the lease, *and* to make a connection back to the starting node: message = [verifierid, sharesize, requestor_furl, starting_points] The node looks through its list of finger connections and splits this message into up to log2(N) smaller messages, each of which contains only the starting points that should be sent to that finger connection. Specifically we sent a starting_point to a finger A if the nodeid of that finger is <= the starting_point and if the next finger B is > starting_point. Each message sent out can contain multiple starting_points, each for a different share. When a finger node receives this message, it performs the same splitting algorithm, sending each starting_point to other fingers. Eventually a starting_point is received by a node that knows that the starting_point lies between itself and its immediate successor. At this point the message switches from the "hop" mode (following fingers) to the "search" mode (following successors). While in "search" mode, each node interprets the message as a lease request. It checks its storage pool to see if it can accomodate the reservation. If so, it uses requestor_furl to contact the originator and announces its willingness to host the given sharenum. This message will include the reliability measurement derived from the host's counterclockwise neighbors. If the recipient cannot host the share, it forwards the request on to the next successor, which repeats the cycle. Each message has a maximum hop count which limits the number of peers which may be searched before giving up. If a node sees itself to be the last such hop, it must establish a connection to the originator and let them know that this sharenum could not be hosted. The originator sends out something like 100 or 200 starting points, and expects to get back responses (positive or negative) in a reasonable amount of time. (perhaps if we receive half of the responses in time T, wait for a total of 2T for the remaining ones). 
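The splitting rule above (give a starting_point to finger A when A's nodeid
is <= the starting_point and the next finger's nodeid is greater) might be
sketched like this; fingers are assumed to be (nodeid, connection) pairs
sorted by nodeid, and ring wrap-around is ignored for brevity:

  from bisect import bisect_right

  def split_among_fingers(starting_points, fingers):
      # starting_points: list of (sharenum, point) tuples as computed above
      # fingers: list of (nodeid, connection) pairs, sorted by nodeid
      nodeids = [nodeid for nodeid, _ in fingers]
      batches = {}
      for sharenum, point in starting_points:
          i = bisect_right(nodeids, point) - 1  # last finger with nodeid <= point
          if i < 0:
              continue   # no finger covers it; handle via our own successors
          connection = fingers[i][1]
          batches.setdefault(connection, []).append((sharenum, point))
      return batches

Each finger then receives a single message carrying its batch of starting
points, and applies the same procedure itself.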
If no response is received with the timeout, either re-send the requests for those shares (to different fingers) or send requests for completely different shares. Each share represents some fraction of a point "S", such that the points for enough shares to reconstruct the whole file total to 1.0 points. I.e., if we construct 100 shares such that we need 25 of them to reconstruct the file, then each share represents .04 points. As the positive responses come in, we accumulate two counters: the capacity counter (which gets a full S points for each positive response), and the reliability counter (which gets S*(reliability-of-host) points). The capacity counter is not allowed to go above some limit (like 4x), as determined by provisioning. The node keeps adding leases until the reliability counter has gone above some other threshold (larger but close to 1.0). [ at download time, each host will be able to provide the share back with probability P times an exponential decay factor related to peer death. Sum these probabilities to get the average number of shares that will be available. The interesting thing is actually the distribution of these probabilities, and what threshold you have to pick to get a sufficiently high chance of recovering the file. If there are N identical peers with probability P, the number of recovered shares will have a gaussian distribution with an average of N*P and a stddev of (??). The PMF of this function is an S-curve, with a sharper slope when N is large. The probability of recovering the file is the value of this S curve at the threshold value (the number of necessary shares). P is not actually constant across all peers, rather we assume that it has its own distribution: maybe gaussian, more likely exponential (power law). This changes the shape of the S-curve. Assuming that we can characterize the distribution of P with perhaps two parameters (say meanP and stddevP), the S-curve is a function of meanP, stddevP, N, and threshold... To get 99.99% or 99.999% recoverability, we must choose a threshold value high enough to accomodate the random variations and uncertainty about the real values of P for each of the hosts we've selected. By counting reliability points, we are trying to estimate meanP/stddevP, so we know which S-curve to look at. The threshold is fixed at 1.0, since that's what erasure coding tells us we need to recover the file. The job is then to add hosts (increasing N and possibly changing meanP/stddevP) until our recoverability probability is as high as we want. ] The originator takes all acceptance messages and adds them in order to the list of landlords that will be used to host the file. It stops when it gets enough reliability points. Note that it does *not* discriminate against unreliable hosts: they are less likely to have been found in the first place, so we don't need to discriminate against them a second time. We do, however, use the reliability points to acknowledge that sending data to an unreliable peer is not as useful as sending it to a reliable one (there is still value in doing so, though). The remaining reservation-acceptance messages are cancelled and then put aside: if we need to make a second pass, we ask those peers first. Shares are then created and published as in Tahoe2. If we lose a connection during the encoding, that share is lost. 
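Backing up a little: the point-counting used to decide when enough leases
have been accepted might look like the following sketch. The 4x capacity cap
and the exact reliability threshold are tunable placeholders, not specified
values:

  def choose_landlords(positive_responses, k, capacity_cap=4.0,
                       reliability_goal=1.2):
      # positive_responses: iterable of (peer, reliability) in arrival order
      S = 1.0 / k                      # points per share
      capacity = reliability = 0.0
      landlords = []
      for peer, peer_reliability in positive_responses:
          if capacity >= capacity_cap:
              break                    # provisioning limit on raw storage used
          landlords.append(peer)
          capacity += S
          reliability += S * peer_reliability
          if reliability >= reliability_goal:
              break                    # expected recoverability is high enough
      return landlords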
If we lose enough shares, we might want to generate more to make up for them: this is done by using the leftover acceptance messages first, then triggering a new Chord search for the as-yet-unaccepted sharenums. These new peers will get shares from all segments that have not yet been finished, then a second pass will be made to catch them up on the earlier segments. Properties of this approach: the total number of peers that each node must know anything about is bounded to something like 2*log2(N) + K, probably on the order of 50 to 100 total. This is the biggest advantage, since in tahoe2 each node must know at least the nodeid of all 1M peers. The maintenance traffic should be much less as a result. each node must maintain open (keep-alived) connections to something like 2*log2(N) peers. In tahoe2, this number is 0 (well, probably 1 for the introducer). during upload, each node must actively use 100 connections to a random set of peers to push data (just like tahoe2). The probability that any given share-request gets a response is equal to the number of hops it travels through times the chance that a peer dies while holding on to the message. This should be pretty small, as the message should only be held by a peer for a few seconds (more if their network is busy). In tahoe2, each share-request always gets a response, since they are made directly to the target. I visualize the peer-lookup process as the originator creating a message-in-a-bottle for each share. Each message says "Dear Sir/Madam, I would like to store X bytes of data for file Y (share #Z) on a system close to (but not below) nodeid STARTING_POINT. If you find this amenable, please contact me at FURL so we can make arrangements.". These messages are then bundled together according to their rough destination (STARTING_POINT) and sent somewhere in the right direction. Download happens the same way: lookup messages are disseminated towards the STARTING_POINT and then search one successor at a time from there. There are two ways that the share might go missing: if the node is now offline (or has for some reason lost its shares), or if new nodes have joined since the original upload and the search depth (maximum hop count) is too small to accomodate the churn. Both result in the same amount of localized traffic. In the latter case, a storage node might want to migrate the share closer to the starting point, or perhaps just send them a note to remember a pointer for the share. Checking: anyone who wishes to do a filecheck needs to send out a lookup message for every potential share. These lookup messages could have a higher search depth than usual. It would be useful to know how many peers each message went through before being returned: this might be useful to perform repair by instructing the old host (which is further from the starting point than you'd like) to push their share closer towards the starting point. tahoe_lafs-1.20.0/docs/proposed/index.rst0000644000000000000000000000071113615410400015242 0ustar00Proposed Specifications ======================= This directory is where we hold design notes about upcoming/proposed features. Usually this is kept in tickets on the `bug tracker`_, but sometimes we put this directly into the source tree. .. _bug tracker: https://tahoe-lafs.org/trac Most of these files are plain text, should be read from a source tree. This index only lists the files that are in .rst format. .. 
toctree:: :maxdepth: 2 leasedb tahoe_lafs-1.20.0/docs/proposed/leasedb.rst0000644000000000000000000002465113615410400015543 0ustar00.. -*- coding: utf-8-with-signature -*- ===================== Lease database design ===================== The target audience for this document is developers who wish to understand the new lease database (leasedb) planned to be added in Tahoe-LAFS v1.11.0. Introduction ------------ A "lease" is a request by an account that a share not be deleted before a specified time. Each storage server stores leases in order to know which shares to spare from garbage collection. Motivation ---------- The leasedb will replace the current design in which leases are stored in the storage server's share container files. That design has several disadvantages: - Updating a lease requires modifying a share container file (even for immutable shares). This complicates the implementation of share classes. The mixing of share contents and lease data in share files also led to a security bug (ticket `#1528`_). - When only the disk backend is supported, it is possible to read and update leases synchronously because the share files are stored locally to the storage server. For the cloud backend, accessing share files requires an HTTP request, and so must be asynchronous. Accepting this asynchrony for lease queries would be both inefficient and complex. Moving lease information out of shares and into a local database allows lease queries to stay synchronous. Also, the current cryptographic protocol for renewing and cancelling leases (based on shared secrets derived from secure hash functions) is complex, and the cancellation part was never used. The leasedb solves the first two problems by storing the lease information in a local database instead of in the share container files. The share data itself is still held in the share container file. At the same time as implementing leasedb, we devised a simpler protocol for allocating and cancelling leases: a client can use a public key digital signature to authenticate access to a foolscap object representing the authority of an account. This protocol is not yet implemented; at the time of writing, only an "anonymous" account is supported. The leasedb also provides an efficient way to get summarized information, such as total space usage of shares leased by an account, for accounting purposes. .. _`#1528`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1528 Design constraints ------------------ A share is stored as a collection of objects. The persistent storage may be remote from the server (for example, cloud storage). Writing to the persistent store objects is in general not an atomic operation. So the leasedb also keeps track of which shares are in an inconsistent state because they have been partly written. (This may change in future when we implement a protocol to improve atomicity of updates to mutable shares.) Leases are no longer stored in shares. The same share format is used as before, but the lease slots are ignored, and are cleared when rewriting a mutable share. The new design also does not use lease renewal or cancel secrets. (They are accepted as parameters in the storage protocol interfaces for backward compatibility, but are ignored. Cancel secrets were already ignored due to the fix for `#1528`_.) The new design needs to be fail-safe in the sense that if the lease database is lost or corruption is detected, no share data will be lost (even though the metadata about leases held by particular accounts has been lost). 
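For concreteness, a leasedb with these properties could be kept in a small
local SQLite database. The schema below is only an illustration of the shape
of the data (a table of shares carrying an explicit state, and a table of
leases keyed by account); it is not the schema actually shipped with
Tahoe-LAFS::

    import sqlite3

    ILLUSTRATIVE_SCHEMA = """
    CREATE TABLE shares (
        storage_index   TEXT NOT NULL,
        shnum           INTEGER NOT NULL,
        used_space      INTEGER NOT NULL,
        state           INTEGER NOT NULL,   -- see "Share states" below
        PRIMARY KEY (storage_index, shnum)
    );
    CREATE TABLE leases (
        storage_index   TEXT NOT NULL,
        shnum           INTEGER NOT NULL,
        account_id      INTEGER NOT NULL,
        renewal_time    INTEGER NOT NULL,   -- UTC seconds since the epoch
        expiration_time INTEGER NOT NULL,
        PRIMARY KEY (storage_index, shnum, account_id)
    );
    """

    def open_leasedb(path):
        db = sqlite3.connect(path)
        db.executescript(ILLUSTRATIVE_SCHEMA)
        return db

Because the database is local to the storage server, queries such as "total
space used by the shares leased by account N" stay synchronous and cheap,
which is one of the main goals of the design.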
Accounting crawler
------------------

A "crawler" is a long-running process that visits share container files at a
slow rate, so as not to overload the server by trying to visit all share
container files one after another immediately.

The accounting crawler replaces the previous "lease crawler". It examines
each share container file and compares it with the state of the leasedb, and
may update the state of the share and/or the leasedb.

The accounting crawler may perform the following functions (but see ticket
#1834 for a proposal to reduce the scope of its responsibility):

- Remove leases that are past their expiration time. (Currently, this is
  done automatically before deleting shares, but we plan to allow expiration
  to be performed separately for individual accounts in future.)

- Delete the objects containing unleased shares — that is, shares that have
  stable entries in the leasedb but no current leases (see below for the
  definition of "stable" entries).

- Discover shares that have been manually added to storage, via ``scp`` or
  some other out-of-band means.

- Discover shares that are present when a storage server is upgraded to a
  leasedb-supporting version from a previous version, and give them "starter
  leases".

- Recover from a situation where the leasedb is lost or detectably
  corrupted. This is handled in the same way as upgrading from a previous
  version.

- Detect shares that have unexpectedly disappeared from storage. The
  disappearance of a share is logged, and its entry and leases are removed
  from the leasedb.

Accounts
--------

An account holds leases for some subset of shares stored by a server. The
leasedb schema can handle many distinct accounts, but for the time being we
create only two accounts: an anonymous account and a starter account. The
starter account is used for leases on shares discovered by the accounting
crawler; the anonymous account is used for all other leases.

The leasedb has at most one lease entry per account per (storage_index,
shnum) pair. This entry stores the times when the lease was last renewed and
when it is set to expire (if the expiration policy does not force it to
expire earlier), represented as Unix UTC-seconds-since-epoch timestamps.

For more on expiration policy, see :doc:`../garbage-collection`.

Share states
------------

The leasedb holds an explicit indicator of the state of each share. The
diagram and descriptions below give the possible values of the "state"
indicator, what that value means, and transitions between states, for any
(storage_index, shnum) pair on each server::

  #     STATE_STABLE -----.
  #      ^   |    ^ |     |
  #      |   v    | |     v
  # STATE_COMING  | | STATE_GOING
  #      ^        | |     |
  #      |        | v     |
  #      '------ NONE <---'

**NONE**: There is no entry in the ``shares`` table for this (storage_index,
shnum) in this server's leasedb. This is the initial state.

**STATE_COMING**: The share is being created or (if a mutable share)
updated. The store objects may have been at least partially written, but the
storage server doesn't have confirmation that they have all been completely
written.

**STATE_STABLE**: The store objects have been completely written and are not
in the process of being modified or deleted by the storage server. (It could
have been modified or deleted behind the back of the storage server, but if
it has, the server has not noticed that yet.) The share may or may not be
leased.

**STATE_GOING**: The share is being deleted.
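The legal transitions in the diagram can be captured directly in code; the
next section spells out when each one fires. A sketch, illustrative only::

    NONE, STATE_COMING, STATE_STABLE, STATE_GOING = range(4)

    # Every edge shown in the diagram above.
    LEGAL_TRANSITIONS = {
        (NONE, STATE_COMING),          # creation begins, or an incomplete share is found
        (NONE, STATE_STABLE),          # the crawler discovers a complete share
        (STATE_COMING, STATE_STABLE),  # all store objects completely written
        (STATE_STABLE, STATE_COMING),  # a mutable share is being modified
        (STATE_STABLE, STATE_GOING),   # an unleased share is being deleted
        (STATE_STABLE, NONE),          # the store objects turn out to be gone
        (STATE_GOING, NONE),           # deletion is confirmed complete
    }

    def set_share_state(current, new):
        if (current, new) not in LEGAL_TRANSITIONS:
            raise ValueError("illegal share state transition: %r -> %r"
                             % (current, new))
        return new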
State transitions ----------------- • **STATE_GOING** → **NONE** trigger: The storage server gains confidence that all store objects for the share have been removed. implementation: 1. Remove the entry in the leasedb. • **STATE_STABLE** → **NONE** trigger: The accounting crawler noticed that all the store objects for this share are gone. implementation: 1. Remove the entry in the leasedb. • **NONE** → **STATE_COMING** triggers: A new share is being created, as explicitly signalled by a client invoking a creation command, *or* the accounting crawler discovers an incomplete share. implementation: 1. Add an entry to the leasedb with **STATE_COMING**. 2. (In case of explicit creation) begin writing the store objects to hold the share. • **STATE_STABLE** → **STATE_COMING** trigger: A mutable share is being modified, as explicitly signalled by a client invoking a modification command. implementation: 1. Add an entry to the leasedb with **STATE_COMING**. 2. Begin updating the store objects. • **STATE_COMING** → **STATE_STABLE** trigger: All store objects have been written. implementation: 1. Change the state value of this entry in the leasedb from **STATE_COMING** to **STATE_STABLE**. • **NONE** → **STATE_STABLE** trigger: The accounting crawler discovers a complete share. implementation: 1. Add an entry to the leasedb with **STATE_STABLE**. • **STATE_STABLE** → **STATE_GOING** trigger: The share should be deleted because it is unleased. implementation: 1. Change the state value of this entry in the leasedb from **STATE_STABLE** to **STATE_GOING**. 2. Initiate removal of the store objects. The following constraints are needed to avoid race conditions: - While a share is being deleted (entry in **STATE_GOING**), we do not accept any requests to recreate it. That would result in add and delete requests for store objects being sent concurrently, with undefined results. - While a share is being added or modified (entry in **STATE_COMING**), we treat it as leased. - Creation or modification requests for a given mutable share are serialized. Unresolved design issues ------------------------ - What happens if a write to store objects for a new share fails permanently? If we delete the share entry, then the accounting crawler will eventually get to those store objects and see that their lengths are inconsistent with the length in the container header. This will cause the share to be treated as corrupted. Should we instead attempt to delete those objects immediately? If so, do we need a direct **STATE_COMING** → **STATE_GOING** transition to handle this case? - What happens if only some store objects for a share disappear unexpectedly? This case is similar to only some objects having been written when we get an unrecoverable error during creation of a share, but perhaps we want to treat it differently in order to preserve information about the storage service having lost data. - Does the leasedb need to track corrupted shares? Future directions ----------------- Clients will have key pairs identifying accounts, and will be able to add leases for a specific account. Various space usage policies can be defined. Better migration tools ('tahoe storage export'?) will create export files that include both the share data and the lease data, and then an import tool will both put the share in the right place and update the recipient node's leasedb. tahoe_lafs-1.20.0/docs/proposed/lossmodel.lyx0000644000000000000000000015750413615410400016155 0ustar00#LyX 1.6.2 created this file. 
For more info see http://www.lyx.org/ \lyxformat 345 \begin_document \begin_header \textclass amsart \use_default_options true \begin_modules theorems-ams theorems-ams-extended \end_modules \language english \inputencoding auto \font_roman default \font_sans default \font_typewriter default \font_default_family default \font_sc false \font_osf false \font_sf_scale 100 \font_tt_scale 100 \graphics default \float_placement h \paperfontsize default \spacing single \use_hyperref false \papersize default \use_geometry false \use_amsmath 1 \use_esint 1 \cite_engine basic \use_bibtopic false \paperorientation portrait \secnumdepth 3 \tocdepth 3 \paragraph_separation indent \defskip medskip \quotes_language english \papercolumns 1 \papersides 1 \paperpagestyle default \tracking_changes false \output_changes false \author "" \author "" \end_header \begin_body \begin_layout Title Tahoe Distributed Filesharing System Loss Model \end_layout \begin_layout Author Shawn Willden \end_layout \begin_layout Date 07/22/2009 \end_layout \begin_layout Address South Weber, Utah \end_layout \begin_layout Email shawn@willden.org \end_layout \begin_layout Abstract The abstract goes here \end_layout \begin_layout Section Problem Statement \end_layout \begin_layout Standard The allmydata Tahoe distributed file system uses Reed-Solomon erasure coding to split files into \begin_inset Formula $N$ \end_inset shares which are delivered to randomly-selected peers in a distributed network. The file can later be reassembled from any \begin_inset Formula $k\leq N$ \end_inset of the shares, if they are available. \end_layout \begin_layout Standard Over time shares are lost for a variety of reasons. Storage servers may crash, be destroyed or simply be removed from the network. To mitigate such losses, Tahoe network clients employ a repair agent which scans the peers once per time period \begin_inset Formula $A$ \end_inset and determines how many of the shares remain. If less than \begin_inset Formula $L$ \end_inset ( \begin_inset Formula $k\leq L\leq N$ \end_inset ) shares remain, then the repairer reconstructs the file shares and redistribute s the missing ones, bringing the availability back up to full. \end_layout \begin_layout Standard The question we're trying to answer is "What is the probability that we'll be able to reassemble the file at some later time \begin_inset Formula $T$ \end_inset ?". We'd also like to be able to determine what values we should choose for \begin_inset Formula $k$ \end_inset , \begin_inset Formula $N$ \end_inset , \begin_inset Formula $A$ \end_inset , and \begin_inset Formula $L$ \end_inset in order to ensure \begin_inset Formula $Pr[loss]\leq r$ \end_inset for some threshold probability \begin_inset Formula $r$ \end_inset . This is an optimization problem because although we could obtain very low \begin_inset Formula $Pr[loss]$ \end_inset by selecting conservative parameters, these choices have costs. The peer storage and bandwidth consumed by the share distribution process are approximately \begin_inset Formula $\nicefrac{N}{k}$ \end_inset times the size of the original file, so we would like to minimize \begin_inset Formula $\nicefrac{N}{k}$ \end_inset , consistent with \begin_inset Formula $Pr[loss]\leq r$ \end_inset . 
Likewise, a frequent and aggressive repair process keeps the number of shares available close to \begin_inset Formula $N,$ \end_inset but at a cost in bandwidth and processing time as the repair agent downloads \begin_inset Formula $k$ \end_inset shares, reconstructs the file and uploads new shares to replace those that are lost. \end_layout \begin_layout Section Reliability \end_layout \begin_layout Standard The probability that the file becomes unrecoverable is dependent upon the probability that the peers to whom we send shares are able to return those copies on demand. Shares that are corrupted are detected and discarded, so there is no need to distinguish between corruption and loss. \end_layout \begin_layout Standard Many factors affect share availability. Availability can be temporarily interrupted by peer unavailability due to network outages, power failures or administrative shutdown, among other reasons. Availability can be permanently lost due to failure or corruption of storage media, catastrophic damage to the peer system, administrative error, withdrawal from the network, malicious corruption, etc. \end_layout \begin_layout Standard The existence of intermittent failure modes motivates the introduction of a distinction between \noun on availability \noun default and \noun on reliability \noun default . Reliability is the probability that a share is retrievable assuming intermitten t failures can be waited out, so reliability considers only permanent failures. Availability considers all failures, and is focused on the probability of retrieval within some defined time frame. \end_layout \begin_layout Standard Another consideration is that some failures affect multiple shares. If multiple shares of a file are stored on a single hard drive, for example, failure of that drive may lose them all. Catastrophic damage to a data center may destroy all shares on all peers in that data center. \end_layout \begin_layout Standard While the types of failures that may occur are quite consistent across peers, their probabilities differ dramatically. A professionally-administered server with redundant storage, power and Internet located in a carefully-monitored data center with automatic fire suppression systems is much less likely to become either temporarily or permanently unavailable than the typical virus and malware-ridden home computer on a single cable modem connection. A variety of situations in between exist as well, such as the case of the author's home file server, which is administered by an IT professional and uses RAID level 6 redundant storage, but runs on old, cobbled-together equipment, and has a consumer-grade Internet connection. \end_layout \begin_layout Standard To begin with, let's use a simple definition of reliability: \end_layout \begin_layout Definition \noun on Reliability \noun default is the probability \begin_inset Formula $p_{i}$ \end_inset that a share \begin_inset Formula $s_{i}$ \end_inset will survive to (be retrievable at) time \begin_inset Formula $T=A$ \end_inset , ignoring intermittent failures. That is, the probability that the share will be retrievable at the end of the current repair cycle, and therefore usable by the repairer to regenerate any lost shares. \end_layout \begin_layout Standard Reliability \begin_inset Formula $p_{i}$ \end_inset is clearly dependent on \begin_inset Formula $A$ \end_inset . Short repair cycles offer less time for shares to \begin_inset Quotes eld \end_inset decay \begin_inset Quotes erd \end_inset into unavailability. 
\end_layout \begin_layout Subsection Peer Reliability \end_layout \begin_layout Standard Since peer reliability is the basis for any computations we may do on share and file reliability, we must have a way to estimate it. Reliability modeling of hardware, software and human performance are each complex topics, the subject of much ongoing research. In particular, the reliability of one of the key components of any peer from our perspective -- the hard drive where file shares are stored -- is the subject of much current debate. \end_layout \begin_layout Standard A common assumption about hardware failure is that it follows the \begin_inset Quotes eld \end_inset bathtub curve \begin_inset Quotes erd \end_inset , with frequent failures during the first few months, a constant failure rate for a few years and then a rising failure rate as the hardware wears out. This curve is often flattened by burn-in stress testing, and by periodic replacement that assures that in-service components never reach \begin_inset Quotes eld \end_inset old age \begin_inset Quotes erd \end_inset . \end_layout \begin_layout Standard In any case, we're generally going to ignore all of that complexity and focus on the bottom of the bathtub, assuming constant failure rates. This is a particularly reasonable assumption as long as we're focused on failures during a particular, relatively short interval \begin_inset Formula $A$ \end_inset . Towards the end of this paper, as we examine failures over many repair intervals, the assumption becomes more tenuous, and we note some of the issues. \end_layout \begin_layout Subsubsection Estimate Adaptation \end_layout \begin_layout Standard Even assuming constant failure rates, however, it will be rare that the duration of \begin_inset Formula $A$ \end_inset coincides with the available failure rate data, particularly since we want to view \begin_inset Formula $A$ \end_inset as a tunable parameter. It's necessary to be able adapt failure rates baselined against any given duration to the selected value of \begin_inset Formula $A$ \end_inset . \end_layout \begin_layout Standard Another issue is that failure rates of hardware, etc., are necessarily continuous in nature, while the per-interval failure/survival rates that are of interest for file reliability calculations are discrete -- a peer either survives or fails during the interval. The continuous nature of failure rates means that the common and obvious methods for estimating failure rates result in values that follow continuous, not discrete distributions. The difference is minor for small failure probabilities, and converges to zero as the number of intervals goes to infinity, but is important enough in some cases to be worth correcting for. \end_layout \begin_layout Standard Continuous failure rates are described in terms of mean time to failure, and under the assumption that failure rates are constant, are exponentially distributed. Under these assumptions, the probability that a machine fails at time \begin_inset Formula $t$ \end_inset , is \begin_inset Formula \[ f\left(t\right)=\lambda e^{-\lambda t}\] \end_inset where \begin_inset Formula $\lambda$ \end_inset represents the per unit-time failure rate. 
The probability that a machine fails at or before time \begin_inset Formula $A$ \end_inset is therefore \begin_inset Formula \begin{align} F\left(t\right) & =\int_{0}^{A}f\left(x\right)dx\nonumber \\ & =\int_{0}^{A}\lambda e^{-\lambda x}dx\nonumber \\ & =1-e^{-\lambda A}\label{eq:failure-time}\end{align} \end_inset \end_layout \begin_layout Standard Note that \begin_inset Formula $A$ \end_inset and \begin_inset Formula $\lambda$ \end_inset in \begin_inset CommandInset ref LatexCommand ref reference "eq:failure-time" \end_inset must be expressed in consistent time units. If they're different, unit conversions should be applied in the normal way. For example, if the estimate for \begin_inset Formula $\lambda$ \end_inset is 750 failures per million hours, and \begin_inset Formula $A$ \end_inset is one month, then either \begin_inset Formula $A$ \end_inset should be represented as \begin_inset Formula $30\cdot24/1000000=.00072$ \end_inset , or \begin_inset Formula $\lambda$ \end_inset should be converted to failures per month. Or both may be converted to hours. \end_layout \begin_layout Subsubsection Acquiring Peer Reliability Estimates \end_layout \begin_layout Standard Need to write this. \end_layout \begin_layout Subsection Uniform Reliability \begin_inset CommandInset label LatexCommand label name "sub:Fixed-Reliability" \end_inset \end_layout \begin_layout Standard In the simplest case, the peers holding the file shares all have the same reliability \begin_inset Formula $p$ \end_inset , and are all independent from one another. Let \begin_inset Formula $K$ \end_inset be a random variable that represents the number of shares that survive \begin_inset Formula $A$ \end_inset . Each share's survival can be viewed as an independent Bernoulli trial with a success probability of \begin_inset Formula $p$ \end_inset , which means that \begin_inset Formula $K$ \end_inset follows the binomial distribution with parameters \begin_inset Formula $N$ \end_inset and \begin_inset Formula $p$ \end_inset . That is, \begin_inset Formula $K\sim B(N,p)$ \end_inset . \end_layout \begin_layout Theorem Binomial Distribution Theorem \end_layout \begin_layout Theorem Consider \begin_inset Formula $n$ \end_inset independent Bernoulli trials \begin_inset Foot status collapsed \begin_layout Plain Layout A Bernoulli trial is simply a test of some sort that results in one of two outcomes, one of which is designated success and the other failure. The classic example of a Bernoulli trial is a coin toss. \end_layout \end_inset that succeed with probability \begin_inset Formula $p$ \end_inset , and let \begin_inset Formula $K$ \end_inset be a random variable that represents the number, \begin_inset Formula $m$ \end_inset , of successes, \begin_inset Formula $0\le m\le n$ \end_inset . We say that \begin_inset Formula $K$ \end_inset follows the Binomial Distribution with parameters n and p, denoted \begin_inset Formula $K\sim B(n,p)$ \end_inset . The probability mass function (PMF) of K is a function that gives the probabili ty that \begin_inset Formula $K$ \end_inset takes a particular value \begin_inset Formula $m$ \end_inset (the probability that there are exactly \begin_inset Formula $m$ \end_inset successful trials, and therefore \begin_inset Formula $n-m$ \end_inset failures). 
The PMF of K is \begin_inset Formula \begin{equation} Pr[K=m]=f(m;n,p)=\binom{n}{m}p^{m}(1-p)^{n-m}\label{eq:binomial-pmf}\end{equation} \end_inset \end_layout \begin_layout Proof Consider the specific case of exactly \begin_inset Formula $m$ \end_inset successes followed by \begin_inset Formula $n-m$ \end_inset failures, because each success has probability \begin_inset Formula $p$ \end_inset , each failure has probability \begin_inset Formula $1-p$ \end_inset , and the trials are independent, the probability of this exact case occurring is \begin_inset Formula $p^{m}\left(1-p\right)^{\left(n-m\right)}$ \end_inset , the product of the probabilities of the outcome of each trial. \end_layout \begin_layout Proof Now consider any reordering of these \begin_inset Formula $m$ \end_inset successes and \begin_inset Formula $n$ \end_inset failures. Any such reordering occurs with the same probability \begin_inset Formula $p^{m}\left(1-p\right)^{\left(n-m\right)}$ \end_inset , but with the terms of the product reordered. Since multiplication is commutative, each such reordering has the same probability. There are n-choose-m such orderings, and each ordering is an independent event, meaning we can sum the probabilities of the individual orderings, so the probability that any ordering of \begin_inset Formula $m$ \end_inset successes and \begin_inset Formula $n-m$ \end_inset failures occurs is given by \begin_inset Formula \[ \binom{n}{m}p^{m}\left(1-p\right)^{\left(n-m\right)}\] \end_inset which is the right-hand-side of equation \begin_inset CommandInset ref LatexCommand ref reference "eq:binomial-pmf" \end_inset . \end_layout \begin_layout Standard A file survives if at least \begin_inset Formula $k$ \end_inset of the \begin_inset Formula $N$ \end_inset shares survive. Equation \begin_inset CommandInset ref LatexCommand ref reference "eq:binomial-pmf" \end_inset gives the probability that exactly \begin_inset Formula $i$ \end_inset shares survive, for any \begin_inset Formula $1\leq i\leq n$ \end_inset , so the probability that fewer than \begin_inset Formula $k$ \end_inset survive is the sum of the probabilities that \begin_inset Formula $0,1,2,\ldots,k-1$ \end_inset shares survive. That is: \end_layout \begin_layout Standard \begin_inset Formula \begin{equation} Pr[file\, lost]=\sum_{i=0}^{k-1}\binom{n}{i}p^{i}(1-p)^{n-i}\label{eq:simple-failure}\end{equation} \end_inset \end_layout \begin_layout Subsection Independent Reliability \begin_inset CommandInset label LatexCommand label name "sub:Independent-Reliability" \end_inset \end_layout \begin_layout Standard Equation \begin_inset CommandInset ref LatexCommand ref reference "eq:simple-failure" \end_inset assumes that all shares have the same probability of survival, but as explained above, this is not necessarily true. A more accurate model allows each share \begin_inset Formula $s_{i}$ \end_inset an independent probability of survival \begin_inset Formula $p_{i}$ \end_inset . Each share's survival can still be treated as an independent Bernoulli trial, but with success probability \begin_inset Formula $p_{i}$ \end_inset . Under this assumption, \begin_inset Formula $K$ \end_inset follows a generalized binomial distribution with parameters \begin_inset Formula $N$ \end_inset and \begin_inset Formula $p_{1},p_{2},\dots,p_{N}$ \end_inset . \end_layout \begin_layout Standard The PMF for this generalized \begin_inset Formula $K$ \end_inset does not have a simple closed-form representation. 
However, the PMFs for random variables representing individual share survival do. Let \begin_inset Formula $K_{i}$ \end_inset be a random variable such that: \end_layout \begin_layout Standard \begin_inset Formula \[ K_{i}=\begin{cases} 1 & \textnormal{if }s_{i}\textnormal{ survives}\\ 0 & \textnormal{if }s_{i}\textnormal{ fails}\end{cases}\] \end_inset \end_layout \begin_layout Standard The PMF for \begin_inset Formula $K_{i}$ \end_inset is very simple: \begin_inset Formula \[ Pr[K_{i}=j]=\begin{cases} p_{i} & j=1\\ 1-p_{i} & j=0\end{cases}\] \end_inset which can also be expressed as \begin_inset Formula \[ Pr[K_{i}=j]=f\left(j\right)=\left(1-p_{i}\right)\left(1-j\right)+p_{i}\left(j\right)\] \end_inset \end_layout \begin_layout Standard Note that since each \begin_inset Formula $K_{i}$ \end_inset represents the count of shares \begin_inset Formula $s_{i}$ \end_inset that survives (either 0 or 1), if we add up all of the individual survivor counts, we get the group survivor count. That is: \begin_inset Formula \[ \sum_{i=1}^{N}K_{i}=K\] \end_inset Effectively, we have separated \begin_inset Formula $K$ \end_inset into the series of Bernoulli trials that make it up. \end_layout \begin_layout Theorem Discrete Convolution Theorem \end_layout \begin_layout Theorem Let \begin_inset Formula $X$ \end_inset and \begin_inset Formula $Y$ \end_inset be discrete random variables with probability mass functions given by \begin_inset Formula $Pr\left[X=x\right]=f(x)$ \end_inset and \begin_inset Formula $Pr\left[Y=y\right]=g(y).$ \end_inset Let \begin_inset Formula $Z$ \end_inset be the discrete random random variable obtained by summing \begin_inset Formula $X$ \end_inset and \begin_inset Formula $Y$ \end_inset . \end_layout \begin_layout Theorem The probability mass function of \begin_inset Formula $Z$ \end_inset is given by \begin_inset Formula \[ Pr[Z=z]=h(z)=\left(f\star g\right)(z)\] \end_inset where \begin_inset Formula $\star$ \end_inset denotes the discrete convolution operation: \begin_inset Formula \[ \left(f\star g\right)\left(n\right)=\sum_{m=-\infty}^{\infty}f\left(m\right)g\left(m-n\right)\] \end_inset \end_layout \begin_layout Proof The proof is beyond the scope of this paper. \end_layout \begin_layout Standard If we denote the PMF of \begin_inset Formula $K$ \end_inset with \begin_inset Formula $f$ \end_inset and the PMF of \begin_inset Formula $K_{i}$ \end_inset with \begin_inset Formula $g_{i}$ \end_inset (more formally, \begin_inset Formula $Pr[K=x]=f(x)$ \end_inset and \begin_inset Formula $Pr[K_{i}=x]=g_{i}(x)$ \end_inset ) then since \begin_inset Formula $K=\sum_{i=1}^{N}K_{i}$ \end_inset , according to the discrete convolution theorem \begin_inset Formula $f=g_{1}\star g_{2}\star g_{3}\star\ldots\star g_{N}$ \end_inset . Since convolution is associative, this can also be written as \begin_inset Formula $ $ \end_inset \begin_inset Formula \begin{equation} f=(\ldots((g_{1}\star g_{2})\star g_{3})\star\ldots)\star g_{N})\label{eq:convolution}\end{equation} \end_inset Therefore, \begin_inset Formula $f$ \end_inset can be computed as a sequence of convolution operations on the simple PMFs of the random variables \begin_inset Formula $K_{i}$ \end_inset . In fact, for large \begin_inset Formula $N$ \end_inset , equation \begin_inset CommandInset ref LatexCommand ref reference "eq:convolution" \end_inset turns out to be a more effective means of computing the PMF of \begin_inset Formula $K$ \end_inset than the binomial theorem. 
even in the case of shares with identical survival probability. The reason it's better is because the calculation of \begin_inset Formula $\binom{n}{m}$ \end_inset in equation \begin_inset CommandInset ref LatexCommand ref reference "eq:binomial-pmf" \end_inset produces very large values that overflow unless arbitrary precision numeric representations are used. \end_layout \begin_layout Standard Note also that it is not necessary to have very simple PMFs like those of the \begin_inset Formula $K_{i}$ \end_inset . Any share or set of shares that has a known PMF can be combined with any other set with a known PMF by convolution, as long as the two share sets are independent. The reverse holds as well; given a group with an empirically-derived PMF, in it's theoretically possible to solve for an individual PMF, and thereby determine \begin_inset Formula $p_{i}$ \end_inset even when per-share data is unavailable. \end_layout \begin_layout Subsection Multiple Failure Modes \begin_inset CommandInset label LatexCommand label name "sub:Multiple-Failure-Modes" \end_inset \end_layout \begin_layout Standard In modeling share survival probabilities, it's useful to be able to analyze separately each of the various failure modes. For example, if reliable statistics for disk failure can be obtained, then a probability mass function for that form of failure can be generated. Similarly, statistics on other hardware failures, administrative errors, network losses, etc., can all be estimated independently. If those estimates can then be combined into a single PMF for a share, then we can use it to predict failures for that share. \end_layout \begin_layout Standard Combining independent failure modes for a single share is straightforward. If \begin_inset Formula $p_{i,j}$ \end_inset is the probability of survival of the \begin_inset Formula $j$ \end_inset th failure mode of share \begin_inset Formula $i$ \end_inset , \begin_inset Formula $1\leq j\leq m$ \end_inset , then \begin_inset Formula \[ Pr[K_{i}=k]=f_{i}(k)=\begin{cases} \prod_{j=1}^{m}p_{i,j} & k=1\\ 1-\prod_{j=1}^{m}p_{i,j} & k=0\end{cases}\] \end_inset is the survival PMF. \end_layout \begin_layout Subsection Multi-share failures \begin_inset CommandInset label LatexCommand label name "sub:Multi-share-failures" \end_inset \end_layout \begin_layout Standard If there are failure modes that affect multiple computers, we can also construct the PMF that predicts their survival. The key observation is that the PMF has non-zero probabilities only for \begin_inset Formula $0$ \end_inset survivors and \begin_inset Formula $n$ \end_inset survivors, where \begin_inset Formula $n$ \end_inset is the number of shares in the set. 
If \begin_inset Formula $p$ \end_inset is the probability of survival, the PMF of \begin_inset Formula $K$ \end_inset , a random variable representing the number of survivors, is \begin_inset Formula \[ Pr[K=k]=f(k)=\begin{cases} p & k=n\\ 0 & 0<k<n\\ 1-p & k=0\end{cases}\] \end_inset \end_layout \begin_layout Standard Table \begin_inset CommandInset ref LatexCommand ref reference "tab:Example-PMF" \end_inset gives an example share-set PMF for \begin_inset Formula $N=12$ \end_inset shares. The columns are \begin_inset Formula $k$ \end_inset , \begin_inset Formula $Pr[K=k]$ \end_inset , \begin_inset Formula $Pr[file\, loss]=Pr[K<k]$ \end_inset , and the expansion factor \begin_inset Formula $N/k$ \end_inset :

 k   Pr[K=k]    Pr[file loss]=Pr[K<k]   N/k
 1   1.60e-9    2.53e-11                12
 2   3.80e-8    1.63e-9                  6
 3   4.04e-7    3.70e-8                  4
 4   2.06e-6    4.44e-7                  3
 5   2.10e-5    2.50e-6                  2.4
 6   0.000428   2.35e-5                  2
 7   0.00417    0.000452                 1.7
 8   0.0157     0.00462                  1.5
 9   0.00127    0.0203                   1.3
10   0.0230     0.0216                   1.2
11   0.208      0.0446                   1.1
12   0.747      0.253                    1

\begin_inset CommandInset label LatexCommand label name "tab:Example-PMF" \end_inset Example PMF \end_layout \begin_layout Standard The table demonstrates the importance of the selection of \begin_inset Formula $k$ \end_inset , and the tradeoff against file size expansion. Note that the survival of exactly 9 servers is significantly less likely than the survival of 8 or 10 servers. This is, again, an artifact of the group failure modes. Because of this, there is no reason to choose \begin_inset Formula $k=9$ \end_inset over \begin_inset Formula $k=10$ \end_inset . Normally, reducing the number of shares needed for reassembly improves the file's chances of survival, but in this case it provides a minuscule gain in reliability at the cost of a 10% increase in bandwidth and storage consumed. \end_layout \begin_layout Subsection Share Duplication \end_layout \begin_layout Standard Before moving on to consider issues other than single-interval file loss, let's analyze one more possibility, that of \begin_inset Quotes eld \end_inset cheap \begin_inset Quotes erd \end_inset file repair via share duplication. \end_layout \begin_layout Standard Initially, files are split using erasure coding, which creates \begin_inset Formula $N$ \end_inset unique shares, any \begin_inset Formula $k$ \end_inset of which can be used to reconstruct the file. When shares are lost, proper repair downloads some \begin_inset Formula $k$ \end_inset shares, reconstructs the original file and then uses the erasure coding algorithm to reconstruct the lost shares, then redeploys them to peers in the network. This is a somewhat expensive process.
\end_layout \begin_layout Standard A cheaper repair option is simply to direct some peer that has share \begin_inset Formula $s_{i}$ \end_inset to send a copy to another peer, thus increasing by one the number of shares in the network. This is not as good as actually replacing the lost share, though. Suppose that more shares were lost, leaving only \begin_inset Formula $k$ \end_inset shares remaining. If two of those shares are identical, because one was duplicated in this fashion, then only \begin_inset Formula $k-1$ \end_inset shares truly remain, and the file can no longer be reconstructed. \end_layout \begin_layout Standard However, such cheap repair is not completely pointless; it does increase file survivability. But by how much? \end_layout \begin_layout Standard Effectively, share duplication simply increases the probability that \begin_inset Formula $s_{i}$ \end_inset will survive, by providing two locations from which to retrieve it. We can view the two copies of the single share as one, but with a higher probability of survival than would be provided by either of the two peers. In particular, if \begin_inset Formula $p_{1}$ \end_inset and \begin_inset Formula $p_{2}$ \end_inset are the probabilities that the two peers will survive, respectively, then \begin_inset Formula \[ Pr[s_{i}\, survives]=p_{1}+p_{2}-p_{1}p_{2}\] \end_inset \end_layout \begin_layout Standard More generally, if a single share is deployed on \begin_inset Formula $n$ \end_inset peers, each with a PMF \begin_inset Formula $f_{i}(j),0\leq j\leq1,1\leq i\leq n$ \end_inset , the share survival count is a random variable \begin_inset Formula $K$ \end_inset and the probability of share loss is \begin_inset Formula \[ Pr[K=0]=(f_{1}\star f_{2}\star\ldots\star f_{n})(0)\] \end_inset \end_layout \begin_layout Standard From that, we can construct a share PMF in the obvious way, which can then be convolved with the other share PMFs to produce the share set PMF. \end_layout \begin_layout Example Suppose a file has \begin_inset Formula $N=10,k=3$ \end_inset and that all servers have survival probability \begin_inset Formula $p=.9$ \end_inset . Given a full complement of shares, \begin_inset Formula $Pr[\textrm{file\, loss}]=3.74\times10^{-7}$ \end_inset . Suppose that four shares are lost, which increases \begin_inset Formula $Pr[\textrm{file\, loss}]$ \end_inset to \begin_inset Formula $.00127$ \end_inset , a value \begin_inset Formula $3400$ \end_inset times greater. Rather than doing a proper reconstruction, we could direct four peers still holding shares to send a copy of their share to a new peer, which changes the composition of the shares from one of six unique \begin_inset Quotes eld \end_inset standard \begin_inset Quotes erd \end_inset shares to one of two standard shares, each with survival probability \begin_inset Formula $.9$ \end_inset , and four \begin_inset Quotes eld \end_inset doubled \begin_inset Quotes erd \end_inset shares, each with survival probability \begin_inset Formula $2p-p^{2}\approxeq.99$ \end_inset . \end_layout \begin_layout Example Combining the two single-peer share PMFs with the four double-share PMFs gives a new \begin_inset Formula $Pr[\textrm{file\, loss}]$ \end_inset of \begin_inset Formula $6.64\times10^{-6}$ \end_inset . Not as good as a full repair, but still quite respectable. Also, if storage were not a concern, all six shares could be duplicated, for a \begin_inset Formula $Pr[file\, loss]=1.48\times10^{-7}$ \end_inset , which is actually about two and a half times better than the nominal case.
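These figures are easy to check numerically. The following standalone Python sketch (illustrative only, not Tahoe-LAFS code; the helper names are invented for this example) reproduces them by convolving two-point per-share PMFs, as advocated earlier in place of the binomial formula.

    # Each share is a two-point PMF [Pr[0 survive], Pr[1 survives]]; convolving the
    # per-share PMFs yields the PMF of K, and Pr[file loss] = Pr[K < k].
    def convolve(a, b):
        out = [0.0] * (len(a) + len(b) - 1)
        for i, x in enumerate(a):
            for j, y in enumerate(b):
                out[i + j] += x * y
        return out

    def pr_file_loss(per_share_pmfs, k):
        pmf = [1.0]
        for share_pmf in per_share_pmfs:
            pmf = convolve(pmf, share_pmf)
        return sum(pmf[:k])            # probability that fewer than k shares survive

    p = 0.9
    single  = [1 - p, p]                           # share held by one peer
    doubled = [(1 - p) ** 2, 1 - (1 - p) ** 2]     # share held by two peers: 2p - p^2 = .99

    print(pr_file_loss([single] * 10, 3))                  # full complement:  ~3.74e-07
    print(pr_file_loss([single] * 6, 3))                   # four shares lost: ~1.27e-03
    print(pr_file_loss([single] * 2 + [doubled] * 4, 3))   # cheap repair:     ~6.64e-06
    print(pr_file_loss([doubled] * 6, 3))                  # all six doubled:  ~1.48e-07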
\end_layout \begin_layout Example The reason such cheap repairs may be attractive in many cases is that distributed bandwidth is cheaper than bandwidth through a single peer. This is particularly true if that single peer has a very slow connection, which is common for home computers -- especially in the outbound direction. \end_layout \begin_layout Section Long-Term Reliability \end_layout \begin_layout Standard Thus far, we've focused entirely on the probability that a file survives the interval \begin_inset Formula $A$ \end_inset between repair times. The probability that a file survives long-term, though, is also important. As long as the probability of failure during a repair period is non-zero, a given file will eventually be lost. We want to know the probability of surviving for time \begin_inset Formula $T$ \end_inset , and how the parameters \begin_inset Formula $A$ \end_inset (time between repairs) and \begin_inset Formula $L$ \end_inset (allowed share low watermark) affect survival time. \end_layout \begin_layout Standard To model file survival time, let \begin_inset Formula $T$ \end_inset be a random variable denoting the time at which a given file becomes unrecoverable, and \begin_inset Formula $R(t)=Pr[T>t]$ \end_inset be a function that gives the probability that the file survives to time \begin_inset Formula $t$ \end_inset . \begin_inset Formula $R(t)$ \end_inset is the survival function of \begin_inset Formula $T$ \end_inset , the complement of its cumulative distribution function. \end_layout \begin_layout Standard Most survival functions are continuous, but \begin_inset Formula $R(t)$ \end_inset is inherently discrete and stochastic. The time steps are the repair intervals, each of length \begin_inset Formula $A$ \end_inset , so \begin_inset Formula $T$ \end_inset -values are multiples of \begin_inset Formula $A$ \end_inset . During each interval, the file's shares degrade according to the probability mass function of \begin_inset Formula $K$ \end_inset . \end_layout \begin_layout Subsection Aggressive Repair \end_layout \begin_layout Standard Let's first consider the case of an aggressive repairer. Every interval, this repairer checks the file for share losses and restores them. Thus, at the beginning of each interval, the file always has \begin_inset Formula $N$ \end_inset shares, distributed on servers with various individual and group failure probabilities, which will survive or fail per the output of random variable \begin_inset Formula $K$ \end_inset . \end_layout \begin_layout Standard For any interval, then, the probability that the file will survive is \begin_inset Formula $f\left(k\right)=Pr[K\geq k]$ \end_inset . Since each interval success or failure is independent, and assuming the share reliabilities remain constant over time, \begin_inset Formula \begin{equation} R\left(t\right)=f(k)^{t}\end{equation} \end_inset \end_layout \begin_layout Standard This simple survival function makes it easy to select parameters \begin_inset Formula $N$ \end_inset and \begin_inset Formula $k$ \end_inset such that \begin_inset Formula $R(t)\geq r$ \end_inset , where \begin_inset Formula $r$ \end_inset is a user-specified parameter indicating the desired probability of survival to time \begin_inset Formula $t$ \end_inset .
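The selection can also be done mechanically. The short Python sketch below (illustrative only) takes the \begin_inset Formula $Pr[K=k]$ \end_inset column from table tab:Example-PMF and finds the largest \begin_inset Formula $k$ \end_inset (i.e. the least expansion) whose per-interval survival \begin_inset Formula $f(k)=Pr[K\geq k]$ \end_inset still keeps \begin_inset Formula $f(k)^{t}$ \end_inset above the target \begin_inset Formula $r$ \end_inset .

    # pmf[i] is Pr[K = i] for i = 0 .. 12; Pr[K = 0] is read off the table as Pr[K < 1].
    pmf = [2.53e-11, 1.60e-9, 3.80e-8, 4.04e-7, 2.06e-6, 2.10e-5, 0.000428,
           0.00417, 0.0157, 0.00127, 0.0230, 0.208, 0.747]

    def f(k):                       # per-interval survival probability Pr[K >= k]
        return sum(pmf[k:])

    def largest_k(r, t):
        """Largest k (least expansion N/k) whose t-interval survival f(k)**t still meets r."""
        good = [k for k in range(1, len(pmf)) if f(k) ** t >= r]
        return max(good) if good else None

    print(largest_k(r=1 - 1e-6, t=120))   # -> 2  (six-fold expansion with N = 12)
    print(largest_k(r=1 - 1e-3, t=120))   # -> 5  (expansion factor 2.4)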
Specifically, we can solve for \begin_inset Formula $f\left(k\right)$ \end_inset in \begin_inset Formula $r\leq f\left(k\right)^{t}$ \end_inset , giving: \begin_inset Formula \begin{equation} f\left(k\right)\geq\sqrt[t]{r}\end{equation} \end_inset \end_layout \begin_layout Standard So, given a PMF \begin_inset Formula $f\left(k\right)$ \end_inset , to assure the survival of a file to time \begin_inset Formula $t$ \end_inset with probability at least \begin_inset Formula $r$ \end_inset , choose \begin_inset Formula $k$ \end_inset such that \begin_inset Formula $f\left(k\right)\geq\sqrt[t]{r}$ \end_inset . For example, if \begin_inset Formula $A$ \end_inset is one month, and \begin_inset Formula $r=1-\nicefrac{1}{10^{6}}$ \end_inset and \begin_inset Formula $t=120$ \end_inset , or 10 years, we calculate \begin_inset Formula $f\left(k\right)\geq\sqrt[120]{.999999}\approx0.999999992$ \end_inset . Per the PMF of table \begin_inset CommandInset ref LatexCommand ref reference "tab:Example-PMF" \end_inset , this means \begin_inset Formula $k=2$ \end_inset achieves the goal, at the cost of a six-fold expansion in stored file size. If the lesser goal of no more than \begin_inset Formula $\nicefrac{1}{1000}$ \end_inset probability of loss is taken, then since \begin_inset Formula $\sqrt[120]{.999}=.999992$ \end_inset , \begin_inset Formula $k=5$ \end_inset achieves the goal with an expansion factor of \begin_inset Formula $2.4$ \end_inset . \end_layout \begin_layout Subsection Repair Cost \end_layout \begin_layout Standard The simplicity and predictability of aggressive repair is attractive, but there is a downside: repairs cost processing power and bandwidth. The processing power is proportional to the size of the file, since the whole file must be reconstructed and then re-processed using the Reed-Solomon algorithm, while the bandwidth cost is proportional to the number of missing shares that must be replaced, \begin_inset Formula $N-K$ \end_inset . \end_layout \begin_layout Standard Let \begin_inset Formula $c\left(s,d,k\right)$ \end_inset be a cost function that combines the processing cost of regenerating a file of size \begin_inset Formula $s$ \end_inset and the bandwidth cost of downloading a file of size \begin_inset Formula $s$ \end_inset and uploading \begin_inset Formula $d$ \end_inset shares each of size \begin_inset Formula $\nicefrac{s}{k}$ \end_inset . Also, let \begin_inset Formula $D$ \end_inset denote the random variable \begin_inset Formula $N-K$ \end_inset , which is the number of shares that must be redistributed to bring the file share set back up to \begin_inset Formula $N$ \end_inset after degrading during an interval. The probability mass function of \begin_inset Formula $D$ \end_inset is \begin_inset Formula \[ Pr[D=d]=f(d)=\begin{cases} Pr\left[K=N\right]+Pr[K<k] & d=0\\ Pr\left[K=N-d\right] & 0<d\leq N-k\\ 0 & \textrm{otherwise}\end{cases}\] \end_inset [mutable-DSA.svg: diagram of the DSA-based mutable-file crypto scheme -- DSA private key (256-bit string) and public key (2048+ bit string); salt, read-cap (192+64 bits), write-cap, storage index (SI:A 64 + SI:B 64 bits), verify cap, deep-verify cap (192+64 bits); AES readkey/writekey/deepverifykey; FEC, Merkle trees, DSA signature] tahoe_lafs-1.20.0/docs/proposed/mutable-DSA.txt0000644000000000000000000005031213615410400016202 0ustar00 (protocol proposal, work-in-progress, not authoritative) (this document describes DSA-based mutable files, as opposed to the RSA-based mutable files that were introduced in tahoe-0.7.0 .
This proposal has not yet been implemented. Please see mutable-DSA.svg for a quick picture of the crypto scheme described herein) This file shows only the differences from RSA-based mutable files to (EC)DSA-based mutable files. You have to read and understand docs/specifications/mutable.rst before reading this file (mutable-DSA.txt). == new design criteria == * provide for variable number of semiprivate sections? * put e.g. filenames in one section, readcaps in another, writecaps in a third (ideally, to modify a filename you'd only have to modify one section, and we'd make encrypting/hashing more efficient by doing it on larger blocks of data, preferably one segment at a time instead of one writecap at a time) * cleanly distinguish between "container" (leases, write-enabler) and "slot contents" (everything that comes from share encoding) * sign all slot bits (to allow server-side verification) * someone reading the whole file should be able to read the share in a single linear pass with just a single seek to zero * writing the file should occur in two passes (3 seeks) in mostly linear order 1: write version/pubkey/topbits/salt 2: write zeros / seek+prefill where the hashchain/tree goes 3: write blocks 4: seek back 5: write hashchain/tree * storage format: consider putting container bits in a separate file - $SI.index (contains list of shnums, leases, other-cabal-members, WE, etc) - $SI-$shnum.share (actual share data) * possible layout: - version - pubkey - topbits (k, N, size, segsize, etc) - salt? (salt tree root?) - share hash root - share hash chain - block hash tree - (salts?) (salt tree?) - blocks - signature (of [version .. share hash root]) === SDMF slots overview === Each SDMF slot is created with a DSA public/private key pair, using a system-wide common modulus and generator, in which the private key is a random 256 bit number, and the public key is a larger value (about 2048 bits) that can be derived with a bit of math from the private key. The public key is known as the "verification key", while the private key is called the "signature key". The 256-bit signature key is used verbatim as the "write capability". This can be converted into the 2048ish-bit verification key through a fairly cheap set of modular exponentiation operations; this is done any time the holder of the write-cap wants to read the data. (Note that the signature key can either be a newly-generated random value, or the hash of something else, if we found a need for a capability that's stronger than the write-cap). This results in a write-cap which is 256 bits long and can thus be expressed in an ASCII/transport-safe encoded form (base62 encoding, fits in 72 characters, including a local-node http: convenience prefix). The private key is hashed to form a 256-bit "salt". The public key is also hashed to form a 256-bit "pubkey hash". These two values are concatenated, hashed, and truncated to 192 bits to form the first 192 bits of the read-cap. The pubkey hash is hashed by itself and truncated to 64 bits to form the last 64 bits of the read-cap. The full read-cap is 256 bits long, just like the write-cap. The first 192 bits of the read-cap are hashed and truncated to form the first 192 bits of the "traversal cap". The last 64 bits of the read-cap are hashed to form the last 64 bits of the traversal cap. This gives us a 256-bit traversal cap. The first 192 bits of the traversal-cap are hashed and truncated to form the first 64 bits of the storage index. 
The last 64 bits of the traversal-cap are hashed to form the last 64 bits of the storage index. This gives us a 128-bit storage index. The verification-cap is the first 64 bits of the storage index plus the pubkey hash, 320 bits total. The verification-cap doesn't need to be expressed in a printable transport-safe form, so it's ok that it's longer. The read-cap is hashed one way to form an AES encryption key that is used to encrypt the salt; this key is called the "salt key". The encrypted salt is stored in the share. The private key never changes, therefore the salt never changes, and the salt key is only used for a single purpose, so there is no need for an IV. The read-cap is hashed a different way to form the master data encryption key. A random "data salt" is generated each time the share's contents are replaced, and the master data encryption key is concatenated with the data salt, then hashed, to form the AES CTR-mode "read key" that will be used to encrypt the actual file data. This is to avoid key-reuse. An outstanding issue is how to avoid key reuse when files are modified in place instead of being replaced completely; this is not done in SDMF but might occur in MDMF. The master data encryption key is used to encrypt data that should be visible to holders of a write-cap or a read-cap, but not to holders of a traversal-cap. The private key is hashed one way to form the salt, and a different way to form the "write enabler master". For each storage server on which a share is kept, the write enabler master is concatenated with the server's nodeid and hashed, and the result is called the "write enabler" for that particular server. Note that multiple shares of the same slot stored on the same server will all get the same write enabler, i.e. the write enabler is associated with the "bucket", rather than the individual shares. The private key is hashed a third way to form the "data write key", which can be used by applications which wish to store some data in a form that is only available to those with a write-cap, and not to those with merely a read-cap. This is used to implement transitive read-onlyness of dirnodes. The traversal cap is hashed to work the "traversal key", which can be used by applications that wish to store data in a form that is available to holders of a write-cap, read-cap, or traversal-cap. The idea is that dirnodes will store child write-caps under the writekey, child names and read-caps under the read-key, and verify-caps (for files) or deep-verify-caps (for directories) under the traversal key. This would give the holder of a root deep-verify-cap the ability to create a verify manifest for everything reachable from the root, but not the ability to see any plaintext or filenames. This would make it easier to delegate filechecking and repair to a not-fully-trusted agent. The public key is stored on the servers, as is the encrypted salt, the (non-encrypted) data salt, the encrypted data, and a signature. The container records the write-enabler, but of course this is not visible to readers. To make sure that every byte of the share can be verified by a holder of the verify-cap (and also by the storage server itself), the signature covers the version number, the sequence number, the root hash "R" of the share merkle tree, the encoding parameters, and the encrypted salt. "R" itself covers the hash trees and the share data. The read-write URI is just the private key. The read-only URI is the read-cap key. The deep-verify URI is the traversal-cap. 
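For concreteness, here is a minimal sketch of the cap-derivation chain described above, in Python. It is illustrative only: it uses plain SHA-256 in place of Tahoe's tagged SHA-256d hashes, so the actual byte values would differ, and the function names are invented for this example.

    from hashlib import sha256

    def H(data):
        # stand-in for Tahoe's tagged SHA-256d hashes
        return sha256(data).digest()

    def derive_caps(privkey_bits, pubkey):
        salt        = H(privkey_bits)                                   # 256-bit salt
        pubkey_hash = H(pubkey)                                         # 256-bit pubkey hash
        read_cap    = H(salt + pubkey_hash)[:24] + H(pubkey_hash)[:8]   # 192 + 64 bits
        trav_cap    = H(read_cap[:24])[:24] + H(read_cap[24:])[:8]      # 192 + 64 bits
        storage_ix  = H(trav_cap[:24])[:8] + H(trav_cap[24:])[:8]       # 64 + 64 = 128 bits
        verify_cap  = storage_ix[:8] + pubkey_hash                      # 64 + 256 = 320 bits
        return read_cap, trav_cap, storage_ix, verify_cap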
The verify-only URI contains the the pubkey hash and the first 64 bits of the storage index. FMW:b2a(privatekey) FMR:b2a(readcap) FMT:b2a(traversalcap) FMV:b2a(storageindex[:64])b2a(pubkey-hash) Note that this allows the read-only, deep-verify, and verify-only URIs to be derived from the read-write URI without actually retrieving any data from the share, but instead by regenerating the public key from the private one. Users of the read-only, deep-verify, or verify-only caps must validate the public key against their pubkey hash (or its derivative) the first time they retrieve the pubkey, before trusting any signatures they see. The SDMF slot is allocated by sending a request to the storage server with a desired size, the storage index, and the write enabler for that server's nodeid. If granted, the write enabler is stashed inside the slot's backing store file. All further write requests must be accompanied by the write enabler or they will not be honored. The storage server does not share the write enabler with anyone else. The SDMF slot structure will be described in more detail below. The important pieces are: * a sequence number * a root hash "R" * the data salt * the encoding parameters (including k, N, file size, segment size) * a signed copy of [seqnum,R,data_salt,encoding_params] (using signature key) * the verification key (not encrypted) * the share hash chain (part of a Merkle tree over the share hashes) * the block hash tree (Merkle tree over blocks of share data) * the share data itself (erasure-coding of read-key-encrypted file data) * the salt, encrypted with the salt key The access pattern for read (assuming we hold the write-cap) is: * generate public key from the private one * hash private key to get the salt, hash public key, form read-cap * form storage-index * use storage-index to locate 'k' shares with identical 'R' values * either get one share, read 'k' from it, then read k-1 shares * or read, say, 5 shares, discover k, either get more or be finished * or copy k into the URIs * .. jump to "COMMON READ", below To read (assuming we only hold the read-cap), do: * hash read-cap pieces to generate storage index and salt key * use storage-index to locate 'k' shares with identical 'R' values * retrieve verification key and encrypted salt * decrypt salt * hash decrypted salt and pubkey to generate another copy of the read-cap, make sure they match (this validates the pubkey) * .. 
jump to "COMMON READ" * COMMON READ: * read seqnum, R, data salt, encoding parameters, signature * verify signature against verification key * hash data salt and read-cap to generate read-key * read share data, compute block-hash Merkle tree and root "r" * read share hash chain (leading from "r" to "R") * validate share hash chain up to the root "R" * submit share data to erasure decoding * decrypt decoded data with read-key * submit plaintext to application The access pattern for write is: * generate pubkey, salt, read-cap, storage-index as in read case * generate data salt for this update, generate read-key * encrypt plaintext from application with read-key * application can encrypt some data with the data-write-key to make it only available to writers (used for transitively-readonly dirnodes) * erasure-code crypttext to form shares * split shares into blocks * compute Merkle tree of blocks, giving root "r" for each share * compute Merkle tree of shares, find root "R" for the file as a whole * create share data structures, one per server: * use seqnum which is one higher than the old version * share hash chain has log(N) hashes, different for each server * signed data is the same for each server * include pubkey, encrypted salt, data salt * now we have N shares and need homes for them * walk through peers * if share is not already present, allocate-and-set * otherwise, try to modify existing share: * send testv_and_writev operation to each one * testv says to accept share if their(seqnum+R) <= our(seqnum+R) * count how many servers wind up with which versions (histogram over R) * keep going until N servers have the same version, or we run out of servers * if any servers wound up with a different version, report error to application * if we ran out of servers, initiate recovery process (described below) ==== Cryptographic Properties ==== This scheme protects the data's confidentiality with 192 bits of key material, since the read-cap contains 192 secret bits (derived from an encrypted salt, which is encrypted using those same 192 bits plus some additional public material). The integrity of the data (assuming that the signature is valid) is protected by the 256-bit hash which gets included in the signature. The privilege of modifying the data (equivalent to the ability to form a valid signature) is protected by a 256 bit random DSA private key, and the difficulty of computing a discrete logarithm in a 2048-bit field. There are a few weaker denial-of-service attacks possible. If N-k+1 of the shares are damaged or unavailable, the client will be unable to recover the file. Any coalition of more than N-k shareholders will be able to effect this attack by merely refusing to provide the desired share. The "write enabler" shared secret protects existing shares from being displaced by new ones, except by the holder of the write-cap. One server cannot affect the other shares of the same file, once those other shares are in place. The worst DoS attack is the "roadblock attack", which must be made before those shares get placed. Storage indexes are effectively random (being derived from the hash of a random value), so they are not guessable before the writer begins their upload, but there is a window of vulnerability during the beginning of the upload, when some servers have heard about the storage index but not all of them. 
The roadblock attack we want to prevent is when the first server that the uploader contacts quickly runs to all the other selected servers and places a bogus share under the same storage index, before the uploader can contact them. These shares will normally be accepted, since storage servers create new shares on demand. The bogus shares would have randomly-generated write-enablers, which will of course be different than the real uploader's write-enabler, since the malicious server does not know the write-cap. If this attack were successful, the uploader would be unable to place any of their shares, because the slots have already been filled by the bogus shares. The uploader would probably try for peers further and further away from the desired location, but eventually they will hit a preconfigured distance limit and give up. In addition, the further the writer searches, the less likely it is that a reader will search as far. So a successful attack will either cause the file to be uploaded but not be reachable, or it will cause the upload to fail. If the uploader tries again (creating a new privkey), they may get lucky and the malicious servers will appear later in the query list, giving sufficient honest servers a chance to see their share before the malicious one manages to place bogus ones. The first line of defense against this attack is the timing challenges: the attacking server must be ready to act the moment a storage request arrives (which will only occur for a certain percentage of all new-file uploads), and only has a few seconds to act before the other servers will have allocated the shares (and recorded the write-enabler, terminating the window of vulnerability). The second line of defense is post-verification, and is possible because the storage index is partially derived from the public key hash. A storage server can, at any time, verify every public bit of the container as being signed by the verification key (this operation is recommended as a continual background process, when disk usage is minimal, to detect disk errors). The server can also hash the verification key to derive 64 bits of the storage index. If it detects that these 64 bits do not match (but the rest of the share validates correctly), then the implication is that this share was stored to the wrong storage index, either due to a bug or a roadblock attack. If an uploader finds that they are unable to place their shares because of "bad write enabler errors" (as reported by the prospective storage servers), it can "cry foul", and ask the storage server to perform this verification on the share in question. If the pubkey and storage index do not match, the storage server can delete the bogus share, thus allowing the real uploader to place their share. Of course the origin of the offending bogus share should be logged and reported to a central authority, so corrective measures can be taken. It may be necessary to have this "cry foul" protocol include the new write-enabler, to close the window during which the malicious server can re-submit the bogus share during the adjudication process. If the problem persists, the servers can be placed into pre-verification mode, in which this verification is performed on all potential shares before being committed to disk. This mode is more CPU-intensive (since normally the storage server ignores the contents of the container altogether), but would solve the problem completely. The mere existence of these potential defenses should be sufficient to deter any actual attacks. 
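Concretely, the 64-bit post-verification check described above could look something like the following self-contained sketch (illustrative only; H() again stands in for the real tagged hashes, so actual values would differ):

    from hashlib import sha256

    def H(data):
        return sha256(data).digest()

    def share_stored_at_right_index(storage_index, verification_key):
        pubkey_hash  = H(verification_key)
        readcap_tail = H(pubkey_hash)[:8]     # last 64 bits of the read-cap
        travcap_tail = H(readcap_tail)[:8]    # last 64 bits of the traversal cap
        si_tail      = H(travcap_tail)[:8]    # last 64 bits of the 128-bit storage index
        return storage_index[8:] == si_tail   # mismatch suggests a bug or a roadblock attack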
Note that the storage index only has 64 bits of pubkey-derived data in it, which is below the usual crypto guidelines for security factors. In this case it's a pre-image attack which would be needed, rather than a collision, and the actual attack would be to find a keypair for which the public key can be hashed three times to produce the desired portion of the storage index. We believe that 64 bits of material is sufficiently resistant to this form of pre-image attack to serve as a suitable deterrent. === SMDF Slot Format === This SMDF data lives inside a server-side MutableSlot container. The server is generally oblivious to this format, but it may look inside the container when verification is desired. This data is tightly packed. There are no gaps left between the different fields, and the offset table is mainly present to allow future flexibility of key sizes. # offset size name 1 0 1 version byte, \x01 for this format 2 1 8 sequence number. 2^64-1 must be handled specially, TBD 3 9 32 "R" (root of share hash Merkle tree) 4 41 32 data salt (readkey is H(readcap+data_salt)) 5 73 32 encrypted salt (AESenc(key=H(readcap), salt) 6 105 18 encoding parameters: 105 1 k 106 1 N 107 8 segment size 115 8 data length (of original plaintext) 7 123 36 offset table: 127 4 (9) signature 131 4 (10) share hash chain 135 4 (11) block hash tree 139 4 (12) share data 143 8 (13) EOF 8 151 256 verification key (2048bit DSA key) 9 407 40 signature=DSAsig(H([1,2,3,4,5,6])) 10 447 (a) share hash chain, encoded as: "".join([pack(">H32s", shnum, hash) for (shnum,hash) in needed_hashes]) 11 ?? (b) block hash tree, encoded as: "".join([pack(">32s",hash) for hash in block_hash_tree]) 12 ?? LEN share data 13 ?? -- EOF (a) The share hash chain contains ceil(log(N)) hashes, each 32 bytes long. This is the set of hashes necessary to validate this share's leaf in the share Merkle tree. For N=10, this is 4 hashes, i.e. 128 bytes. (b) The block hash tree contains ceil(length/segsize) hashes, each 32 bytes long. This is the set of hashes necessary to validate any given block of share data up to the per-share root "r". Each "r" is a leaf of the share has tree (with root "R"), from which a minimal subset of hashes is put in the share hash chain in (8). == TODO == Every node in a given tahoe grid must have the same common DSA moduli and exponent, but different grids could use different parameters. We haven't figured out how to define a "grid id" yet, but I think the DSA parameters should be part of that identifier. In practical terms, this might mean that the Introducer tells each node what parameters to use, or perhaps the node could have a config file which specifies them instead. The shares MUST have a ciphertext hash of some sort (probably a merkle tree over the blocks, and/or a flat hash of the ciphertext), just like immutable files do. Without this, a malicious publisher could produce some shares that result in file A, and other shares that result in file B, and upload both of them (incorporating both into the share hash tree). The result would be a read-cap that would sometimes resolve to file A, and sometimes to file B, depending upon which servers were used for the download. By including a ciphertext hash in the SDMF data structure, the publisher must commit to just a single ciphertext, closing this hole. See ticket #492 for more details. 
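Returning to the SMDF slot layout above, a rough sketch of how a reader might unpack the fixed-size part of the header, using the offsets listed in the table, is shown below. This is illustrative only: the field names are invented, and the real on-disk format may differ in detail.

    import struct

    def parse_smdf_header(data):
        hdr = {}
        hdr["version"] = data[0]                                       # \x01 for this format
        (hdr["seqnum"],) = struct.unpack(">Q", data[1:9])
        hdr["R"] = data[9:41]                                          # root of share hash tree
        hdr["data_salt"] = data[41:73]
        hdr["encrypted_salt"] = data[73:105]
        (hdr["k"], hdr["N"],
         hdr["segment_size"], hdr["data_length"]) = struct.unpack(">BBQQ", data[105:123])
        # offset-table entries: signature, share hash chain, block hash tree, share data, EOF
        (hdr["o_signature"], hdr["o_hash_chain"], hdr["o_block_tree"],
         hdr["o_share_data"], hdr["o_eof"]) = struct.unpack(">LLLLQ", data[127:151])
        hdr["verification_key"] = data[151:151 + 256]
        hdr["signature"] = data[407:447]
        return hdr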
tahoe_lafs-1.20.0/docs/proposed/mutsemi.svg0000644000000000000000000022264013615410400015614 0ustar00 image/svg+xml signing (private) key semi-private key verifying (public) key read-write cap read-only cap verify cap shares Merkle Tree SHA256d SHA256d SHA256d FEC salt encryption key plaintext ciphertext SHA256dtruncated AES-CTR share 1 share 2 share 3 share 4 tahoe_lafs-1.20.0/docs/proposed/old-accounts-introducer.txt0000644000000000000000000001576513615410400020730 0ustar00This is a proposal for handing accounts and quotas in Tahoe. Nothing is final yet.. we are still evaluating the options. = Account Management: Introducer-based = A Tahoe grid can be configured in several different modes. The simplest mode (which is also the default) is completely permissive: all storage servers will accept shares from all clients, and no attempt is made to keep track of who is storing what. Access to the grid is mostly equivalent to having access to the Introducer (or convincing one of the existing members to give you a list of all their storage server FURLs). This mode, while a good starting point, does not accomodate any sort of auditing or quota management. Even in a small friendnet, operators might like to know how much of their storage space is being consumed by Alice, so they might be able to ask her to cut back when overall disk usage is getting to high. In a larger commercial deployment, a service provider needs to be able to get accurate usage numbers so they can bill the user appropriately. In addition, the operator may want the ability to delete all of Bob's shares (i.e. cancel any outstanding leases) when he terminates his account. There are several lease-management/garbage-collection/deletion strategies possible for a Tahoe grid, but the most efficient ones require knowledge of lease ownership, so that renewals and expiration can take place on a per-account basis rather than a (more numerous) per-share basis. == Accounts == To accomplish this, "Accounts" can be established in a Tahoe grid. There is nominally one account per human user of the grid, but of course a user might use multiple accounts, or an account might be shared between multiple users. The Account is the smallest unit of quota and lease management. Accounts are created by an "Account Manager". In a commercial network there will be just one (centralized) account manager, and all storage nodes will be configured to require a valid account before providing storage services. In a friendnet, each peer can run their own account manager, and servers will accept accounts from any of the managers (this mode is permissive but allows quota-tracking of non-malicious users). The account manager is free to manage the accounts as it pleases. Large systems will probably use a database to correlate things like username, storage consumed, billing status, etc. == Overview == The Account Manager ("AM") replaces the normal Introducer node: grids which use an Account Manager will not run an Introducer, and the participating nodes will not be configured with an "introducer.furl". Instead, each client will be configured with a different "account.furl", which gives that client access to a specific account. These account FURLs point to an object inside the Account Manager which exists solely for the benefit of that one account. When the client needs access to storage servers, it will use this account object to acquire personalized introductions to a per-account "Personal Storage Server" facet, one per storage server node. 
For example, Alice would wind up with PSS[1A] on server 1, and PSS[2A] on server 2. Bob would get PSS[1B] and PSS[2B]. These PSS facets provide the same remote methods as the old generic SS facet, except that every time they create a lease object, the account information of the holder is recorded in that lease. The client stores a list of these PSS facet FURLs in persistent storage, and uses them in the "get_permuted_peers" function that all uploads and downloads use to figure out who to talk to when looking for shares or shareholders. Each Storage Server has a private facet that it gives to the Account Manager. This facet allows the AM to create PSS facets for a specific account. In particular, the AM tells the SS "please create account number 42, and tell me the PSS FURL that I should give to the client". The SS creates an object which remembers the account number, creates a FURL for it, and returns the FURL. If there is a single central account manager, then account numbers can be small integers. (if there are multiple ones, they need to be large random strings to ensure uniqueness). To avoid requiring large (accounts*servers) lookup tables, a given account should use the same identifer for all the servers it talks to. When this can be done, the PSS and Account FURLs are generated as MAC'ed copies of the account number. More specifically, the PSS FURL is a MAC'ed copy of the account number: each SS has a private secret "S", and it creates a string "%d-%s" % (accountnum, b2a(hash(S+accountnum))) to use as the swissnum part of the FURL. The SS uses tub.registerNameLookupHandler to add a function that tries to validate inbound FURLs against this scheme: if successful, it creates a new PSS object with the account number stashed inside. This allows the server to minimize their per-user storage requirements but still insure that PSS FURLs are unguessable. Account FURLs are created by the Account Manager in a similar fashion, using a MAC of the account number. The Account Manager can use the same account number to index other information in a database, like account status, billing status, etc. The mechanism by which Account FURLs are minted is left up to the account manager, but the simple AM that the 'tahoe create-account-manager' command makes has a "new-account" FURL which accepts a username and creates an account for them. The 'tahoe create-account' command is a CLI frontend to this facility. In a friendnet, you could publish this FURL to your friends, allowing everyone to make their own account. In a commercial grid, this facility would be reserved use by the same code which handles billing. == Creating the Account Manager == The 'tahoe create-account-manager' command is used to create a simple account manager node. When started, this node will write several FURLs to its private/ directory, some of which should be provided to other services. * new-account.furl : this FURL allows the holder to create new accounts * manage-accounts.furl : this FURL allows the holder to list and modify all existing accounts * serverdesk.furl : this FURL is used by storage servers to make themselves available to all account holders == Configuring the Storage Servers == To use an account manager, each storage server node should be given access to the AM's serverdesk (by simply copying "serverdesk.furl" into the storage server's base directory). In addition, it should *not* be given an introducer.furl . 
The serverdesk FURL tells the SS that it should allow the AM to create PSS facets for each account, and the lack of an introducer FURL tells the SS to not make its generic SS facet available to anyone. The combination means that clients must acquire PSS facets instead of using the generic one. == Configuring Clients == Each client should be configured to use a specific account by copying their account FURL into their basedir, in a file named "account.furl". In addition, these client nodes should *not* have an "introducer.furl". This combination tells the client to ask the AM for ... tahoe_lafs-1.20.0/docs/proposed/old-accounts-pubkey.txt0000644000000000000000000010361513615410400020041 0ustar00This is a proposal for handing accounts and quotas in Tahoe. Nothing is final yet.. we are still evaluating the options. = Accounts = The basic Tahoe account is defined by a DSA key pair. The holder of the private key has the ability to consume storage in conjunction with a specific account number. The Account Server has a long-term keypair. Valid accounts are marked as such by the Account Server's signature on a "membership card", which binds a specific pubkey to an account number and declares that this pair is a valid account. Each Storage Server which participates in the AS's domain will have the AS's pubkey in its list of valid AS keys, and will thus accept membership cards that were signed by that AS. If the SS accepts multiple ASs, then it will give each a distinct number, and leases will be labled with an (AS#,Account#) pair. If there is only one AS, then leases will be labeled with just the Account#. Each client node is given the FURL of their personal Account object. The Account will accept a DSA public key and return a signed membership card that authorizes the corresponding private key to consume storage on behalf of the account. The client will create its own DSA keypair the first time it connects to the Account, and will then use the resulting membership card for all subsequent storage operations. == Storage Server Goals == The Storage Server cares about two things: 1: maintaining an accurate refcount on each bucket, so it can delete the bucket when the refcount goes to zero 2: being able to answer questions about aggregate usage per account The SS conceptually maintains a big matrix of lease information: one column per account, one row per storage index. The cells contain a boolean (has-lease or no-lease). If the grid uses per-lease timers, then each has-lease cell also contains a lease timer. This matrix may be stored in a variety of ways: entries in each share file, or items in a SQL database, according to the desired tradeoff between complexity, robustness, read speed, and write speed. Each client (by virtue of their knowledge of an authorized private key) gets to manipulate their column of this matrix in any way they like: add lease, renew lease, delete lease. (TODO: for reconcilliation purposes, the should also be able to enumerate leases). == Storage Operations == Side-effect-causing storage operations come in three forms: 1: allocate bucket / add lease to existing bucket arguments: storage_index=, storage_server=, ueb_hash=, account= 2: renew lease arguments: storage_index=, storage_server=, account= 3: cancel lease arguments: storage_index=, storage_server=, account= (where lease renewal is only relevant for grids which use per-lease timers). Clients do add-lease when they upload a file, and cancel-lease when they remove their last reference to it. 
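As a toy illustration of the lease matrix described above (one column per account, one row per storage index, one optional timer per cell), something like the following would support the three storage operations and the per-account aggregation. This is only a sketch, not the server's actual data structure, which might live in share files or a SQL database.

    import time

    class LeaseMatrix:
        def __init__(self):
            self.leases = {}                          # (storage_index, account) -> expiry time

        def add_or_renew_lease(self, storage_index, account, duration=31 * 24 * 3600):
            self.leases[(storage_index, account)] = time.time() + duration

        def cancel_lease(self, storage_index, account):
            self.leases.pop((storage_index, account), None)

        def bucket_refcount(self, storage_index):
            # the bucket can be deleted when this drops to zero
            return sum(1 for (si, _) in self.leases if si == storage_index)

        def account_usage(self, account, share_sizes):
            # aggregate usage for one account, given a map storage_index -> share size
            return sum(share_sizes.get(si, 0) for (si, a) in self.leases if a == account)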
Storage Servers publish a "public storage port" through the introducer, which does not actually enable storage operations, but is instead used in a rights-amplification pattern to grant authorized parties access to a "personal storage server facet". This personal facet is the one that implements allocate_bucket. All clients get access to the same public storage port, which means that we can improve the introduction mechanism later (to use a gossip-based protocol) without affecting the authority-granting protocols. The public storage port accepts signed messages asking for storage authority. It responds by creating a personal facet and making it available to the requester. The account number is curried into the facet, so that all lease-creating operations will record this account number into the lease. By restricting the nature of the personal facets that a client can access, we restrict them to using their designated account number. ======================================== There are two kinds of signed messages: use (other names: connection, FURLification, activation, reification, grounding, specific-making, ?), and delegation. The FURLification message results in a FURL that points to an object which can actually accept RIStorageServer methods. The delegation message results in a new signed message. The furlification message looks like: (pubkey, signed(serialized({limitations}, beneficiary_furl))) The delegation message looks like: (pubkey, signed(serialized({limitations}, delegate_pubkey))) The limitations dict indicates what the resulting connection or delegation can be used for. All limitations for the cert chain are applied, and the result must be restricted to their overall minimum. The following limitation keys are defined: 'account': a number. All resulting leases must be tagged with this account number. A chain with multiple distinct 'account' limitations is an error (the result will not permit leases) 'SI': a storage index (binary string). Leases may only be created for this specific storage index, no other. 'serverid': a peerid (binary string). Leases may only be created on the storage server identified by this serverid. 'UEB_hash': (binary string): Leases may only be created for shares which contain a matching UEB_hash. Note: this limitation is a nuisance to implement correctly: it requires that the storage server parse the share and verify all hashes. 'before': a timestamp (seconds since epoch). All leases must be made before this time. In addition, all liverefs and FURLs must expire and cease working at this time. 'server_size': a number, measuring share size (in bytes). A storage server which sees this message should keep track of how much storage space has been consumed using this liveref/FURL, and throw an exception when receiving a lease request that would bring this total above 'server_size'. Note: this limitation is a nuisance to implement (it works best if 'before' is used and provides a short lifetime). Actually, let's merge the two, and put the type in the limitations dict. 'furl_to' and 'delegate_key' are mutually exclusive. 'furl_to': (string): Used only on furlification messages. This requests the recipient to create an object which implements the given access, then send a FURL which references this object to an RIFURLReceiver.furl() call at the given 'furl_to' FURL. 
To reduce the number of extra roundtrips, both foolscap calls include an extra (ignored) argument that will carry the object being referenced by the FURL, used to pre-load the recipient's foolscap table. In addition, the response message will contain a nonce, to allow the same beneficiary to be used for multiple messages: def process(limitations, nonce, ignored): facet = create_storage_facet(limitations) facet_furl = tub.registerReference(facet) d = tub.getReference(limitations['furl_to']) d.addCallback(lambda rref: rref.furl(facet_furl, nonce, facet)) The server must always send the facet/facet_furl to the furl_to beneficiary, and never to the 'ignored' argument (even though for well-behaved clients these will both refer to the same target). This is to prevent a rogue server from echoing a client's signed message to some other server, to try to steal the client's authority. The facet_furl should be persistent, so to reduce storage space, facet_furl should contain an HMAC'ed list of all limitations, and create_storage_facet() should be deferred until the client actually tries to use the furl. This leads to 150-200 byte base32 swissnums. 'delegate_key': (binary string, a DSA pubkey). Used only on delegation messages. This requests all observers to accept messages signed by the given public key and to apply the associated limitations. I also want to keep the message size small, so I'm going to define a custom netstring-based encoding format for it (JSON expands binary data by about 3.5x). Each dict entry will be encoded as netstring(key)+netstring(value). The container is responsible for providing the size of this serialized structure. The actual message will then look like: def make_message(privkey, limitations): message_to_sign = "".join([ netstring(k) + netstring(v) for k,v in limitations ]) signature = privkey.sign(message_to_sign) pubkey = privkey.get_public_key() msg = netstring(message_to_sign) + netstring(signature) + netstring(pubkey) return msg The deserialization code MUST throw an exception if the same limitations key appears twice, to ensure that everybody interprets the dict the same way. These messages are passed over foolscap connections as a single string. They are also saved to disk in this format. Code should only store them in a deserialized form if the signature has been verified, the cert chain verified, and the limitations accumulated. The membership card is just the following: membership_card = make_message(account_server_privkey, {'account': account_number, 'before': time.time() + 1*MONTH, 'delegate_key': client_pubkey}) This card is provided on demand by the given user's Account facet, for whatever pubkey they submit. When a client learns about a new storage server, they create a new receiver object (and stash the peerid in it), and submit the following message to the RIStorageServerWelcome.get_personal_facet() method: class Receiver(foolscap.Referenceable): def remote_furl(self, facet_furl, nonce, ignored_facet): self.stash = facet_furl receiver = Receiver() nonce = make_nonce() mymsg = make_message(client_privkey, {'furl_to': receiver_furl}) send([membership_card, mymsg], nonce, receiver) Note that the receiver_furl will probably not have a routeable address, but this won't matter because the client is already attached, so foolscap can use the existing connection. 
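For reference, the netstring-based limitations encoding and the mandatory duplicate-key check described above might look roughly like this (a sketch only; signing and verifying with a real DSA key are elided):

    def netstring(b):
        return b"%d:%s," % (len(b), b)

    def parse_netstrings(data):
        items = []
        while data:
            length, _, rest = data.partition(b":")
            n = int(length)
            items.append(rest[:n])
            if rest[n:n + 1] != b",":
                raise ValueError("malformed netstring")
            data = rest[n + 1:]
        return items

    def decode_limitations(message_to_sign):
        parts = parse_netstrings(message_to_sign)
        keys, values = parts[0::2], parts[1::2]
        if len(set(keys)) != len(keys):
            raise ValueError("duplicate limitation key")   # required by the rule above
        return dict(zip(keys, values))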
The receiver should use facet_furl in preference to ignored_facet for consistency, but (unlike the server's use of receiver_furl) there is no security risk in using ignored_facet (since both are coming from the same source). The server will validate the cert chain (see below) and wind up with a complete list of limitations that are to be applied to the facet it will provide to the caller. This list must combine limitations from the entire chain: in particular it must enforce the account= limitation from the membership card. The server will then serialize this limitation dict into a string, compute a fixed-size HMAC code using a server-private secret, then base32 encode the (hmac+limitstring) value (and prepend a "0-" version indicator). The resulting string is used as the swissnum portion of the FURL that is sent to the furl_to target. Later, when the client tries to dereference this FURL, a Tub.registerNameLookupHandler hook will notice the attempt, claim the "0-" namespace, base32decode the string, check the HMAC, decode the limitation dict, then create and return an RIStorageServer facet with these limitations. The client should cache the (peerid, FURL) mapping in persistent storage. Later, when it learns about this storage server again, it will use the cached FURL instead of signing another message. If the getReference or the storage operation fails with StorageAuthorityExpiredError, the cache entry should be removed and the client should sign a new message to obtain a new one. (security note: an evil storage server can take 'mymsg' and present it to someone else, but other servers will only send the resulting authority to the client's receiver_furl, so the evil server cannot benefit from this. The receiver object has the serverid curried into it, so the evil server can only affect the client's mapping for this one serverid, not anything else, so the server cannot hurt the client in any way other than denying service to itself. It might be a good idea to include serverid= in the message, but it isn't clear that it really helps anything). When the client wants to use a Helper, it needs to delegate some amount of storage authority to the helper. The first phase has the client send the storage index to the helper, so it can query servers and decide whether the file needs to be uploaded or not. If it decides yes, the Helper creates a new Uploader object and a receiver object, and sends the Uploader liveref and the receiver FURL to the client. The client then creates a message for the helper to use: helper_msg = make_message(client_privkey, {'furl_to': helper_rx_furl, 'SI': storage_index, 'before': time.time() + 1*DAY, #? 'server_size': filesize/k+overhead, }) The client then sends (membership_card, helper_msg) to the helper. The Helper sends (membership_card, helper_msg) to each storage server that it needs to use for the upload. This gives the Helper access to a limited facet on each storage server. This facet gives the helper the authority to upload data for a specific storage index, for a limited time, using leases that are tagged by the user's account number. The helper cannot use the client's storage authority for any other file. The size limit prevents the helper from storing some other (larger) file of its own using this authority. The time restriction allows the storage servers to expire their 'server_size' table entry quickly, and prevents the helper from hanging on to the storage authority indefinitely. 
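The HMAC'ed "0-" swissnum idea described above might be implemented along these lines. This is illustrative only: the serialization of the limitations, the MAC length, and the exact encoding are not specified here, and the spec above expects roughly 150-200 byte base32 swissnums.

    import hmac, hashlib
    from base64 import b32encode, b32decode

    def mint_swissnum(server_secret, limitstring):
        mac = hmac.new(server_secret, limitstring, hashlib.sha256).digest()
        return "0-" + b32encode(mac + limitstring).decode("ascii").lower()

    def check_swissnum(server_secret, swissnum):
        if not swissnum.startswith("0-"):
            raise ValueError("unknown swissnum version")
        blob = b32decode(swissnum[2:].upper())
        mac, limitstring = blob[:32], blob[32:]
        expected = hmac.new(server_secret, limitstring, hashlib.sha256).digest()
        if not hmac.compare_digest(mac, expected):
            raise ValueError("bad swissnum")
        return limitstring    # caller deserializes the limitations and builds the facet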
The Helper only gets one furl_to target, which must be used for multiple SS peerids. The helper's receiver must parse the FURL that gets returned to determine which server is which. [problems: an evil server could deliver a bogus FURL which points to a different server. The Helper might reject the real server's good FURL as a duplicate. This allows an evil server to block access to a good server. Queries could be sent sequentially, which would partially mitigate this problem (an evil server could send multiple requests). Better: if the cert-chain send message could include a nonce, which is supposed to be returned with the FURL, then the helper could use this to correlate sends and receives.] === repair caps === There are three basic approaches to provide a Repairer with the storage authority that it needs. The first is to give the Repairer complete authority: allow it to place leases for whatever account number it wishes. This is simple and requires the least overhead, but of course it give the Repairer the ability to abuse everyone's quota. The second is to give the Repairer no user authority: instead, give the repairer its own account, and build it to keep track of which leases it is holding on behalf of one of its customers. This repairer will slowly accumulate quota space over time, as it creates new shares to replace ones that have decayed. Eventually, when the client comes back online, the client should establish its own leases on these new shares and allow the repairer to cancel its temporary ones. The third approach is in between the other two: give the repairer some limited authority over the customer's account, but not enough to let it consume the user's whole quota. To create the storage-authority portion of a (one-month) repair-cap, the client creates a new DSA keypair (repair_privkey, repair_pubkey), and then creates a signed message and bundles it into the repaircap: repair_msg = make_message(client_privkey, {'delegate_key': repair_pubkey, 'SI': storage_index, 'UEB_hash': file_ueb_hash}) repair_cap = (verify_cap, repair_privkey, (membership_card, repair_msg)) This gives the holder of the repair cap a time-limited authority to upload shares for the given storage index which contain the given data. This prohibits the repair-cap from being used to upload or repair any other file. When the repairer needs to upload a new share, it will use the delegated key to create its own signed message: upload_msg = make_message(repair_privkey, {'furl_to': repairer_rx_furl}) send(membership_card, repair_msg, upload_msg) The biggest problem with the low-authority approaches is the expiration time of the membership card, which limits the duration for which the repair-cap authority is valid. It would be nice if repair-caps could last a long time, years perhaps, so that clients can be offline for a similar period of time. However to retain a reasonable revocation interval for users, the membership card's before= timeout needs to be closer to a month. [it might be reasonable to use some sort of rights-amplification: the repairer has a special cert which allows it to remove the before= value from a chain]. === chain verification === The server will create a chain that starts with the AS's certificate: an unsigned message which derives its authority from being manually placed in the SS's configdir. 
The only limitation in the AS certificate will be on some kind of meta-account, in case we want to use multiple account servers and allow their account numbers to live in distinct number spaces (think sub-accounts or business partners to buy storage in bulk and resell it to users). The rest of the chain comes directly from what the client sent. The server walks the chain, keeping an accumulated limitations dictionary along the way. At each step it knows the pubkey that was delegated by the previous step. == client config == Clients are configured with an Account FURL that points to a private facet on the Account Server. The client generates a private key at startup. It sends the pubkey to the AS facet, which will return a signed delegate_key message (the "membership card") that grants the client's privkey any storage authority it wishes (as long as the account number is set to a specific value). The client stores this membership card in private/membership.cert . RIStorageServer messages will accept an optional account= argument. If left unspecified, the value is taken from the limitations that were curried into the SS facet. In all cases, the value used must meet those limitations. The value must not be None: Helpers/Repairers or other super-powered storage clients are obligated to specify an account number. == server config == Storage servers are configured with an unsigned root authority message. This is like the output of make_message(account_server_privkey, {}) but has empty 'signature' and 'pubkey' strings. This root goes into NODEDIR/storage_authority_root.cert . It is prepended to all chains that arrive. [if/when we accept multiple authorities, storage_authority_root.cert will turn into a storage_authority_root/ directory with *.cert files, and each arriving chain will cause a search through these root certs for a matching pubkey. The empty limitations will be replaced by {domain=X}, which is used as a sort of meta-account.. the details depend upon whether we express account numbers as an int (with various ranges) or as a tuple] The root authority message is published by the Account Server through its web interface, and also into a local file: NODEDIR/storage_authority_root.cert . The admin of the storage server is responsible for copying this file into place, thus enabling clients to use storage services. ---------------------------------------- -- Text beyond this point is out-of-date, and exists purely for background -- Each storage server offers a "public storage port", which only accepts signed messages. The Introducer mechanism exists to give clients a reference to a set of these public storage ports. All clients get access to the same ports. If clients did all their work themselves, these public storage ports would be enough, and no further code would be necessary (all storage requests would we signed the same way). Fundamentally, each storage request must be signed by the account's private key, giving the SS an authenticated Account Number to go with the request. This is used to index the correct cell in the lease matrix. The holder of the account privkey is allowed to manipulate their column of the matrix in any way they like: add leases, renew leases, delete leases. (TODO: for reconcilliation purposes, they should also be able to enumerate leases). The storage request is sent in the form of a signed request message, accompanied by the membership card. 
For example: req = SIGN("allocate SI=123 SSID=abc", accountprivkey) , membership_card -> RemoteBucketWriter reference Upon receipt of this request, the storage server will return a reference to a RemoteBucketWriter object, which the client can use to fill and close the bucket. The SS must perform two DSA signature verifications before accepting this request. The first is to validate the membership card: the Account Server's pubkey is used to verify the membership card's signature, from which an account pubkey and account# is extracted. The second is to validate the request: the account pubkey is used to verify the request signature. If both are valid, the full request (with account# and storage index) is delivered to the internal StorageServer object. Note that the signed request message includes the Storage Server's node ID, to prevent this storage server from taking the signed message and echoing to other storage servers. Each SS will ignore any request that is not addressed to the right SSID. Also note that the SI= and SSID= fields may contain wildcards, if the signing client so chooses. == Caching Signature Verification == We add some complexity to this simple model to achieve two goals: to enable fine-grained delegation of storage capabilities (specifically for renewers and repairers), and to reduce the number of public-key crypto operations that must be performed. The first enhancement is to allow the SS to cache the results of the verification step. To do this, the client creates a signed message which asks the SS to return a FURL of an object which can be used to execute further operations *without* a DSA signature. The FURL is expected to contain a MAC'ed string that contains the account# and the argument restrictions, effectively currying a subset of arguments into the RemoteReference. Clients which do all their operations themselves would use this to obtain a private storage port for each public storage port, stashing the FURLs in a local table, and then later storage operations would be done to those FURLs instead of creating signed requests. For example: req = SIGN("FURL(allocate SI=* SSID=abc)", accountprivkey), membership_card -> FURL Tub.getReference(FURL).allocate(SI=123) -> RemoteBucketWriter reference == Renewers and Repairers A brief digression is in order, to motivate the other enhancement. The "manifest" is a list of caps, one for each node that is reachable from the user's root directory/directories. The client is expected to generate the manifest on a periodic basis (perhaps once a day), and to keep track of which files/dirnodes have been added and removed. Items which have been removed must be explicitly dereferenced to reclaim their storage space. For grids which use per-file lease timers, the manifest is used to drive the Renewer: a process which renews the lease timers on a periodic basis (perhaps once a week). The manifest can also be used to drive a Checker, which in turn feeds work into the Repairer. The manifest should contain the minimum necessary authority to do its job, which generally means it contains the "verify cap" for each node. For immutable files, the verify cap contains the storage index and the UEB hash: enough information to retrieve and validate the ciphertext but not enough to decrypt it. For mutable files, the verify cap contains the storage index and the pubkey hash, which also serves to retrieve and validate ciphertext but not decrypt it. If the client does its own Renewing and Repairing, then a verifycap-based manifest is sufficient. 
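Returning to the two signature checks described earlier in this section, a minimal sketch using the DSA support in the 'cryptography' package might look like the following. How the account pubkey and account number are packed into the membership card body is left to a parse_card_body() callable, since that format is not specified here:

  from cryptography.exceptions import InvalidSignature
  from cryptography.hazmat.primitives import hashes

  def verify_storage_request(as_pubkey, membership_card, request,
                             parse_card_body):
      try:
          # check 1: the Account Server vouches for (account pubkey, account#)
          as_pubkey.verify(membership_card.signature,
                           membership_card.body, hashes.SHA256())
          account_pubkey, account_num = parse_card_body(membership_card.body)
          # check 2: the account key vouches for this particular request
          account_pubkey.verify(request.signature,
                                request.body, hashes.SHA256())
      except InvalidSignature:
          raise PermissionError("signature verification failed")
      return account_num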
However, if the user wants to be able to turn their computer off for a few months and still keep their files around, they need to delegate this job off to some other willing node. In a commercial network, there will be centralized (and perhaps trusted) Renewer/Repairer nodes, but in a friendnet these may not be available, and the user will depend upon one of their friends being willing to run this service for them while they are away. In either of these cases, the verifycaps are not enough: the Renewer will need additional authority to renew the client's leases, and the Repairer will need the authority to create new shares (in the client's name) when necessary. A trusted central service could be given all-account superpowers, allowing it to exercise storage authority on behalf of all users as it pleases. If this is the case, the verifycaps are sufficient. But if we desire to grant less authority to the Renewer/Repairer, then we need a mechanism to attenuate this authority. The usual objcap approach is to create a proxy: an intermediate object which itself is given full authority, but which is unwilling to exercise more than a portion of that authority in response to incoming requests. The not-fully-trusted service is then only given access to the proxy, not the final authority. For example: class Proxy(RemoteReference): def __init__(self, original, storage_index): self.original = original self.storage_index = storage_index def remote_renew_leases(self): return self.original.renew_leases(self.storage_index) renewer.grant(Proxy(target, "abcd")) But this approach interposes the proxy in the calling chain, requiring the machine which hosts the proxy to be available and on-line at all times, which runs opposite to our use case (turning the client off for a month). == Creating Attenuated Authorities == The other enhancement is to use more public-key operations to allow the delegation of reduced authority to external helper services. Specifically, we want to give then Renewer the ability to renew leases for a specific file, rather than giving it lease-renewal power for all files. Likewise, the Repairer should have the ability to create new shares, but only for the file that is being repaired, not for unrelated files. If we do not mind giving the storage servers the ability to replay their inbound message to other storage servers, then the client can simply generate a signed message with a wildcard SSID= argument and leave it in the care of the Renewer or Repairer. For example, the Renewer would get: SIGN("renew-lease SI=123 SSID=*", accountprivkey), membership_card Then, when the Renewer needed to renew a lease, it would deliver this signed request message to the storage server. The SS would verify the signatures just as if the message came from the original client, find them good, and perform the desired operation. With this approach, the manifest that is delivered to the remote Renewer process needs to include a signed lease-renewal request for each file: we use the term "renew-cap" for this combined (verifycap + signed lease-renewal request) message. Likewise the "repair-cap" would be the verifycap plus a signed allocate-bucket message. 
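Purely as an illustration, a renew-cap could be assembled like this (reusing the informal SIGN() notation from this note as a callable, and assuming the verifycap exposes its storage index):

  def make_renew_cap(verifycap, accountprivkey, membership_card, SIGN):
      # a wildcard SSID means the same signed request works on every server,
      # at the cost of the replay concern discussed below
      renewal_request = SIGN("renew-lease SI=%s SSID=*"
                             % verifycap.storage_index, accountprivkey)
      return (verifycap, renewal_request, membership_card)

  # a renew-cap manifest is then simply a list of these, one per reachable node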
A renew-cap manifest would be enough for a remote Renewer to do its job, a repair-cap manifest would provide a remote Repairer with enough authority, and a cancel-cap manifest would be used for a remote Canceller (used, e.g., to make sure that file has been dereferenced even if the client does not stick around long enough to track down and inform all of the storage servers involved). The only concern is that the SS could also take this exact same renew-lease message and deliver it to other storage servers. This wouldn't cause a concern for mere lease renewal, but the allocate-share message might be a bit less comfortable (you might not want to grant the first storage server the ability to claim space in your name on all other storage servers). Ideally we'd like to send a different message to each storage server, each narrowed in scope to a single SSID, since then none of these messages would be useful on any other SS. If the client knew the identities of all the storage servers in the system ahead of time, it might create a whole slew of signed messages, but a) this is a lot of signatures, only a fraction of which will ever actually be used, and b) new servers might be introduced after the manifest is created, particularly if we're talking about repair-caps instead of renewal-caps. The Renewer can't generate these one-per-SSID messages from the SSID=* message, because it doesn't have a privkey to make the correct signatures. So without some other mechanism, we're stuck with these relatively coarse authorities. If we want to limit this sort of authority, then we need to introduce a new method. The client begins by generating a new DSA keypair. Then it signs a message that declares the new pubkey to be valid for a specific subset of storage operations (such as "renew-lease SI=123 SSID=*"). Then it delivers the new privkey, the declaration message, and the membership card to the Renewer. The renewer uses the new privkey to sign its own one-per-SSID request message for each server, then sends the (signed request, declaration, membership card) triple to the server. The server needs to perform three verification checks per message: first the membership card, then the declaration message, then the actual request message. == Other Enhancements == If a given authority is likely to be used multiple times, the same give-me-a-FURL trick can be used to cut down on the number of public key operations that must be performed. This is trickier with the per-SI messages. When storing the manifest, things like the membership card should be amortized across a set of common entries. An isolated renew-cap needs to contain the verifycap, the signed renewal request, and the membership card. But a manifest with a thousand entries should only include one copy of the membership card. It might be sensible to define a signed renewal request that grants authority for a set of storage indicies, so that the signature can be shared among several entries (to save space and perhaps processing time). The request could include a Bloom filter of authorized SI values: when the request is actually sent to the server, the renewer would add a list of actual SI values to renew, and the server would accept all that are contained in the filter. == Revocation == The lifetime of the storage authority included in the manifest's renew-caps or repair-caps will determine the lifetime of those caps. 
In particular, if we implement account revocation by using time-limited membership cards (requiring the client to get a new card once a month), then the repair-caps won't work for more than a month, which kind of defeats the purpose. A related issue is the FURL-shortcut: the MAC'ed message needs to include a validity period of some sort, and if the client tries to use a old FURL they should get an error message that will prompt them to try and acquire a newer one. ------------------------------ The client can produce a repair-cap manifest for a specific Repairer's pubkey, so it can produce a signed message that includes the pubkey (instead of needing to generate a new privkey just for this purpose). The result is not a capability, since it can only be used by the holder of the corresponding privkey. So the generic form of the storage operation message is the request (which has all the argument values filled in), followed by a chain of authorizations. The first authorization must be signed by the Account Server's key. Each authorization must be signed by the key mentioned in the previous one. Each one adds a new limitation on the power of the following ones. The actual request is bounded by all the limitations of the chain. The membership card is an authorization that simply limits the account number that can be used: "op=* SI=* SSID=* account=4 signed-by=CLIENT-PUBKEY". So a repair manifest created for a Repairer with pubkey ABCD could consist of a list of verifycaps plus a single authorization (using a Bloom filter to identify the SIs that were allowed): SIGN("allocate SI=[bloom] SSID=* signed-by=ABCD") If/when the Repairer needed to allocate a share, it would use its own privkey to sign an additional message and send the whole list to the SS: request=allocate SI=1234 SSID=EEFS account=4 shnum=2 SIGN("allocate SI=1234 SSID=EEFS", ABCD) SIGN("allocate SI=[bloom] SSID=* signed-by=ABCD", clientkey) membership: SIGN("op=* SI=* SSID=* account=4 signed-by=clientkey", ASkey) [implicit]: ASkey ---------------------------------------- Things would be a lot simpler if the Repairer (actually the Re-Leaser) had everybody's account authority. One simplifying approach: the Repairer/Re-Leaser has its own account, and the shares it creates are leased under that account number. The R/R keeps track of which leases it has created for whom. When the client eventually comes back online, it is told to perform a re-leasing run, and after that occurs the R/R can cancel its own temporary leases. This would effectively transfer storage quota from the original client to the R/R over time (as shares are regenerated by the R/R while the client remains offline). If the R/R is centrally managed, the quota mechanism can sum the R/R's numbers with the SS's numbers when determining how much storage is consumed by any given account. Not quite as clean as storing the exact information in the SS's lease tables directly, but: * the R/R no longer needs any special account authority (it merely needs an accurate account number, which can be supplied by giving the client a specific facet that is bound to that account number) * the verify-cap manifest is sufficient to perform repair * no extra DSA keys are necessary * account authority could be implemented with either DSA keys or personal SS facets: i.e. we don't need the delegability aspects of DSA keys for use by the repair mechanism (we might still want them to simplify introduction). I *think* this would eliminate all that complexity of chained authorization messages. 
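To illustrate the bookkeeping this simplification requires, here is a rough sketch of the Re-Leaser's lease tracking; every name in it is hypothetical rather than an existing Tahoe-LAFS API:

  class ReLeaser(object):
      def __init__(self, own_account):
          self.own_account = own_account
          # (storage_index, serverid) -> account number of the real owner
          self.temporary_leases = {}

      def repair_share(self, server, storage_index, owner_account, share):
          # new shares are leased under the Re-Leaser's own account number
          server.allocate(storage_index, share, account=self.own_account)
          self.temporary_leases[(storage_index, server.serverid)] = owner_account

      def re_leasing_run_complete(self, owner_account, servers):
          # called once the returning client has re-leased its shares itself
          for (si, serverid), owner in list(self.temporary_leases.items()):
              if owner == owner_account:
                  servers[serverid].cancel_lease(si, account=self.own_account)
                  del self.temporary_leases[(si, serverid)]

Summing temporary_leases per owner is also the number the quota mechanism would need in order to attribute that space to the right account while the client is offline.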
tahoe_lafs-1.20.0/docs/specifications/CHK-hashes.svg0000644000000000000000000011626413615410400017163 0ustar00 image/svg+xml data(plaintext) data(crypttext) shares plaintexthash tree crypttexthash tree sharehash tree URI Extension Block plaintext root plaintext (flat) hash crypttext root crypttext (flat) hash share root URI encryptionkey storageindex UEBhash AES FEC A B : B is derived from A by hashing, therefore B validates A A B : B is derived from A by encryption or erasure coding A B : A is used as an index to retrieve data B SHARE CHK File Hashes plaintext hashes removedsee #453 tahoe_lafs-1.20.0/docs/specifications/Makefile0000644000000000000000000000070413615410400016213 0ustar00SOURCES = CHK-hashes.svg file-encoding1.svg file-encoding2.svg \ file-encoding3.svg file-encoding4.svg file-encoding5.svg \ file-encoding6.svg PNGS = $(patsubst %.svg,%.png,$(SOURCES)) EPSS = $(patsubst %.svg,%.eps,$(SOURCES)) .PHONY: images-png images-eps all: $(PNGS) $(EPSS) images-png: $(PNGS) images-eps: $(EPSS) %.png: %.svg inkscape -b white -d 90 -D --export-png $@ $< %.eps: %.svg inkscape --export-eps $@ $< clean: rm -f *.png *.eps tahoe_lafs-1.20.0/docs/specifications/URI-extension.rst0000644000000000000000000000425513615410400017763 0ustar00.. -*- coding: utf-8-with-signature -*- =================== URI Extension Block =================== This block is a serialized dictionary with string keys and string values (some of which represent numbers, some of which are SHA-256 hashes). All buckets hold an identical copy. The hash of the serialized data is kept in the URI. The download process must obtain a valid copy of this data before any decoding can take place. The download process must also obtain other data before incremental validation can be performed. Full-file validation (for clients who do not wish to do incremental validation) can be performed solely with the data from this block. At the moment, this data block contains the following keys (and an estimate on their sizes):: size 5 segment_size 7 num_segments 2 needed_shares 2 total_shares 3 codec_name 3 codec_params 5+1+2+1+3=12 tail_codec_params 12 share_root_hash 32 (binary) or 52 (base32-encoded) each plaintext_hash plaintext_root_hash crypttext_hash crypttext_root_hash Some pieces are needed elsewhere (size should be visible without pulling the block, the Tahoe3 algorithm needs total_shares to find the right peers, all peer selection algorithms need needed_shares to ask a minimal set of peers). Some pieces are arguably redundant but are convenient to have present (test_encode.py makes use of num_segments). The rule for this data block is that it should be a constant size for all files, regardless of file size. Therefore hash trees (which have a size that depends linearly upon the number of segments) are stored elsewhere in the bucket, with only the hash tree root stored in this data block. This block will be serialized as follows:: assert that all keys match ^[a-zA-z_\-]+$ sort all the keys lexicographically for k in keys: write("%s:" % k) write(netstring(data[k])) Serialized size:: dense binary (but decimal) packing: 160+46=206 including 'key:' (185) and netstring (6*3+7*4=46) on values: 231 including 'key:%d\n' (185+13=198) and printable values (46+5*52=306)=504 We'll go with the 231-sized block, and provide a tool to dump it as text if we really want one. 
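For concreteness, the serialization rules above can be written out as a short Python sketch (the netstring helper is inlined so that the sketch stands alone)::

    import re

    def netstring(value: bytes) -> bytes:
        return b"%d:%s," % (len(value), value)

    def serialize_ueb(data: dict) -> bytes:
        out = []
        for key in sorted(data):                      # lexicographic order
            assert re.match(r"^[a-zA-Z_\-]+$", key), key
            out.append(key.encode("ascii") + b":" + netstring(data[key]))
        return b"".join(out)

For example, ``serialize_ueb({"size": b"1000000", "needed_shares": b"3"})`` produces ``needed_shares:1:3,size:7:1000000,``.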
tahoe_lafs-1.20.0/docs/specifications/derive_renewal_secret.py0000644000000000000000000000621513615410400021470 0ustar00 """ This is a reference implementation of the lease renewal secret derivation protocol in use by Tahoe-LAFS clients as of 1.16.0. """ from allmydata.util.base32 import ( a2b as b32decode, b2a as b32encode, ) from allmydata.util.hashutil import ( tagged_hash, tagged_pair_hash, ) def derive_renewal_secret(lease_secret: bytes, storage_index: bytes, tubid: bytes) -> bytes: assert len(lease_secret) == 32 assert len(storage_index) == 16 assert len(tubid) == 20 bucket_renewal_tag = b"allmydata_bucket_renewal_secret_v1" file_renewal_tag = b"allmydata_file_renewal_secret_v1" client_renewal_tag = b"allmydata_client_renewal_secret_v1" client_renewal_secret = tagged_hash(lease_secret, client_renewal_tag) file_renewal_secret = tagged_pair_hash( file_renewal_tag, client_renewal_secret, storage_index, ) peer_id = tubid return tagged_pair_hash(bucket_renewal_tag, file_renewal_secret, peer_id) def demo(): secret = b32encode(derive_renewal_secret( b"lease secretxxxxxxxxxxxxxxxxxxxx", b"storage indexxxx", b"tub idxxxxxxxxxxxxxx", )).decode("ascii") print("An example renewal secret: {}".format(secret)) def test(): # These test vectors created by intrumenting Tahoe-LAFS # bb57fcfb50d4e01bbc4de2e23dbbf7a60c004031 to emit `self.renew_secret` in # allmydata.immutable.upload.ServerTracker.query and then uploading a # couple files to a couple different storage servers. test_vector = [ dict(lease_secret=b"boity2cdh7jvl3ltaeebuiobbspjmbuopnwbde2yeh4k6x7jioga", storage_index=b"vrttmwlicrzbt7gh5qsooogr7u", tubid=b"v67jiisoty6ooyxlql5fuucitqiok2ic", expected=b"osd6wmc5vz4g3ukg64sitmzlfiaaordutrez7oxdp5kkze7zp5zq", ), dict(lease_secret=b"boity2cdh7jvl3ltaeebuiobbspjmbuopnwbde2yeh4k6x7jioga", storage_index=b"75gmmfts772ww4beiewc234o5e", tubid=b"v67jiisoty6ooyxlql5fuucitqiok2ic", expected=b"35itmusj7qm2pfimh62snbyxp3imreofhx4djr7i2fweta75szda", ), dict(lease_secret=b"boity2cdh7jvl3ltaeebuiobbspjmbuopnwbde2yeh4k6x7jioga", storage_index=b"75gmmfts772ww4beiewc234o5e", tubid=b"lh5fhobkjrmkqjmkxhy3yaonoociggpz", expected=b"srrlruge47ws3lm53vgdxprgqb6bz7cdblnuovdgtfkqrygrjm4q", ), dict(lease_secret=b"vacviff4xfqxsbp64tdr3frg3xnkcsuwt5jpyat2qxcm44bwu75a", storage_index=b"75gmmfts772ww4beiewc234o5e", tubid=b"lh5fhobkjrmkqjmkxhy3yaonoociggpz", expected=b"b4jledjiqjqekbm2erekzqumqzblegxi23i5ojva7g7xmqqnl5pq", ), ] for n, item in enumerate(test_vector): derived = b32encode(derive_renewal_secret( b32decode(item["lease_secret"]), b32decode(item["storage_index"]), b32decode(item["tubid"]), )) assert derived == item["expected"] , \ "Test vector {} failed: {} (expected) != {} (derived)".format( n, item["expected"], derived, ) print("{} test vectors validated".format(len(test_vector))) test() demo() tahoe_lafs-1.20.0/docs/specifications/dirnodes.rst0000644000000000000000000005623413615410400017125 0ustar00.. -*- coding: utf-8-with-signature -*- ========================== Tahoe-LAFS Directory Nodes ========================== As explained in the architecture docs, Tahoe-LAFS can be roughly viewed as a collection of three layers. The lowest layer is the key-value store: it provides operations that accept files and upload them to the grid, creating a URI in the process which securely references the file's contents. The middle layer is the file store, creating a structure of directories and filenames resembling the traditional Unix or Windows filesystems. 
The top layer is the application layer, which uses the lower layers to provide useful services to users, like a backup application, or a way to share files with friends. This document examines the middle layer, the "file store". 1. `Key-value Store Primitives`_ 2. `File Store Goals`_ 3. `Dirnode Goals`_ 4. `Dirnode secret values`_ 5. `Dirnode storage format`_ 6. `Dirnode sizes, mutable-file initial read sizes`_ 7. `Design Goals, redux`_ 1. `Confidentiality leaks in the storage servers`_ 2. `Integrity failures in the storage servers`_ 3. `Improving the efficiency of dirnodes`_ 4. `Dirnode expiration and leases`_ 8. `Starting Points: root dirnodes`_ 9. `Mounting and Sharing Directories`_ 10. `Revocation`_ Key-value Store Primitives ========================== In the lowest layer (key-value store), there are two operations that reference immutable data (which we refer to as "CHK URIs" or "CHK read-capabilities" or "CHK read-caps"). One puts data into the grid (but only if it doesn't exist already), the other retrieves it:: chk_uri = put(data) data = get(chk_uri) We also have three operations which reference mutable data (which we refer to as "mutable slots", or "mutable write-caps and read-caps", or sometimes "SSK slots"). One creates a slot with some initial contents, a second replaces the contents of a pre-existing slot, and the third retrieves the contents:: mutable_uri = create(initial_data) replace(mutable_uri, new_data) data = get(mutable_uri) File Store Goals ================ The main goal for the middle (file store) layer is to give users a way to organize the data that they have uploaded into the grid. The traditional way to do this in computer filesystems is to put this data into files, give those files names, and collect these names into directories. Each directory is a set of name-entry pairs, each of which maps a "child name" to a directory entry pointing to an object of some kind. Those child objects might be files, or they might be other directories. Each directory entry also contains metadata. The directory structure is therefore a directed graph of nodes, in which each node might be a directory node or a file node. All file nodes are terminal nodes. Dirnode Goals ============= What properties might be desirable for these directory nodes? In no particular order: 1. functional. Code which does not work doesn't count. 2. easy to document, explain, and understand 3. confidential: it should not be possible for others to see the contents of a directory 4. integrity: it should not be possible for others to modify the contents of a directory 5. available: directories should survive host failure, just like files do 6. efficient: in storage, communication bandwidth, number of round-trips 7. easy to delegate individual directories in a flexible way 8. updateness: everybody looking at a directory should see the same contents 9. monotonicity: everybody looking at a directory should see the same sequence of updates Some of these goals are mutually exclusive. For example, availability and consistency are opposing, so it is not possible to achieve #5 and #8 at the same time. Moreover, it takes a more complex architecture to get close to the available-and-consistent ideal, so #2/#6 is in opposition to #5/#8. Tahoe-LAFS v0.7.0 introduced distributed mutable files, which use public-key cryptography for integrity, and erasure coding for availability. These achieve roughly the same properties as immutable CHK files, but their contents can be replaced without changing their identity. 
Dirnodes are then just a special way of interpreting the contents of a specific mutable file. Earlier releases used a "vdrive server": this server was abolished in the v0.7.0 release. For details of how mutable files work, please see :doc:`mutable`. For releases since v0.7.0, we achieve most of our desired properties. The integrity and availability of dirnodes is equivalent to that of regular (immutable) files, with the exception that there are more simultaneous-update failure modes for mutable slots. Delegation is quite strong: you can give read-write or read-only access to any subtree, and the data format used for dirnodes is such that read-only access is transitive: i.e. if you grant Bob read-only access to a parent directory, then Bob will get read-only access (and *not* read-write access) to its children. Relative to the previous "vdrive server"-based scheme, the current distributed dirnode approach gives better availability, but cannot guarantee updateness quite as well, and requires far more network traffic for each retrieval and update. Mutable files are somewhat less available than immutable files, simply because of the increased number of combinations (shares of an immutable file are either present or not, whereas there are multiple versions of each mutable file, and you might have some shares of version 1 and other shares of version 2). In extreme cases of simultaneous update, mutable files might suffer from non-monotonicity. Dirnode secret values ===================== As mentioned before, dirnodes are simply a special way to interpret the contents of a mutable file, so the secret keys and capability strings described in :doc:`mutable` are all the same. Each dirnode contains an RSA public/private keypair, and the holder of the "write capability" will be able to retrieve the private key (as well as the AES encryption key used for the data itself). The holder of the "read capability" will be able to obtain the public key and the AES data key, but not the RSA private key needed to modify the data. The "write capability" for a dirnode grants read-write access to its contents. This is expressed on concrete form as the "dirnode write cap": a printable string which contains the necessary secrets to grant this access. Likewise, the "read capability" grants read-only access to a dirnode, and can be represented by a "dirnode read cap" string. For example, URI:DIR2:swdi8ge1s7qko45d3ckkyw1aac%3Aar8r5j99a4mezdojejmsfp4fj1zeky9gjigyrid4urxdimego68o is a write-capability URI, while URI:DIR2-RO:buxjqykt637u61nnmjg7s8zkny:ar8r5j99a4mezdojejmsfp4fj1zeky9gjigyrid4urxdimego68o is a read-capability URI, both for the same dirnode. Dirnode storage format ====================== Each dirnode is stored in a single mutable file, distributed in the Tahoe-LAFS grid. The contents of this file are a serialized list of netstrings, one per child. Each child is a list of four netstrings: (name, rocap, rwcap, metadata). (Remember that the contents of the mutable file are encrypted by the read-cap, so this section describes the plaintext contents of the mutable file, *after* it has been decrypted by the read-cap.) The name is simple a UTF-8 -encoded child name. The 'rocap' is a read-only capability URI to that child, either an immutable (CHK) file, a mutable file, or a directory. It is also possible to store 'unknown' URIs that are not recognized by the current version of Tahoe-LAFS. 
The 'rwcap' is a read-write capability URI for that child, encrypted with the dirnode's write-cap: this enables the "transitive readonlyness" property, described further below. The 'metadata' is a JSON-encoded dictionary of type,value metadata pairs. Some metadata keys are pre-defined, the rest are left up to the application. Each rwcap is stored as IV + ciphertext + MAC. The IV is a 16-byte random value. The ciphertext is obtained by using AES in CTR mode on the rwcap URI string, using a key that is formed from a tagged hash of the IV and the dirnode's writekey. The MAC is written only for compatibility with older Tahoe-LAFS versions and is no longer verified. If Bob has read-only access to the 'bar' directory, and he adds it as a child to the 'foo' directory, then he will put the read-only cap for 'bar' in both the rwcap and rocap slots (encrypting the rwcap contents as described above). If he has full read-write access to 'bar', then he will put the read-write cap in the 'rwcap' slot, and the read-only cap in the 'rocap' slot. Since other users who have read-only access to 'foo' will be unable to decrypt its rwcap slot, this limits those users to read-only access to 'bar' as well, thus providing the transitive readonlyness that we desire. Dirnode sizes, mutable-file initial read sizes ============================================== How big are dirnodes? When reading dirnode data out of mutable files, how large should our initial read be? If we guess exactly, we can read a dirnode in a single round-trip, and update one in two RTT. If we guess too high, we'll waste some amount of bandwidth. If we guess low, we need to make a second pass to get the data (or the encrypted privkey, for writes), which will cost us at least another RTT. Assuming child names are between 10 and 99 characters long, how long are the various pieces of a dirnode? :: netstring(name) ~= 4+len(name) chk-cap = 97 (for 4-char filesizes) dir-rw-cap = 88 dir-ro-cap = 91 netstring(cap) = 4+len(cap) encrypted(cap) = 16+cap+32 JSON({}) = 2 JSON({ctime=float,mtime=float,'tahoe':{linkcrtime=float,linkmotime=float}}): 137 netstring(metadata) = 4+137 = 141 so a CHK entry is:: 5+ 4+len(name) + 4+97 + 5+16+97+32 + 4+137 And a 15-byte filename gives a 416-byte entry. When the entry points at a subdirectory instead of a file, the entry is a little bit smaller. So an empty directory uses 0 bytes, a directory with one child uses about 416 bytes, a directory with two children uses about 832, etc. When the dirnode data is encoding using our default 3-of-10, that means we get 139ish bytes of data in each share per child. The pubkey, signature, and hashes form the first 935ish bytes of the container, then comes our data, then about 1216 bytes of encprivkey. So if we read the first:: 1kB: we get 65bytes of dirnode data : only empty directories 2kB: 1065bytes: about 8 3kB: 2065bytes: about 15 entries, or 6 entries plus the encprivkey 4kB: 3065bytes: about 22 entries, or about 13 plus the encprivkey So we've written the code to do an initial read of 4kB from each share when we read the mutable file, which should give good performance (one RTT) for small directories. Design Goals, redux =================== How well does this design meet the goals? 1. functional: YES: the code works and has extensive unit tests 2. documentable: YES: this document is the existence proof 3. confidential: YES: see below 4. integrity: MOSTLY: a coalition of storage servers can rollback individual mutable files, but not a single one. 
No server can substitute fake data as genuine. 5. availability: YES: as long as 'k' storage servers are present and have the same version of the mutable file, the dirnode will be available. 6. efficient: MOSTLY: network: single dirnode lookup is very efficient, since clients can fetch specific keys rather than being required to get or set the entire dirnode each time. Traversing many directories takes a lot of roundtrips, and these can't be collapsed with promise-pipelining because the intermediate values must only be visible to the client. Modifying many dirnodes at once (e.g. importing a large pre-existing directory tree) is pretty slow, since each graph edge must be created independently. storage: each child has a separate IV, which makes them larger than if all children were aggregated into a single encrypted string 7. delegation: VERY: each dirnode is a completely independent object, to which clients can be granted separate read-write or read-only access 8. updateness: VERY: with only a single point of access, and no caching, each client operation starts by fetching the current value, so there are no opportunities for staleness 9. monotonicity: VERY: the single point of access also protects against retrograde motion Confidentiality leaks in the storage servers -------------------------------------------- Dirnode (and the mutable files upon which they are based) are very private against other clients: traffic between the client and the storage servers is protected by the Foolscap SSL connection, so they can observe very little. Storage index values are hashes of secrets and thus unguessable, and they are not made public, so other clients cannot snoop through encrypted dirnodes that they have not been told about. Storage servers can observe access patterns and see ciphertext, but they cannot see the plaintext (of child names, metadata, or URIs). If an attacker operates a significant number of storage servers, they can infer the shape of the directory structure by assuming that directories are usually accessed from root to leaf in rapid succession. Since filenames are usually much shorter than read-caps and write-caps, the attacker can use the length of the ciphertext to guess the number of children of each node, and might be able to guess the length of the child names (or at least their sum). From this, the attacker may be able to build up a graph with the same shape as the plaintext file store, but with unlabeled edges and unknown file contents. Integrity failures in the storage servers ----------------------------------------- The mutable file's integrity mechanism (RSA signature on the hash of the file contents) prevents the storage server from modifying the dirnode's contents without detection. Therefore the storage servers can make the dirnode unavailable, but not corrupt it. A sufficient number of colluding storage servers can perform a rollback attack: replace all shares of the whole mutable file with an earlier version. To prevent this, when retrieving the contents of a mutable file, the client queries more servers than necessary and uses the highest available version number. This insures that one or two misbehaving storage servers cannot cause this rollback on their own. Improving the efficiency of dirnodes ------------------------------------ The current mutable-file -based dirnode scheme suffers from certain inefficiencies. 
A very large directory (with thousands or millions of children) will take a significant time to extract any single entry, because the whole file must be downloaded first, then parsed and searched to find the desired child entry. Likewise, modifying a single child will require the whole file to be re-uploaded. The current design assumes (and in some cases, requires) that dirnodes remain small. The mutable files on which dirnodes are based are currently using "SDMF" ("Small Distributed Mutable File") design rules, which state that the size of the data shall remain below one megabyte. More advanced forms of mutable files (MDMF and LDMF) are in the design phase to allow efficient manipulation of larger mutable files. This would reduce the work needed to modify a single entry in a large directory. Judicious caching may help improve the reading-large-directory case. Some form of mutable index at the beginning of the dirnode might help as well. The MDMF design rules allow for efficient random-access reads from the middle of the file, which would give the index something useful to point at. The current SDMF design generates a new RSA public/private keypair for each directory. This takes some time and CPU effort (around 100 milliseconds on a relatively high-end 2021 laptop) per directory. We have designed (but not yet built) a DSA-based mutable file scheme which will use shared parameters to reduce the directory-creation effort to a bare minimum (picking a random number instead of generating two random primes). When a backup program is run for the first time, it needs to copy a large amount of data from a pre-existing local filesystem into reliable storage. This means that a large and complex directory structure needs to be duplicated in the dirnode layer. With the one-object-per-dirnode approach described here, this requires as many operations as there are edges in the imported filesystem graph. Another approach would be to aggregate multiple directories into a single storage object. This object would contain a serialized graph rather than a single name-to-child dictionary. Most directory operations would fetch the whole block of data (and presumeably cache it for a while to avoid lots of re-fetches), and modification operations would need to replace the whole thing at once. This "realm" approach would have the added benefit of combining more data into a single encrypted bundle (perhaps hiding the shape of the graph from a determined attacker), and would reduce round-trips when performing deep directory traversals (assuming the realm was already cached). It would also prevent fine-grained rollback attacks from working: a coalition of storage servers could change the entire realm to look like an earlier state, but it could not independently roll back individual directories. The drawbacks of this aggregation would be that small accesses (adding a single child, looking up a single child) would require pulling or pushing a lot of unrelated data, increasing network overhead (and necessitating test-and-set semantics for the modification side, which increases the chances that a user operation will fail, making it more challenging to provide promises of atomicity to the user). It would also make it much more difficult to enable the delegation ("sharing") of specific directories. 
Since each aggregate "realm" provides all-or-nothing access control, the act of delegating any directory from the middle of the realm would require the realm first be split into the upper piece that isn't being shared and the lower piece that is. This splitting would have to be done in response to what is essentially a read operation, which is not traditionally supposed to be a high-effort action. On the other hand, it may be possible to aggregate the ciphertext, but use distinct encryption keys for each component directory, to get the benefits of both schemes at once. Dirnode expiration and leases ----------------------------- Dirnodes are created any time a client wishes to add a new directory. How long do they live? What's to keep them from sticking around forever, taking up space that nobody can reach any longer? Mutable files are created with limited-time "leases", which keep the shares alive until the last lease has expired or been cancelled. Clients which know and care about specific dirnodes can ask to keep them alive for a while, by renewing a lease on them (with a typical period of one month). Clients are expected to assist in the deletion of dirnodes by canceling their leases as soon as they are done with them. This means that when a client unlinks a directory, it should also cancel its lease on that directory. When the lease count on a given share goes to zero, the storage server can delete the related storage. Multiple clients may all have leases on the same dirnode: the server may delete the shares only after all of the leases have gone away. We expect that clients will periodically create a "manifest": a list of so-called "refresh capabilities" for all of the dirnodes and files that they can reach. They will give this manifest to the "repairer", which is a service that keeps files (and dirnodes) alive on behalf of clients who cannot take on this responsibility for themselves. These refresh capabilities include the storage index, but do *not* include the readkeys or writekeys, so the repairer does not get to read the files or directories that it is helping to keep alive. After each change to the user's file store, the client creates a manifest and looks for differences from their previous version. Anything which was removed prompts the client to send out lease-cancellation messages, allowing the data to be deleted. Starting Points: root dirnodes ============================== Any client can record the URI of a directory node in some external form (say, in a local file) and use it as the starting point of later traversal. Each Tahoe-LAFS user is expected to create a new (unattached) dirnode when they first start using the grid, and record its URI for later use. Mounting and Sharing Directories ================================ The biggest benefit of this dirnode approach is that sharing individual directories is almost trivial. Alice creates a subdirectory that she wants to use to share files with Bob. This subdirectory is attached to Alice's file store at "alice:shared-with-bob". She asks her file store for the read-only directory URI for that new directory, and emails it to Bob. When Bob receives the URI, he attaches the given URI into one of his own directories, perhaps at a place named "bob:shared-with-alice". Every time Alice writes a file into this directory, Bob will be able to read it. (It is also possible to share read-write URIs between users, but that makes it difficult to follow the `Prime Coordination Directive`_ .) 
Neither Alice nor Bob will get access to any files above the mounted directory: there are no 'parent directory' pointers. If Alice creates a nested set of directories, "alice:shared-with-bob/subdir2", and gives a read-only URI to shared-with-bob to Bob, then Bob will be unable to write to either shared-with-bob/ or subdir2/. .. _`Prime Coordination Directive`: ../write_coordination.rst A suitable UI needs to be created to allow users to easily perform this sharing action: dragging a folder from their file store to an IM or email user icon, for example. The UI will need to give the sending user an opportunity to indicate whether they want to grant read-write or read-only access to the recipient. The recipient then needs an interface to drag the new folder into their file store and give it a home. Revocation ========== When Alice decides that she no longer wants Bob to be able to access the shared directory, what should she do? Suppose she's shared this folder with both Bob and Carol, and now she wants Carol to retain access to it but Bob to be shut out. Ideally Carol should not have to do anything: her access should continue unabated. The current plan is to have her client create a deep copy of the folder in question, delegate access to the new folder to the remaining members of the group (Carol), asking the lucky survivors to replace their old reference with the new one. Bob may still have access to the old folder, but he is now the only one who cares: everyone else has moved on, and he will no longer be able to see their new changes. In a strict sense, this is the strongest form of revocation that can be accomplished: there is no point trying to force Bob to forget about the files that he read a moment before being kicked out. In addition it must be noted that anyone who can access the directory can proxy for Bob, reading files to him and accepting changes whenever he wants. Preventing delegation between communication parties is just as pointless as asking Bob to forget previously accessed files. However, there may be value to configuring the UI to ask Carol to not share files with Bob, or to removing all files from Bob's view at the same time his access is revoked. tahoe_lafs-1.20.0/docs/specifications/file-encoding.rst0000644000000000000000000001751313615410400020016 0ustar00.. -*- coding: utf-8-with-signature -*- ============= File Encoding ============= When the client wishes to upload an immutable file, the first step is to decide upon an encryption key. There are two methods: convergent or random. The goal of the convergent-key method is to make sure that multiple uploads of the same file will result in only one copy on the grid, whereas the random-key method does not provide this "convergence" feature. The convergent-key method computes the SHA-256d hash of a single-purpose tag, the encoding parameters, a "convergence secret", and the contents of the file. It uses a portion of the resulting hash as the AES encryption key. There are security concerns with using convergence this approach (the "partial-information guessing attack", please see ticket #365 for some references), so Tahoe uses a separate (randomly-generated) "convergence secret" for each node, stored in NODEDIR/private/convergence . The encoding parameters (k, N, and the segment size) are included in the hash to make sure that two different encodings of the same file will get different keys. 
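A hedged sketch of that computation follows; the tag string and the exact way the parameters and convergence secret are folded into the hash are assumptions for illustration, not the precise values used by Tahoe-LAFS::

    import hashlib

    def netstring(value: bytes) -> bytes:
        return b"%d:%s," % (len(value), value)

    def sha256d(data: bytes) -> bytes:
        return hashlib.sha256(hashlib.sha256(data).digest()).digest()

    def convergent_key(convergence_secret: bytes, k: int, n: int,
                       segment_size: int, file_contents: bytes) -> bytes:
        tag = b"illustrative_convergent_encryption_tag_v1"   # hypothetical tag
        params = ("%d,%d,%d" % (k, n, segment_size)).encode("ascii")
        h = sha256d(netstring(tag) + netstring(convergence_secret) +
                    netstring(params) + file_contents)
        return h[:16]   # a portion of the hash becomes the AES key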
This method requires an extra IO pass over the file, to compute this key, and encryption cannot be started until the pass is complete. This means that the convergent-key method will require at least two total passes over the file. The random-key method simply chooses a random encryption key. Convergence is disabled, however this method does not require a separate IO pass, so upload can be done with a single pass. This mode makes it easier to perform streaming upload. Regardless of which method is used to generate the key, the plaintext file is encrypted (using AES in CTR mode) to produce a ciphertext. This ciphertext is then erasure-coded and uploaded to the servers. Two hashes of the ciphertext are generated as the encryption proceeds: a flat hash of the whole ciphertext, and a Merkle tree. These are used to verify the correctness of the erasure decoding step, and can be used by a "verifier" process to make sure the file is intact without requiring the decryption key. The encryption key is hashed (with SHA-256d and a single-purpose tag) to produce the "Storage Index". This Storage Index (or SI) is used to identify the shares produced by the method described below. The grid can be thought of as a large table that maps Storage Index to a ciphertext. Since the ciphertext is stored as erasure-coded shares, it can also be thought of as a table that maps SI to shares. Anybody who knows a Storage Index can retrieve the associated ciphertext: ciphertexts are not secret. .. image:: file-encoding1.svg The ciphertext file is then broken up into segments. The last segment is likely to be shorter than the rest. Each segment is erasure-coded into a number of "blocks". This takes place one segment at a time. (In fact, encryption and erasure-coding take place at the same time, once per plaintext segment). Larger segment sizes result in less overhead overall, but increase both the memory footprint and the "alacrity" (the number of bytes we have to receive before we can deliver validated plaintext to the user). The current default segment size is 128KiB. One block from each segment is sent to each shareholder (aka leaseholder, aka landlord, aka storage node, aka peer). The "share" held by each remote shareholder is nominally just a collection of these blocks. The file will be recoverable when a certain number of shares have been retrieved. .. image:: file-encoding2.svg The blocks are hashed as they are generated and transmitted. These block hashes are put into a Merkle hash tree. When the last share has been created, the merkle tree is completed and delivered to the peer. Later, when we retrieve these blocks, the peer will send many of the merkle hash tree nodes ahead of time, so we can validate each block independently. The root of this block hash tree is called the "block root hash" and used in the next step. .. image:: file-encoding3.svg There is a higher-level Merkle tree called the "share hash tree". Its leaves are the block root hashes from each share. The root of this tree is called the "share root hash" and is included in the "URI Extension Block", aka UEB. The ciphertext hash and Merkle tree are also put here, along with the original file size, and the encoding parameters. The UEB contains all the non-secret values that could be put in the URI, but would have made the URI too big. So instead, the UEB is stored with the share, and the hash of the UEB is put in the URI. The URI then contains the secret encryption key and the UEB hash. 
It also contains the basic encoding parameters (k and N) and the file size, to make download more efficient (by knowing the number of required shares ahead of time, sufficient download queries can be generated in parallel). The URI (also known as the immutable-file read-cap, since possessing it grants the holder the capability to read the file's plaintext) is then represented as a (relatively) short printable string like so:: URI:CHK:auxet66ynq55naiy2ay7cgrshm:6rudoctmbxsmbg7gwtjlimd6umtwrrsxkjzthuldsmo4nnfoc6fa:3:10:1000000 .. image:: file-encoding4.svg During download, when a peer begins to transmit a share, it first transmits all of the parts of the share hash tree that are necessary to validate its block root hash. Then it transmits the portions of the block hash tree that are necessary to validate the first block. Then it transmits the first block. It then continues this loop: transmitting any portions of the block hash tree to validate block#N, then sending block#N. .. image:: file-encoding5.svg So the "share" that is sent to the remote peer actually consists of three pieces, sent in a specific order as they become available, and retrieved during download in a different order according to when they are needed. The first piece is the blocks themselves, one per segment. The last block will likely be shorter than the rest, because the last segment is probably shorter than the rest. The second piece is the block hash tree, consisting of a total of two SHA-1 hashes per block. The third piece is a hash chain from the share hash tree, consisting of log2(numshares) hashes. During upload, all blocks are sent first, followed by the block hash tree, followed by the share hash chain. During download, the share hash chain is delivered first, followed by the block root hash. The client then uses the hash chain to validate the block root hash. Then the peer delivers enough of the block hash tree to validate the first block, followed by the first block itself. The block hash chain is used to validate the block, then it is passed (along with the first block from several other peers) into decoding, to produce the first segment of crypttext, which is then decrypted to produce the first segment of plaintext, which is finally delivered to the user. .. image:: file-encoding6.svg Hashes ====== All hashes use SHA-256d, as defined in Practical Cryptography (by Ferguson and Schneier). All hashes use a single-purpose tag, e.g. the hash that converts an encryption key into a storage index is defined as follows:: SI = SHA256d(netstring("allmydata_immutable_key_to_storage_index_v1") + key) When two separate values need to be combined together in a hash, we wrap each in a netstring. Using SHA-256d (instead of plain SHA-256) guards against length-extension attacks. Using the tag protects our Merkle trees against attacks in which the hash of a leaf is confused with a hash of two children (allowing an attacker to generate corrupted data that nevertheless appears to be valid), and is simply good "cryptograhic hygiene". The `“Chosen Protocol Attack” by Kelsey, Schneier, and Wagner`_ is relevant. Putting the tag in a netstring guards against attacks that seek to confuse the end of the tag with the beginning of the subsequent value. .. 
_“Chosen Protocol Attack” by Kelsey, Schneier, and Wagner: http://www.schneier.com/paper-chosen-protocol.html tahoe_lafs-1.20.0/docs/specifications/file-encoding1.svg0000644000000000000000000005133013615410400020061 0ustar00 image/svg+xml FILE (plaintext) convergentencryptionkey AES-CTR FILE (crypttext) tag storageindex SHA-256 SHA-256 tag encoding parameters randomencryptionkey or tahoe_lafs-1.20.0/docs/specifications/file-encoding2.svg0000644000000000000000000012415013615410400020063 0ustar00 image/svg+xml FILE (crypttext) segA segB segC segD FEC block A1 block A2 block A3 block A4 FEC block B1 block B2 block B3 block B4 FEC block C1 block C2 block C3 block C4 FEC block D1 block D2 block D3 block D4 share4 server 4 tahoe_lafs-1.20.0/docs/specifications/file-encoding3.svg0000644000000000000000000005543713615410400020077 0ustar00 image/svg+xml SHA SHA SHA SHA SHA SHA SHA block A4 block B4 block C4 block D4 share4 server 4 Merkle Tree block hash tree "block root hash" tahoe_lafs-1.20.0/docs/specifications/file-encoding4.svg0000644000000000000000000007453413615410400020077 0ustar00 image/svg+xml blockroot hashes SHA s1 s2 s3 s4 SHA SHA shares share1 share2 share3 share4 Merkle Tree share hash tree "share root hash" URI Extension Block file size encoding parameters share root hash URI / "file read-cap" UEB hash encryption key SHA other hashes tahoe_lafs-1.20.0/docs/specifications/file-encoding5.svg0000644000000000000000000006546613615410400020104 0ustar00 image/svg+xml blockroot hashes SHA s1 s2 s3 s4 SHA SHA share hash tree SHA s5 s6 s7 s8 SHA SHA Merkle Tree "share root hash" SHA merkle hash chainto validate s1 tahoe_lafs-1.20.0/docs/specifications/file-encoding6.svg0000644000000000000000000011116113615410400020065 0ustar00 image/svg+xml SHA SHA SHA SHA SHA SHA block A4 block B4 block C4 block D4 share4 server 4 Merkle Tree block hash tree "block root hash" blockroot hashes SHA s1 s2 s3 s4 SHA SHA Merkle Tree share hash tree "share root hash" merkle hash chainto validate s4 s4 tahoe_lafs-1.20.0/docs/specifications/http-storage-node-protocol.rst0000644000000000000000000014154313615410400022517 0ustar00.. -*- coding: utf-8 -*- Storage Node Protocol ("Great Black Swamp", "GBS") ================================================== The target audience for this document is developers working on Tahoe-LAFS or on an alternate implementation intended to be interoperable. After reading this document, one should expect to understand how Tahoe-LAFS clients interact over the network with Tahoe-LAFS storage nodes. The primary goal of the introduction of this protocol is to simplify the task of implementing a Tahoe-LAFS storage server. Specifically, it should be possible to implement a Tahoe-LAFS storage server without a Foolscap implementation (substituting a simpler GBS server implementation). The Tahoe-LAFS client will also need to change but it is not expected that it will be noticably simplified by this change (though this may be the first step towards simplifying it). 
Glossary -------- `Foolscap `_ an RPC/RMI (Remote Procedure Call / Remote Method Invocation) protocol for use with Twisted storage server a Tahoe-LAFS process configured to offer storage and reachable over the network for store and retrieve operations storage service a Python object held in memory in the storage server which provides the implementation of the storage protocol introducer a Tahoe-LAFS process at a known location configured to re-publish announcements about the location of storage servers :ref:`fURLs ` a self-authenticating URL-like string which can be used to locate a remote object using the Foolscap protocol (the storage service is an example of such an object) :ref:`NURLs ` a self-authenticating URL-like string almost exactly like a fURL but without being tied to Foolscap swissnum a short random string which is part of a fURL/NURL and which acts as a shared secret to authorize clients to use a storage service lease state associated with a share informing a storage server of the duration of storage desired by a client share a single unit of client-provided arbitrary data to be stored by a storage server (in practice, one of the outputs of applying ZFEC encoding to some ciphertext with some additional metadata attached) bucket a group of one or more immutable shares held by a storage server and having a common storage index slot a group of one or more mutable shares held by a storage server and having a common storage index (sometimes "slot" is considered a synonym for "storage index of a slot") storage index a 16 byte string which can address a slot or a bucket (in practice, derived by hashing the encryption key associated with contents of that slot or bucket) write enabler a short secret string which storage servers require to be presented before allowing mutation of any mutable share lease renew secret a short secret string which storage servers required to be presented before allowing a particular lease to be renewed Additional terms related to the Tahoe-LAFS project in general are defined in the :doc:`../glossary` The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT", "SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL" in this document are to be interpreted as described in RFC 2119. Motivation ---------- Foolscap ~~~~~~~~ Foolscap is a remote method invocation protocol with several distinctive features. At its core it allows separate processes to refer each other's objects and methods using a capability-based model. This allows for extremely fine-grained access control in a system that remains highly securable without becoming overwhelmingly complicated. Supporting this is a flexible and extensible serialization system which allows data to be exchanged between processes in carefully controlled ways. Tahoe-LAFS avails itself of only a small portion of these features. A Tahoe-LAFS storage server typically only exposes one object with a fixed set of methods to clients. A Tahoe-LAFS introducer node does roughly the same. Tahoe-LAFS exchanges simple data structures that have many common, standard serialized representations. In exchange for this slight use of Foolscap's sophisticated mechanisms, Tahoe-LAFS pays a substantial price: * Foolscap is implemented only for Python. Tahoe-LAFS is thus limited to being implemented only in Python. * There is only one Python implementation of Foolscap. The implementation is therefore the de facto standard and understanding of the protocol often relies on understanding that implementation. 
* The Foolscap developer community is very small. The implementation therefore advances very little and some non-trivial part of the maintenance cost falls on the Tahoe-LAFS project. * The extensible serialization system imposes substantial complexity compared to the simple data structures Tahoe-LAFS actually exchanges. HTTP ~~~~ HTTP is a request/response protocol that has become the lingua franca of the internet. Combined with the principles of Representational State Transfer (REST) it is widely employed to create, update, and delete data in collections on the internet. HTTP itself provides only modest functionality in comparison to Foolscap. However its simplicity and widespread use have led to a diverse and almost overwhelming ecosystem of libraries, frameworks, toolkits, and so on. By adopting HTTP in place of Foolscap Tahoe-LAFS can realize the following concrete benefits: * Practically every language or runtime has an HTTP protocol implementation (or a dozen of them) available. This change paves the way for new Tahoe-LAFS implementations using tools better suited for certain situations (mobile client implementations, high-performance server implementations, easily distributed desktop clients, etc). * The simplicity of and vast quantity of resources about HTTP make it a very easy protocol to learn and use. This change reduces the barrier to entry for developers to contribute improvements to Tahoe-LAFS's network interactions. * For any given language there is very likely an HTTP implementation with a large and active developer community. Tahoe-LAFS can therefore benefit from the large effort being put into making better libraries for using HTTP. * One of the core features of HTTP is the mundane transfer of bulk data and implementions are often capable of doing this with extreme efficiency. The alignment of this core feature with a core activity of Tahoe-LAFS of transferring bulk data means that a substantial barrier to improved Tahoe-LAFS runtime performance will be eliminated. TLS ~~~ The Foolscap-based protocol provides *some* of Tahoe-LAFS's confidentiality, integrity, and authentication properties by leveraging TLS. An HTTP-based protocol can make use of TLS in largely the same way to provide the same properties. Provision of these properties *is* dependant on implementers following Great Black Swamp's rules for x509 certificate validation (rather than the standard "web" rules for validation). Design Requirements ------------------- Security ~~~~~~~~ Summary !!!!!!! The storage node protocol should offer at minimum the security properties offered by the Foolscap-based protocol. The Foolscap-based protocol offers: * **Peer authentication** by way of checked x509 certificates * **Message authentication** by way of TLS * **Message confidentiality** by way of TLS * A careful configuration of the TLS connection parameters *may* also offer **forward secrecy**. However, Tahoe-LAFS' use of Foolscap takes no steps to ensure this is the case. * **Storage authorization** by way of a capability contained in the fURL addressing a storage service. Discussion !!!!!!!!!! A client node relies on a storage node to persist certain data until a future retrieval request is made. In this way, the client node is vulnerable to attacks which cause the data not to be persisted. Though this vulnerability can be (and typically is) mitigated by including redundancy in the share encoding parameters for stored data, it is still sensible to attempt to minimize unnecessary vulnerability to this attack. 
One way to do this is for the client to be confident the storage node with which it is communicating is really the expected node. That is, for the client to perform **peer authentication** of the storage node it connects to. This allows it to develop a notion of that node's reputation over time. The more retrieval requests the node satisfies correctly the more it probably will satisfy correctly. Therefore, the protocol must include some means for verifying the identify of the storage node. The initialization of the client with the correct identity information is out of scope for this protocol (the system may be trust-on-first-use, there may be a third-party identity broker, etc). With confidence that communication is proceeding with the intended storage node, it must also be possible to trust that data is exchanged without modification. That is, the protocol must include some means to perform **message authentication**. This is most likely done using cryptographic MACs (such as those used in TLS). The messages which enable the mutable shares feature include secrets related to those shares. For example, the write enabler secret is used to restrict the parties with write access to mutable shares. It is exchanged over the network as part of a write operation. An attacker learning this secret can overwrite share data with garbage (lacking a separate encryption key, there is no way to write data which appears legitimate to a legitimate client). Therefore, **message confidentiality** is necessary when exchanging these secrets. **Forward secrecy** is preferred so that an attacker recording an exchange today cannot launch this attack at some future point after compromising the necessary keys. A storage service offers service only to some clients. A client proves their authorization to use the storage service by presenting a shared secret taken from the fURL. In this way **storage authorization** is performed to prevent disallowed parties from consuming any storage resources. Functionality ------------- Tahoe-LAFS application-level information must be transferred using this protocol. This information is exchanged with a dozen or so request/response-oriented messages. Some of these messages carry large binary payloads. Others are small structured-data messages. Some facility for expansion to support new information exchanges should also be present. Solutions --------- An HTTP-based protocol, dubbed "Great Black Swamp" (or "GBS"), is described below. This protocol aims to satisfy the above requirements at a lower level of complexity than the current Foolscap-based protocol. Summary (Non-normative) ~~~~~~~~~~~~~~~~~~~~~~~ Communication with the storage node will take place using TLS. The TLS version and configuration will be dictated by an ongoing understanding of best practices. The storage node will present an x509 certificate during the TLS handshake. Storage clients will require that the certificate have a valid signature. The Subject Public Key Information (SPKI) hash of the certificate will constitute the storage node's identity. The **tub id** portion of the storage node fURL will be replaced with the SPKI hash. When connecting to a storage node, the client will take the following steps to gain confidence it has reached the intended peer: * It will perform the usual cryptographic verification of the certificate presented by the storage server. That is, it will check that the certificate itself is well-formed, that it is currently valid [#]_, and that the signature it carries is valid. 
* It will compare the SPKI hash of the certificate to the expected value. The specifics of the comparison are the same as for the comparison specified by `RFC 7469`_ with "sha256" [#]_. To further clarify, consider this example. Alice operates a storage node. Alice generates a key pair and secures it properly. Alice generates a self-signed storage node certificate with the key pair. Alice's storage node announces (to an introducer) a NURL containing (among other information) the SPKI hash. Imagine the SPKI hash is ``i5xb...``. This results in a NURL of ``pb://i5xb...@example.com:443/g3m5...#v=1``. Bob creates a client node pointed at the same introducer. Bob's client node receives the announcement from Alice's storage node (indirected through the introducer). Bob's client node recognizes the NURL as referring to an HTTP-dialect server due to the ``v=1`` fragment. Bob's client node can now perform a TLS handshake with a server at the address in the NURL location hints (``example.com:443`` in this example). Following the above described validation procedures, Bob's client node can determine whether it has reached Alice's storage node or not. If and only if the validation procedure is successful does Bob's client node conclude it has reached Alice's storage node. **Peer authentication** has been achieved. Additionally, by continuing to interact using TLS, Bob's client and Alice's storage node are assured of both **message authentication** and **message confidentiality**. Bob's client further inspects the NURL for the *swissnum*. When Bob's client issues HTTP requests to Alice's storage node it includes the *swissnum* in its requests. **Storage authorization** has been achieved. .. note:: Foolscap TubIDs are 20 bytes (SHA1 digest of the certificate). They are encoded with `Base32`_ for a length of 32 bytes. SPKI information discussed here is 32 bytes (SHA256 digest). They would be encoded in `Base32`_ for a length of 52 bytes. `unpadded base64url`_ provides a more compact encoding of the information while remaining URL-compatible. This would encode the SPKI information for a length of merely 43 bytes. SHA1, the current Foolscap hash function, is not a practical choice at this time due to advances made in `attacking SHA1`_. The selection of a safe hash function with output smaller than SHA256 could be the subject of future improvements. A 224 bit hash function (SHA3-224, for example) might be suitable - improving the encoded length to 38 bytes. Transition ~~~~~~~~~~ To provide a seamless user experience during this protocol transition, there should be a period during which both protocols are supported by storage nodes. The GBS announcement will be introduced in a way that *updated client* software can recognize. Its introduction will also be made in such a way that *non-updated client* software disregards the new information (of which it cannot make any use). Storage nodes will begin to operate a new GBS server. They may re-use their existing x509 certificate or generate a new one. Generation of a new certificate allows for certain non-optimal conditions to be addressed: * The ``commonName`` of ``newpb_thingy`` may be changed to a more descriptive value. * A ``notValidAfter`` field with a timestamp in the past may be updated. Storage nodes will announce a new NURL for this new HTTP-based server. This NURL will be announced alongside their existing Foolscap-based server's fURL. 
Such an announcement will resemble this::

  {
      "anonymous-storage-FURL": "pb://...",          # The old entry
      "anonymous-storage-NURLs": ["pb://...#v=1"]    # The new, additional entry
  }

The transition process will proceed in three stages:

1. The first stage represents the starting conditions in which clients and servers can speak only Foolscap.
#. The intermediate stage represents a condition in which some clients and servers can both speak Foolscap and GBS.
#. The final stage represents the desired condition in which all clients and servers speak only GBS.

During the first stage only one client/server interaction is possible: the storage server announces only Foolscap and speaks only Foolscap.

During the final stage there is only one supported interaction: the client and server are both updated and speak GBS to each other.

During the intermediate stage there are four supported interactions:

1. Both the client and server are non-updated. The interaction is just as it would be during the first stage.
#. The client is updated and the server is non-updated. The client will see the Foolscap announcement and the lack of a GBS announcement. It will speak to the server using Foolscap.
#. The client is non-updated and the server is updated. The client will see the Foolscap announcement. It will speak Foolscap to the storage server.
#. Both the client and server are updated. The client will see the GBS announcement and disregard the Foolscap announcement. It will speak GBS to the server.

There is one further complication: the client maintains a cache of storage server information (to avoid continuing to rely on the introducer after it has been introduced). The following sequence of events is likely:

1. The client connects to an introducer.
#. It receives an announcement for a non-updated storage server (Foolscap only).
#. It caches this announcement.
#. At some point, the storage server is updated.
#. The client uses the information in its cache to open a Foolscap connection to the storage server.

Ideally, the client would not rely on an update from the introducer to give it the GBS NURL for the updated storage server. In practice, we have decided not to implement this functionality.

Server Details
--------------

The protocol primarily enables interaction with "resources" of two types: storage indexes and shares. A particular resource is addressed by the HTTP request path. Details about the interface are encoded in the HTTP message body.

String Encoding
~~~~~~~~~~~~~~~

.. _Base32:

Base32
!!!!!!

Where the specification refers to Base32 the meaning is *unpadded* Base32 encoding as specified by `RFC 4648`_ using a *lowercase variation* of the alphabet from Section 6. That is, the alphabet is:

.. list-table:: Base32 Alphabet
   :header-rows: 1

   * - Value
     - Encoding
     - Value
     - Encoding
     - Value
     - Encoding
     - Value
     - Encoding
   * - 0
     - a
     - 9
     - j
     - 18
     - s
     - 27
     - 3
   * - 1
     - b
     - 10
     - k
     - 19
     - t
     - 28
     - 4
   * - 2
     - c
     - 11
     - l
     - 20
     - u
     - 29
     - 5
   * - 3
     - d
     - 12
     - m
     - 21
     - v
     - 30
     - 6
   * - 4
     - e
     - 13
     - n
     - 22
     - w
     - 31
     - 7
   * - 5
     - f
     - 14
     - o
     - 23
     - x
     -
     -
   * - 6
     - g
     - 15
     - p
     - 24
     - y
     -
     -
   * - 7
     - h
     - 16
     - q
     - 25
     - z
     -
     -
   * - 8
     - i
     - 17
     - r
     - 26
     - 2
     -
     -

Message Encoding
~~~~~~~~~~~~~~~~

Clients and servers MUST use the ``Content-Type`` and ``Accept`` header fields as specified in `RFC 9110`_ for message body negotiation.

The encoding for HTTP message bodies SHOULD be `CBOR`_. Clients submitting requests using this encoding MUST include a ``Content-Type: application/cbor`` request header field.
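As a non-normative illustration, preparing such a request body with the third-party Python ``cbor2`` library (an assumption of this sketch, not a requirement of the specification) might look like::

    import cbor2  # third-party CBOR codec, assumed for this sketch

    # a small structured body of the shape used later for immutable bucket
    # allocation; the values are placeholders
    body = cbor2.dumps({"share-numbers": [1, 7], "allocated-size": 12345})

    headers = {
        "Content-Type": "application/cbor",  # the body is CBOR-encoded
        "Accept": "application/cbor",        # ask for a CBOR response
    }

    # decoding a CBOR response body is the mirror operation
    decoded = cbor2.loads(body)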
A request MAY be submitted using an alternate encoding by declaring this in the ``Content-Type`` header field. A request MAY indicate its preference for an alternate encoding in the response using the ``Accept`` header field. A request which includes no ``Accept`` header field MUST be interpreted in the same way as a request including a ``Accept: application/cbor`` header field. Clients and servers MAY support additional request and response message body encodings. Clients and servers SHOULD support ``application/json`` request and response message body encoding. For HTTP messages carrying binary share data, this is expected to be a particularly poor encoding. However, for HTTP messages carrying small payloads of strings, numbers, and containers it is expected that JSON will be more convenient than CBOR for ad hoc testing and manual interaction. For this same reason, JSON is used throughout for the examples presented here. Because of the simple types used throughout and the equivalence described in `RFC 7049`_ these examples should be representative regardless of which of these two encodings is chosen. There are two exceptions to this rule. 1. Sets !!!!!!! For CBOR messages, any sequence that is semantically a set (i.e. no repeated values allowed, order doesn't matter, and elements are hashable in Python) should be sent as a set. Tag 6.258 is used to indicate sets in CBOR; see `the CBOR registry `_ for more details. The JSON encoding does not support sets. Sets MUST be represented as arrays in JSON-encoded messages. 2. Bytes !!!!!!!! The CBOR encoding natively supports a bytes type while the JSON encoding does not. Bytes MUST be represented as strings giving the `Base64`_ representation of the original bytes value. HTTP Design ~~~~~~~~~~~ The HTTP interface described here is informed by the ideas of REST (Representational State Transfer). For ``GET`` requests query parameters are preferred over values encoded in the request body. For other requests query parameters are encoded into the message body. Many branches of the resource tree are conceived as homogenous containers: one branch contains all of the share data; another branch contains all of the lease data; etc. Clients and servers MUST use the ``Authorization`` header field, as specified in `RFC 9110`_, for authorization of all requests to all endpoints specified here. The authentication *type* MUST be ``Tahoe-LAFS``. Clients MUST present the `Base64`_-encoded representation of the swissnum from the NURL used to locate the storage service as the *credentials*. If credentials are not presented or the swissnum is not associated with a storage service then the server MUST issue a ``401 UNAUTHORIZED`` response and perform no other processing of the message. Requests to certain endpoints MUST include additional secrets in the ``X-Tahoe-Authorization`` headers field. The endpoints which require these secrets are: * ``PUT /storage/v1/lease/:storage_index``: The secrets included MUST be ``lease-renew-secret`` and ``lease-cancel-secret``. * ``POST /storage/v1/immutable/:storage_index``: The secrets included MUST be ``lease-renew-secret``, ``lease-cancel-secret``, and ``upload-secret``. * ``PATCH /storage/v1/immutable/:storage_index/:share_number``: The secrets included MUST be ``upload-secret``. * ``PUT /storage/v1/immutable/:storage_index/:share_number/abort``: The secrets included MUST be ``upload-secret``. 
* ``POST /storage/v1/mutable/:storage_index/read-test-write``: The secrets included MUST be ``lease-renew-secret``, ``lease-cancel-secret``, and ``write-enabler``. If these secrets are: 1. Missing. 2. The wrong length. 3. Not the expected kind of secret. 4. They are otherwise unparseable before they are actually semantically used. the server MUST respond with ``400 BAD REQUEST`` and perform no other processing of the message. 401 is not used because this isn't an authorization problem, this is a "you sent garbage and should know better" bug. If authorization using the secret fails, then the server MUST send a ``401 UNAUTHORIZED`` response and perform no other processing of the message. Encoding ~~~~~~~~ * ``storage_index`` MUST be `Base32`_ encoded in URLs. * ``share_number`` MUST be a decimal representation General ~~~~~~~ ``GET /storage/v1/version`` !!!!!!!!!!!!!!!!!!!!!!!!!!! This endpoint allows clients to retrieve some basic metadata about a storage server from the storage service. The response MUST validate against this CDDL schema:: {'http://allmydata.org/tahoe/protocols/storage/v1' => { 'maximum-immutable-share-size' => uint 'maximum-mutable-share-size' => uint 'available-space' => uint } 'application-version' => bstr } The server SHOULD populate as many fields as possible with accurate information about its behavior. For fields which relate to a specific API the semantics are documented below in the section for that API. For fields that are more general than a single API the semantics are as follows: * available-space: The server SHOULD use this field to advertise the amount of space that it currently considers unused and is willing to allocate for client requests. The value is a number of bytes. ``PUT /storage/v1/lease/:storage_index`` !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! Either renew or create a new lease on the bucket addressed by ``storage_index``. The renew secret and cancellation secret should be included as ``X-Tahoe-Authorization`` headers. For example:: X-Tahoe-Authorization: lease-renew-secret X-Tahoe-Authorization: lease-cancel-secret If the ``lease-renew-secret`` value matches an existing lease then the expiration time of that lease will be changed to 31 days after the time of this operation. If it does not match an existing lease then a new lease will be created with this ``lease-renew-secret`` which expires 31 days after the time of this operation. ``lease-renew-secret`` and ``lease-cancel-secret`` values must be 32 bytes long. The server treats them as opaque values. :ref:`Share Leases` gives details about how the Tahoe-LAFS storage client constructs these values. In these cases the response is ``NO CONTENT`` with an empty body. It is possible that the storage server will have no shares for the given ``storage_index`` because: * no such shares have ever been uploaded. * a previous lease expired and the storage server reclaimed the storage by deleting the shares. In these cases the server takes no action and returns ``NOT FOUND``. Discussion `````````` We considered an alternative where ``lease-renew-secret`` and ``lease-cancel-secret`` are placed in query arguments on the request path. This increases chances of leaking secrets in logs. Putting the secrets in the body reduces the chances of leaking secrets, but eventually we chose headers as the least likely information to be logged. Several behaviors here are blindly copied from the Foolscap-based storage server protocol. 
* There is a cancel secret but there is no API to use it to cancel a lease (see ticket:3768). * The lease period is hard-coded at 31 days. These are not necessarily ideal behaviors but they are adopted to avoid any *semantic* changes between the Foolscap- and HTTP-based protocols. It is expected that some or all of these behaviors may change in a future revision of the HTTP-based protocol. Immutable --------- Writing ~~~~~~~ ``POST /storage/v1/immutable/:storage_index`` !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! Initialize an immutable storage index with some buckets. The server MUST allow share data to be written to the buckets at most one time. The server MAY create a lease for the buckets. Details of the buckets to create are encoded in the request body. The request body MUST validate against this CDDL schema:: { share-numbers: #6.258([0*256 uint]) allocated-size: uint } For example:: {"share-numbers": [1, 7, ...], "allocated-size": 12345} The server SHOULD accept a value for **allocated-size** that is less than or equal to the lesser of the values of the server's version message's **maximum-immutable-share-size** or **available-space** values. The request MUST include ``X-Tahoe-Authorization`` HTTP headers that set the various secrets—upload, lease renewal, lease cancellation—that will be later used to authorize various operations. For example:: X-Tahoe-Authorization: lease-renew-secret X-Tahoe-Authorization: lease-cancel-secret X-Tahoe-Authorization: upload-secret The response body MUST include encoded information about the created buckets. The response body MUST validate against this CDDL schema:: { already-have: #6.258([0*256 uint]) allocated: #6.258([0*256 uint]) } For example:: {"already-have": [1, ...], "allocated": [7, ...]} The upload secret is an opaque _byte_ string. Handling repeat calls: * If the same API call is repeated with the same upload secret, the response is the same and no change is made to server state. This is necessary to ensure retries work in the face of lost responses from the server. * If the API calls is with a different upload secret, this implies a new client, perhaps because the old client died. Or it may happen because the client wants to upload a different share number than a previous client. New shares will be created, existing shares will be unchanged, regardless of whether the upload secret matches or not. Discussion `````````` We considered making this ``POST /storage/v1/immutable`` instead. The motivation was to keep *storage index* out of the request URL. Request URLs have an elevated chance of being logged by something. We were concerned that having the *storage index* logged may increase some risks. However, we decided this does not matter because: * the *storage index* can only be used to retrieve (not decrypt) the ciphertext-bearing share. * the *storage index* is already persistently present on the storage node in the form of directory names in the storage servers ``shares`` directory. * the request is made via HTTPS and so only Tahoe-LAFS can see the contents, therefore no proxy servers can perform any extra logging. * Tahoe-LAFS itself does not currently log HTTP request URLs. The response includes ``already-have`` and ``allocated`` for two reasons: * If an upload is interrupted and the client loses its local state that lets it know it already uploaded some shares then this allows it to discover this fact (by inspecting ``already-have``) and only upload the missing shares (indicated by ``allocated``). 
* If an upload has completed a client may still choose to re-balance storage by moving shares between servers. This might be because a server has become unavailable and a remaining server needs to store more shares for the upload. It could also just be that the client's preferred servers have changed. Regarding upload secrets, the goal is for uploading and aborting (see next sections) to be authenticated by more than just the storage index. In the future, we may want to generate them in a way that allows resuming/canceling when the client has issues. In the short term, they can just be a random byte string. The primary security constraint is that each upload to each server has its own unique upload key, tied to uploading that particular storage index to this particular server. Rejected designs for upload secrets: * Upload secret per share number. In order to make the secret unguessable by attackers, which includes other servers, it must contain randomness. Randomness means there is no need to have a secret per share, since adding share-specific content to randomness doesn't actually make the secret any better. ``PATCH /storage/v1/immutable/:storage_index/:share_number`` !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! Write data for the indicated share. The share number MUST belong to the storage index. The request body MUST be the raw share data (i.e., ``application/octet-stream``). The request MUST include a *Content-Range* header field; for large transfers this allows partially complete uploads to be resumed. For example, a 1MiB share can be divided in to eight separate 128KiB chunks. Each chunk can be uploaded in a separate request. Each request can include a *Content-Range* value indicating its placement within the complete share. If any one of these requests fails then at most 128KiB of upload work needs to be retried. The server MUST recognize when all of the data has been received and mark the share as complete (which it can do because it was informed of the size when the storage index was initialized). The request MUST include a ``X-Tahoe-Authorization`` header that includes the upload secret:: X-Tahoe-Authorization: upload-secret Responses: * When a chunk that does not complete the share is successfully uploaded the response MUST be ``OK``. The response body MUST indicate the range of share data that has yet to be uploaded. The response body MUST validate against this CDDL schema:: { required: [0* {begin: uint, end: uint}] } For example:: { "required": [ { "begin": , "end": } , ... ] } * When the chunk that completes the share is successfully uploaded the response MUST be ``CREATED``. * If the *Content-Range* for a request covers part of the share that has already, and the data does not match already written data, the response MUST be ``CONFLICT``. In this case the client MUST abort the upload. The client MAY then restart the upload from scratch. Discussion `````````` ``PUT`` verbs are only supposed to be used to replace the whole resource, thus the use of ``PATCH``. From RFC 7231:: An origin server that allows PUT on a given target resource MUST send a 400 (Bad Request) response to a PUT request that contains a Content-Range header field (Section 4.2 of [RFC7233]), since the payload is likely to be partial content that has been mistakenly PUT as a full representation. 
Partial content updates are possible by targeting a separately identified resource with state that overlaps a portion of the larger resource, or by using a different method that has been specifically defined for partial updates (for example, the PATCH method defined in [RFC5789]). ``PUT /storage/v1/immutable/:storage_index/:share_number/abort`` !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! This cancels an *in-progress* upload. The request MUST include a ``X-Tahoe-Authorization`` header that includes the upload secret:: X-Tahoe-Authorization: upload-secret If there is an incomplete upload with a matching upload-secret then the server MUST consider the abort to have succeeded. In this case the response MUST be ``OK``. The server MUST respond to all future requests as if the operations related to this upload did not take place. If there is no incomplete upload with a matching upload-secret then the server MUST respond with ``Method Not Allowed`` (405). The server MUST make no client-visible changes to its state in this case. ``POST /storage/v1/immutable/:storage_index/:share_number/corrupt`` !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! Advise the server the data read from the indicated share was corrupt. The request body includes an human-meaningful text string with details about the corruption. It also includes potentially important details about the share. The request body MUST validate against this CDDL schema:: { reason: tstr .size (1..32765) } For example:: {"reason": "expected hash abcd, got hash efgh"} The report pertains to the immutable share with a **storage index** and **share number** given in the request path. If the identified **storage index** and **share number** are known to the server then the response SHOULD be accepted and made available to server administrators. In this case the response SHOULD be ``OK``. If the response is not accepted then the response SHOULD be ``Not Found`` (404). Discussion `````````` The seemingly odd length limit on ``reason`` is chosen so that the *encoded* representation of the message is limited to 32768. Reading ~~~~~~~ ``GET /storage/v1/immutable/:storage_index/shares`` !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! Retrieve a list (semantically, a set) indicating all shares available for the indicated storage index. The response body MUST validate against this CDDL schema:: #6.258([0*256 uint]) For example:: [1, 5] If the **storage index** in the request path is not known to the server then the response MUST include an empty list. ``GET /storage/v1/immutable/:storage_index/:share_number`` !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! Read a contiguous sequence of bytes from one share in one bucket. The response body MUST be the raw share data (i.e., ``application/octet-stream``). The ``Range`` header MAY be used to request exactly one ``bytes`` range, in which case the response code MUST be ``Partial Content`` (206). Interpretation and response behavior MUST be as specified in RFC 7233 § 4.1. Multiple ranges in a single request are *not* supported; open-ended ranges are also not supported. Clients MUST NOT send requests using these features. If the response reads beyond the end of the data, the response MUST be shorter than the requested range. It MUST contain all data up to the end of the share and then end. The resulting ``Content-Range`` header MUST be consistent with the returned data. 
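For illustration only (non-normative), a minimal ranged read of an immutable share might look like the following Python sketch; the host, swissnum, storage index, and share number are placeholders, and the TLS/SPKI validation described earlier is omitted::

    from base64 import b64encode
    from http.client import HTTPSConnection

    host, swissnum = "storage.example", b"example-swissnum"   # placeholders
    storage_index, share_number = "aaaaaaaaaaaaaaaa", 7        # placeholders

    conn = HTTPSConnection(host, 443)  # GBS certificate checking omitted here
    conn.request(
        "GET",
        "/storage/v1/immutable/%s/%d" % (storage_index, share_number),
        headers={
            "Authorization": "Tahoe-LAFS " + b64encode(swissnum).decode("ascii"),
            "Range": "bytes=0-65535",  # exactly one closed bytes range
        },
    )
    response = conn.getresponse()
    assert response.status == 206  # Partial Content for a ranged read
    data = response.read()         # may be shorter than requested near end-of-share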
If the response to a query is an empty range, the server MUST send a ``No Content`` (204) response. Discussion `````````` Multiple ``bytes`` ranges are not supported. HTTP requires that the ``Content-Type`` of the response in that case be ``multipart/...``. The ``multipart`` major type brings along string sentinel delimiting as a means to frame the different response parts. There are many drawbacks to this framing technique: 1. It is resource-intensive to generate. 2. It is resource-intensive to parse. 3. It is complex to parse safely [#]_ [#]_ [#]_ [#]_. A previous revision of this specification allowed requesting one or more contiguous sequences from one or more shares. This *superficially* mirrored the Foolscap based interface somewhat closely. The interface was simplified to this version because this version is all that is required to let clients retrieve any desired information. It only requires that the client issue multiple requests. This can be done with pipelining or parallel requests to avoid an additional latency penalty. In the future, if there are performance goals, benchmarks can demonstrate whether they are achieved by a more complicated interface or some other change. Mutable ------- Writing ~~~~~~~ ``POST /storage/v1/mutable/:storage_index/read-test-write`` !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! General purpose read-test-and-write operation for mutable storage indexes. A mutable storage index is also called a "slot" (particularly by the existing Tahoe-LAFS codebase). The first write operation on a mutable storage index creates it (that is, there is no separate "create this storage index" operation as there is for the immutable storage index type). The request MUST include ``X-Tahoe-Authorization`` headers with write enabler and lease secrets:: X-Tahoe-Authorization: write-enabler X-Tahoe-Authorization: lease-cancel-secret X-Tahoe-Authorization: lease-renew-secret The request body MUST include test, read, and write vectors for the operation. The request body MUST validate against this CDDL schema:: { "test-write-vectors": { 0*256 share_number : { "test": [0*30 {"offset": uint, "size": uint, "specimen": bstr}] "write": [* {"offset": uint, "data": bstr}] "new-length": uint / null } } "read-vector": [0*30 {"offset": uint, "size": uint}] } share_number = uint For example:: { "test-write-vectors": { 0: { "test": [{ "offset": 3, "size": 5, "specimen": "hello" }, ...], "write": [{ "offset": 9, "data": "world" }, ...], "new-length": 5 } }, "read-vector": [{"offset": 3, "size": 12}, ...] } The response body contains a boolean indicating whether the tests all succeed (and writes were applied) and a mapping giving read data (pre-write). The response body MUST validate against this CDDL schema:: { "success": bool, "data": {0*256 share_number: [0* bstr]} } share_number = uint For example:: { "success": true, "data": { 0: ["foo"], 5: ["bar"], ... } } A client MAY send a test vector or read vector to bytes beyond the end of existing data. In this case a server MUST behave as if the test or read vector referred to exactly as much data exists. For example, consider the case where the server has 5 bytes of data for a particular share. If a client sends a read vector with an ``offset`` of 1 and a ``size`` of 4 then the server MUST respond with all of the data except the first byte. If a client sends a read vector with the same ``offset`` and a ``size`` of 5 (or any larger value) then the server MUST respond in the same way. 
Similarly, if there is no data at all, an empty byte string is returned no matter what the offset or length. Reading ~~~~~~~ ``GET /storage/v1/mutable/:storage_index/shares`` !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! Retrieve a set indicating all shares available for the indicated storage index. The response body MUST validate against this CDDL schema:: #6.258([0*256 uint]) For example:: [1, 5] ``GET /storage/v1/mutable/:storage_index/:share_number`` !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! Read data from the indicated mutable shares, just like ``GET /storage/v1/immutable/:storage_index``. The response body MUST be the raw share data (i.e., ``application/octet-stream``). The ``Range`` header MAY be used to request exactly one ``bytes`` range, in which case the response code MUST be ``Partial Content`` (206). Interpretation and response behavior MUST be specified in RFC 7233 § 4.1. Multiple ranges in a single request are *not* supported; open-ended ranges are also not supported. Clients MUST NOT send requests using these features. If the response reads beyond the end of the data, the response MUST be shorter than the requested range. It MUST contain all data up to the end of the share and then end. The resulting ``Content-Range`` header MUST be consistent with the returned data. If the response to a query is an empty range, the server MUST send a ``No Content`` (204) response. ``POST /storage/v1/mutable/:storage_index/:share_number/corrupt`` !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! Advise the server the data read from the indicated share was corrupt. Just like the immutable version. Sample Interactions ------------------- This section contains examples of client/server interactions to help illuminate the above specification. This section is non-normative. Immutable Data ~~~~~~~~~~~~~~ 1. Create a bucket for storage index ``AAAAAAAAAAAAAAAA`` to hold two immutable shares, discovering that share ``1`` was already uploaded:: POST /storage/v1/immutable/AAAAAAAAAAAAAAAA Authorization: Tahoe-LAFS nurl-swissnum X-Tahoe-Authorization: lease-renew-secret efgh X-Tahoe-Authorization: lease-cancel-secret jjkl X-Tahoe-Authorization: upload-secret xyzf {"share-numbers": [1, 7], "allocated-size": 48} 200 OK {"already-have": [1], "allocated": [7]} #. Upload the content for immutable share ``7``:: PATCH /storage/v1/immutable/AAAAAAAAAAAAAAAA/7 Authorization: Tahoe-LAFS nurl-swissnum Content-Range: bytes 0-15/48 X-Tahoe-Authorization: upload-secret xyzf 200 OK { "required": [ {"begin": 16, "end": 48 } ] } PATCH /storage/v1/immutable/AAAAAAAAAAAAAAAA/7 Authorization: Tahoe-LAFS nurl-swissnum Content-Range: bytes 16-31/48 X-Tahoe-Authorization: upload-secret xyzf 200 OK { "required": [ {"begin": 32, "end": 48 } ] } PATCH /storage/v1/immutable/AAAAAAAAAAAAAAAA/7 Authorization: Tahoe-LAFS nurl-swissnum Content-Range: bytes 32-47/48 X-Tahoe-Authorization: upload-secret xyzf 201 CREATED #. Download the content of the previously uploaded immutable share ``7``:: GET /storage/v1/immutable/AAAAAAAAAAAAAAAA?share=7 Authorization: Tahoe-LAFS nurl-swissnum Range: bytes=0-47 200 OK Content-Range: bytes 0-47/48 #. Renew the lease on all immutable shares in bucket ``AAAAAAAAAAAAAAAA``:: PUT /storage/v1/lease/AAAAAAAAAAAAAAAA Authorization: Tahoe-LAFS nurl-swissnum X-Tahoe-Authorization: lease-cancel-secret jjkl X-Tahoe-Authorization: lease-renew-secret efgh 204 NO CONTENT Mutable Data ~~~~~~~~~~~~ 1. 
Create mutable share number ``3`` with ``10`` bytes of data in slot ``BBBBBBBBBBBBBBBB``. The special test vector of size 1 but empty bytes will only pass if there is no existing share, otherwise it will read a byte which won't match `b""`:: POST /storage/v1/mutable/BBBBBBBBBBBBBBBB/read-test-write Authorization: Tahoe-LAFS nurl-swissnum X-Tahoe-Authorization: write-enabler abcd X-Tahoe-Authorization: lease-cancel-secret efgh X-Tahoe-Authorization: lease-renew-secret ijkl { "test-write-vectors": { 3: { "test": [{ "offset": 0, "size": 1, "specimen": "" }], "write": [{ "offset": 0, "data": "xxxxxxxxxx" }], "new-length": 10 } }, "read-vector": [] } 200 OK { "success": true, "data": [] } #. Safely rewrite the contents of a known version of mutable share number ``3`` (or fail):: POST /storage/v1/mutable/BBBBBBBBBBBBBBBB/read-test-write Authorization: Tahoe-LAFS nurl-swissnum X-Tahoe-Authorization: write-enabler abcd X-Tahoe-Authorization: lease-cancel-secret efgh X-Tahoe-Authorization: lease-renew-secret ijkl { "test-write-vectors": { 3: { "test": [{ "offset": 0, "size": , "specimen": "" }], "write": [{ "offset": 0, "data": "yyyyyyyyyy" }], "new-length": 10 } }, "read-vector": [] } 200 OK { "success": true, "data": [] } #. Download the contents of share number ``3``:: GET /storage/v1/mutable/BBBBBBBBBBBBBBBB?share=3 Authorization: Tahoe-LAFS nurl-swissnum Range: bytes=0-16 200 OK Content-Range: bytes 0-15/16 #. Renew the lease on previously uploaded mutable share in slot ``BBBBBBBBBBBBBBBB``:: PUT /storage/v1/lease/BBBBBBBBBBBBBBBB Authorization: Tahoe-LAFS nurl-swissnum X-Tahoe-Authorization: lease-cancel-secret efgh X-Tahoe-Authorization: lease-renew-secret ijkl 204 NO CONTENT .. _Base64: https://www.rfc-editor.org/rfc/rfc4648#section-4 .. _RFC 4648: https://tools.ietf.org/html/rfc4648 .. _RFC 7469: https://tools.ietf.org/html/rfc7469#section-2.4 .. _RFC 7049: https://tools.ietf.org/html/rfc7049#section-4 .. _RFC 9110: https://tools.ietf.org/html/rfc9110 .. _CBOR: http://cbor.io/ .. [#] The security value of checking ``notValidBefore`` and ``notValidAfter`` is not entirely clear. The arguments which apply to web-facing certificates do not seem to apply (due to the decision for Tahoe-LAFS to operate independently of the web-oriented CA system). Arguably, complexity is reduced by allowing an existing TLS implementation which wants to make these checks make them (compared to including additional code to either bypass them or disregard their results). Reducing complexity, at least in general, is often good for security. On the other hand, checking the validity time period forces certificate regeneration (which comes with its own set of complexity). A possible compromise is to recommend certificates with validity periods of many years or decades. "Recommend" may be read as "provide software supporting the generation of". What about key theft? If certificates are valid for years then a successful attacker can pretend to be a valid storage node for years. However, short-validity-period certificates are no help in this case. The attacker can generate new, valid certificates using the stolen keys. Therefore, the only recourse to key theft (really *identity theft*) is to burn the identity and generate a new one. Burning the identity is a non-trivial task. It is worth solving but it is not solved here. .. 
[#] More simply::

        from hashlib import sha256
        from cryptography.hazmat.primitives.serialization import (
            Encoding,
            PublicFormat,
        )
        from pybase64 import urlsafe_b64encode

        def check_tub_id(cert, tub_id):
            # cert is the x509 certificate presented by the storage server;
            # tub_id is the expected value taken from the NURL.
            spki_bytes = cert.public_key().public_bytes(
                Encoding.DER, PublicFormat.SubjectPublicKeyInfo)
            spki_sha256 = sha256(spki_bytes).digest()
            # strip the "=" padding to get the unpadded base64url form
            spki_encoded = urlsafe_b64encode(spki_sha256).rstrip(b"=")
            assert spki_encoded == tub_id

    Note we use `unpadded base64url`_ rather than the Foolscap- and Tahoe-LAFS-preferred Base32.

.. [#] https://www.cvedetails.com/cve/CVE-2017-5638/
.. [#] https://pivotal.io/security/cve-2018-1272
.. [#] https://nvd.nist.gov/vuln/detail/CVE-2017-5124
.. [#] https://efail.de/

.. _unpadded base64url: https://tools.ietf.org/html/rfc7515#appendix-C
.. _attacking SHA1: https://en.wikipedia.org/wiki/SHA-1#Attacks

tahoe_lafs-1.20.0/docs/specifications/index.rst

Specifications
==============

This section contains various attempts at writing detailed specifications of the data formats used by Tahoe.

.. toctree::
   :maxdepth: 2

   outline
   url
   uri
   file-encoding
   URI-extension
   mutable
   dirnodes
   lease
   servers-of-happiness
   backends/raic
   http-storage-node-protocol

tahoe_lafs-1.20.0/docs/specifications/lease.rst

.. -*- coding: utf-8 -*-

.. _share leases:

Share Leases
============

A lease is a marker attached to a share indicating that some client has asked for that share to be retained for some amount of time. The intent is to allow clients and servers to collaborate to determine which data should still be retained and which can be discarded to reclaim storage space. Zero or more leases may be attached to any particular share.

Renewal Secrets
---------------

Each lease is uniquely identified by its **renewal secret**. This is a 32 byte string which can be used to extend the validity period of that lease.

To a storage server a renewal secret is an opaque value which is only ever compared to other renewal secrets to determine equality.

Storage clients will typically want to follow a scheme to deterministically derive the renewal secret for a particular share from information the client already holds about that share. This allows a client to maintain and renew a single long-lived lease without maintaining additional local state.

The scheme in use in Tahoe-LAFS as of 1.16.0 is as follows.

* The **netstring encoding** of a byte string is the concatenation of:

  * the ascii encoding of the base 10 representation of the length of the string
  * ``":"``
  * the string itself
  * ``","``

* The **sha256d digest** is the **sha256 digest** of the **sha256 digest** of a string.
* The **sha256d tagged digest** is the **sha256d digest** of the concatenation of the **netstring encoding** of one string with one other unmodified string.
* The **sha256d tagged pair digest** is the **sha256d digest** of the concatenation of the **netstring encodings** of each of three strings.
* The **bucket renewal tag** is ``"allmydata_bucket_renewal_secret_v1"``.
* The **file renewal tag** is ``"allmydata_file_renewal_secret_v1"``.
* The **client renewal tag** is ``"allmydata_client_renewal_secret_v1"``.
* The **lease secret** is a 32 byte string, typically randomly generated once and then persisted for all future uses.
* The **client renewal secret** is the **sha256d tagged digest** of (**lease secret**, **client renewal tag**).
* The **storage index** is constructed using a capability-type-specific scheme.
  See ``storage_index_hash`` and ``ssk_storage_index_hash`` calls in ``src/allmydata/uri.py``.

* The **file renewal secret** is the **sha256d tagged pair digest** of (**file renewal tag**, **client renewal secret**, **storage index**).
* The **base32 encoding** is ``base64.b32encode`` lowercased and with trailing ``=`` stripped.
* The **peer id** is the **base32 encoding** of the SHA1 digest of the server's x509 certificate.
* The **renewal secret** is the **sha256d tagged pair digest** of (**bucket renewal tag**, **file renewal secret**, **peer id**).

A reference implementation is available.

.. literalinclude:: derive_renewal_secret.py
   :language: python
   :linenos:

Cancel Secrets
--------------

Lease cancellation is unimplemented. Nevertheless, a cancel secret is sent by storage clients to storage servers and stored in lease records.

The scheme for deriving **cancel secret** in use in Tahoe-LAFS as of 1.16.0 is similar to that used to derive the **renewal secret**.

The differences are:

* Use of **client renewal tag** is replaced by use of **client cancel tag**.
* Use of **file renewal tag** is replaced by use of **file cancel tag**.
* Use of **bucket renewal tag** is replaced by use of **bucket cancel tag**.
* **client cancel tag** is ``"allmydata_client_cancel_secret_v1"``.
* **file cancel tag** is ``"allmydata_file_cancel_secret_v1"``.
* **bucket cancel tag** is ``"allmydata_bucket_cancel_secret_v1"``.

tahoe_lafs-1.20.0/docs/specifications/mut.svg
   [SVG figure: mutable-file encoding overview; labels include salt, encryption key, write key, read key, signing (private) key, verifying (public) key, encrypted signing key, read-write cap, read-only cap, verify cap, SHA256d (truncated), AES-CTR, plaintext, ciphertext, FEC, shares (share 1..share 4), Merkle Tree.]

tahoe_lafs-1.20.0/docs/specifications/mutable.rst

.. -*- coding: utf-8-with-signature -*-

=============
Mutable Files
=============

1.  `Mutable Formats`_
2.  `Consistency vs. Availability`_
3.  `The Prime Coordination Directive: "Don't Do That"`_
4.  `Small Distributed Mutable Files`_

    1. `SDMF slots overview`_
    2. `Server Storage Protocol`_
    3. `Code Details`_
    4. `SMDF Slot Format`_
    5. `Recovery`_

5.  `Medium Distributed Mutable Files`_
6.  `Large Distributed Mutable Files`_
7.  `TODO`_

Mutable files are places with a stable identifier that can hold data that changes over time. In contrast to immutable slots, for which the identifier/capability is derived from the contents themselves, the mutable file identifier remains fixed for the life of the slot, regardless of what data is placed inside it.

Each mutable file is referenced by two different caps. The "read-write" cap grants read-write access to its holder, allowing them to put whatever contents they like into the slot. The "read-only" cap is less powerful, only granting read access, and not enabling modification of the data. The read-write cap can be turned into the read-only cap, but not the other way around.

The data in these files is distributed over a number of servers, using the same erasure coding that immutable files use, with 3-of-10 being a typical choice of encoding parameters. The data is encrypted and signed in such a way that only the holders of the read-write cap will be able to set the contents of the slot, and only the holders of the read-only cap will be able to read those contents.
Holders of either cap will be able to validate the contents as being written by someone with the read-write cap. The servers who hold the shares are not automatically given the ability read or modify them: the worst they can do is deny service (by deleting or corrupting the shares), or attempt a rollback attack (which can only succeed with the cooperation of at least k servers). Mutable Formats =============== History ------- When mutable files first shipped in Tahoe-0.8.0 (15-Feb-2008), the only version available was "SDMF", described below. This was a limited-functionality placeholder, intended to be replaced with improved-efficiency "MDMF" files shortly afterwards. The development process took longer than expected, and MDMF didn't ship until Tahoe-1.9.0 (31-Oct-2011), and even then it was opt-in (not used by default). SDMF was intended for relatively small mutable files, up to a few megabytes. It uses only one segment, so alacrity (the measure of how quickly the first byte of plaintext is returned to the client) suffers, as the whole file must be downloaded even if you only want to get a single byte. The memory used by both clients and servers also scales with the size of the file, instead of being limited to the half-a-MB-or-so that immutable file operations use, so large files cause significant memory usage. To discourage the use of SDMF outside it's design parameters, the early versions of Tahoe enforced a maximum size on mutable files (maybe 10MB). Since most directories are built out of mutable files, this imposed a limit of about 30k entries per directory. In subsequent releases, this limit was removed, but the performance problems inherent in the SDMF implementation remained. In the summer of 2010, Google-Summer-of-Code student Kevan Carstensen took on the project of finally implementing MDMF. Because of my (Brian) design mistake in SDMF (not including a separate encryption seed in each segment), the share format for SDMF could not be used for MDMF, resulting in a larger gap between the two implementations (my original intention had been to make SDMF a clean subset of MDMF, where any single-segment MDMF file could be handled by the old SDMF code). In the fall of 2011, Kevan's code was finally integrated, and first made available in the Tahoe-1.9.0 release. SDMF vs. MDMF ------------- The improvement of MDMF is the use of multiple segments: individual 128-KiB sections of the file can be retrieved or modified independently. The improvement can be seen when fetching just a portion of the file (using a Range: header on the webapi), or when modifying a portion (again with a Range: header). It can also be seen indirectly when fetching the whole file: the first segment of data should be delivered faster from a large MDMF file than from an SDMF file, although the overall download will then proceed at the same rate. We've decided to make it opt-in for now: mutable files default to SDMF format unless explicitly configured to use MDMF, either in ``tahoe.cfg`` (see :doc:`../configuration`) or in the WUI or CLI command that created a new mutable file. The code can read and modify existing files of either format without user intervention. We expect to make MDMF the default in a subsequent release, perhaps 2.0. Which format should you use? SDMF works well for files up to a few MB, and can be handled by older versions (Tahoe-1.8.3 and earlier). 
If you do not need to support older clients, want to efficiently work with mutable files, and have code which will use Range: headers that make partial reads and writes, then MDMF is for you. Consistency vs. Availability ============================ There is an age-old battle between consistency and availability. Epic papers have been written, elaborate proofs have been established, and generations of theorists have learned that you cannot simultaneously achieve guaranteed consistency with guaranteed reliability. In addition, the closer to 0 you get on either axis, the cost and complexity of the design goes up. Tahoe's design goals are to largely favor design simplicity, then slightly favor read availability, over the other criteria. As we develop more sophisticated mutable slots, the API may expose multiple read versions to the application layer. The tahoe philosophy is to defer most consistency recovery logic to the higher layers. Some applications have effective ways to merge multiple versions, so inconsistency is not necessarily a problem (i.e. directory nodes can usually merge multiple "add child" operations). The Prime Coordination Directive: "Don't Do That" ================================================= The current rule for applications which run on top of Tahoe is "do not perform simultaneous uncoordinated writes". That means you need non-tahoe means to make sure that two parties are not trying to modify the same mutable slot at the same time. For example: * don't give the read-write URI to anyone else. Dirnodes in a private directory generally satisfy this case, as long as you don't use two clients on the same account at the same time * if you give a read-write URI to someone else, stop using it yourself. An inbox would be a good example of this. * if you give a read-write URI to someone else, call them on the phone before you write into it * build an automated mechanism to have your agents coordinate writes. For example, we expect a future release to include a FURL for a "coordination server" in the dirnodes. The rule can be that you must contact the coordination server and obtain a lock/lease on the file before you're allowed to modify it. If you do not follow this rule, Bad Things will happen. The worst-case Bad Thing is that the entire file will be lost. A less-bad Bad Thing is that one or more of the simultaneous writers will lose their changes. An observer of the file may not see monotonically-increasing changes to the file, i.e. they may see version 1, then version 2, then 3, then 2 again. Tahoe takes some amount of care to reduce the badness of these Bad Things. One way you can help nudge it from the "lose your file" case into the "lose some changes" case is to reduce the number of competing versions: multiple versions of the file that different parties are trying to establish as the one true current contents. Each simultaneous writer counts as a "competing version", as does the previous version of the file. If the count "S" of these competing versions is larger than N/k, then the file runs the risk of being lost completely. [TODO] If at least one of the writers remains running after the collision is detected, it will attempt to recover, but if S>(N/k) and all writers crash after writing a few shares, the file will be lost. Note that Tahoe uses serialization internally to make sure that a single Tahoe node will not perform simultaneous modifications to a mutable file. 
It accomplishes this by using a weakref cache of the MutableFileNode (so that there will never be two distinct MutableFileNodes for the same file), and by forcing all mutable file operations to obtain a per-node lock before they run. The Prime Coordination Directive therefore applies to inter-node conflicts, not intra-node ones.

Small Distributed Mutable Files
===============================

SDMF slots are suitable for small (<1MB) files that are edited by rewriting the entire file. The three operations are:

* allocate (with initial contents)
* set (with new contents)
* get (old contents)

The first use of SDMF slots will be to hold directories (dirnodes), which map encrypted child names to rw-URI/ro-URI pairs.

SDMF slots overview
-------------------

Each SDMF slot is created with a public/private key pair. The public key is known as the "verification key", while the private key is called the "signature key". The private key is hashed and truncated to 16 bytes to form the "write key" (an AES symmetric key). The write key is then hashed and truncated to form the "read key". The read key is hashed and truncated to form the 16-byte "storage index" (a unique string used as an index to locate stored data). The public key is hashed by itself to form the "verification key hash".

The write key is hashed a different way to form the "write enabler master". For each storage server on which a share is kept, the write enabler master is concatenated with the server's nodeid and hashed, and the result is called the "write enabler" for that particular server. Note that multiple shares of the same slot stored on the same server will all get the same write enabler, i.e. the write enabler is associated with the "bucket", rather than the individual shares.

The private key is encrypted (using AES in counter mode) by the write key, and the resulting crypttext is stored on the servers, so it will be retrievable by anyone who knows the write key. The write key is not used to encrypt anything else, and the private key never changes, so we do not need an IV for this purpose.

The actual data is encrypted (using AES in counter mode) with a key derived by concatenating the readkey with the IV, then hashing the result and truncating to 16 bytes. The IV is randomly generated each time the slot is updated, and stored next to the encrypted data.

The read-write URI consists of the write key and the verification key hash. The read-only URI contains the read key and the verification key hash. The verify-only URI contains the storage index and the verification key hash. ::

 URI:SSK-RW:b2a(writekey):b2a(verification_key_hash)
 URI:SSK-RO:b2a(readkey):b2a(verification_key_hash)
 URI:SSK-Verify:b2a(storage_index):b2a(verification_key_hash)

Note that this allows the read-only and verify-only URIs to be derived from the read-write URI without actually retrieving the public keys. Also note that it means the read-write agent must validate both the private key and the public key when they are first fetched. All users validate the public key in exactly the same way.

The SDMF slot is allocated by sending a request to the storage server with a desired size, the storage index, and the write enabler for that server's nodeid. If granted, the write enabler is stashed inside the slot's backing store file. All further write requests must be accompanied by the write enabler or they will not be honored. The storage server does not share the write enabler with anyone else. The SDMF slot structure will be described in more detail below.
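Before turning to the slot structure itself, the derivation chain just described can be sketched in a few lines. This is an illustration only: the real implementation uses tagged hashes (see ``allmydata.util.hashutil``), and the tag strings and exact constructions below are made up for clarity rather than copied from the code::

    import hashlib

    def _hash(tag, *inputs):
        # Simplified stand-in for Tahoe's tagged hashes.
        d = hashlib.sha256(tag)
        for x in inputs:
            d.update(x)
        return d.digest()

    def derive_sdmf_secrets(signature_key_bytes, verification_key_bytes):
        # Inputs are the serialized private ("signature") and public
        # ("verification") keys. The derivation order follows the text above.
        write_key = _hash(b"illustrative-writekey", signature_key_bytes)[:16]
        read_key = _hash(b"illustrative-readkey", write_key)[:16]
        storage_index = _hash(b"illustrative-storage-index", read_key)[:16]
        verification_key_hash = _hash(b"illustrative-pubkey-hash", verification_key_bytes)
        write_enabler_master = _hash(b"illustrative-we-master", write_key)
        return (write_key, read_key, storage_index,
                verification_key_hash, write_enabler_master)

    def write_enabler_for(write_enabler_master, server_nodeid):
        # One write enabler per server: derived from the master plus that
        # server's nodeid, so all shares in the same bucket share one enabler.
        return _hash(b"illustrative-write-enabler", write_enabler_master, server_nodeid)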
The important pieces are: * a sequence number * a root hash "R" * the encoding parameters (including k, N, file size, segment size) * a signed copy of [seqnum,R,encoding_params], using the signature key * the verification key (not encrypted) * the share hash chain (part of a Merkle tree over the share hashes) * the block hash tree (Merkle tree over blocks of share data) * the share data itself (erasure-coding of read-key-encrypted file data) * the signature key, encrypted with the write key The access pattern for read is: * hash read-key to get storage index * use storage index to locate 'k' shares with identical 'R' values * either get one share, read 'k' from it, then read k-1 shares * or read, say, 5 shares, discover k, either get more or be finished * or copy k into the URIs * read verification key * hash verification key, compare against verification key hash * read seqnum, R, encoding parameters, signature * verify signature against verification key * read share data, compute block-hash Merkle tree and root "r" * read share hash chain (leading from "r" to "R") * validate share hash chain up to the root "R" * submit share data to erasure decoding * decrypt decoded data with read-key * submit plaintext to application The access pattern for write is: * hash write-key to get read-key, hash read-key to get storage index * use the storage index to locate at least one share * read verification key and encrypted signature key * decrypt signature key using write-key * hash signature key, compare against write-key * hash verification key, compare against verification key hash * encrypt plaintext from application with read-key * application can encrypt some data with the write-key to make it only available to writers (use this for transitive read-onlyness of dirnodes) * erasure-code crypttext to form shares * split shares into blocks * compute Merkle tree of blocks, giving root "r" for each share * compute Merkle tree of shares, find root "R" for the file as a whole * create share data structures, one per server: * use seqnum which is one higher than the old version * share hash chain has log(N) hashes, different for each server * signed data is the same for each server * now we have N shares and need homes for them * walk through peers * if share is not already present, allocate-and-set * otherwise, try to modify existing share: * send testv_and_writev operation to each one * testv says to accept share if their(seqnum+R) <= our(seqnum+R) * count how many servers wind up with which versions (histogram over R) * keep going until N servers have the same version, or we run out of servers * if any servers wound up with a different version, report error to application * if we ran out of servers, initiate recovery process (described below) Server Storage Protocol ----------------------- The storage servers will provide a mutable slot container which is oblivious to the details of the data being contained inside it. Each storage index refers to a "bucket", and each bucket has one or more shares inside it. (In a well-provisioned network, each bucket will have only one share). The bucket is stored as a directory, using the base32-encoded storage index as the directory name. Each share is stored in a single file, using the share number as the filename. 
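The naming rule in the previous paragraph can be written down directly. This sketch only illustrates that rule; the real server code uses Tahoe's own lowercase base32 helper and adds more directory structure around the buckets::

    import base64
    from pathlib import Path

    def bucket_dir(storage_root: Path, storage_index: bytes) -> Path:
        # Bucket directory named by the base32-encoded storage index
        # (lower-case, trailing '=' padding removed).
        si_b32 = base64.b32encode(storage_index).decode("ascii").rstrip("=").lower()
        return storage_root / si_b32

    def share_path(storage_root: Path, storage_index: bytes, sharenum: int) -> Path:
        # Each share is a single file named by its share number.
        return bucket_dir(storage_root, storage_index) / str(sharenum)

    # e.g. share_path(Path("storage/shares"), storage_index, 0)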
The container holds space for a container magic number (for versioning), the write enabler, the nodeid which accepted the write enabler (used for share migration, described below), a small number of lease structures, the embedded data itself, and expansion space for additional lease structures:: # offset size name 1 0 32 magic verstr "Tahoe mutable container v1\n\x75\x09\x44\x03\x8e" 2 32 20 write enabler's nodeid 3 52 32 write enabler 4 84 8 data size (actual share data present) (a) 5 92 8 offset of (8) count of extra leases (after data) 6 100 368 four leases, 92 bytes each 0 4 ownerid (0 means "no lease here") 4 4 expiration timestamp 8 32 renewal token 40 32 cancel token 72 20 nodeid which accepted the tokens 7 468 (a) data 8 ?? 4 count of extra leases 9 ?? n*92 extra leases The "extra leases" field must be copied and rewritten each time the size of the enclosed data changes. The hope is that most buckets will have four or fewer leases and this extra copying will not usually be necessary. The (4) "data size" field contains the actual number of bytes of data present in field (7), such that a client request to read beyond 504+(a) will result in an error. This allows the client to (one day) read relative to the end of the file. The container size (that is, (8)-(7)) might be larger, especially if extra size was pre-allocated in anticipation of filling the container with a lot of data. The offset in (5) points at the *count* of extra leases, at (8). The actual leases (at (9)) begin 4 bytes later. If the container size changes, both (8) and (9) must be relocated by copying. The server will honor any write commands that provide the write token and do not exceed the server-wide storage size limitations. Read and write commands MUST be restricted to the 'data' portion of the container: the implementation of those commands MUST perform correct bounds-checking to make sure other portions of the container are inaccessible to the clients. The two methods provided by the storage server on these "MutableSlot" share objects are: * readv(ListOf(offset=int, length=int)) * returns a list of bytestrings, of the various requested lengths * offset < 0 is interpreted relative to the end of the data * spans which hit the end of the data will return truncated data * testv_and_writev(write_enabler, test_vector, write_vector) * this is a test-and-set operation which performs the given tests and only applies the desired writes if all tests succeed. This is used to detect simultaneous writers, and to reduce the chance that an update will lose data recently written by some other party (written after the last time this slot was read). * test_vector=ListOf(TupleOf(offset, length, opcode, specimen)) * the opcode is a string, from the set [gt, ge, eq, le, lt, ne] * each element of the test vector is read from the slot's data and compared against the specimen using the desired (in)equality. 
If all tests evaluate True, the write is performed * write_vector=ListOf(TupleOf(offset, newdata)) * offset < 0 is not yet defined, it probably means relative to the end of the data, which probably means append, but we haven't nailed it down quite yet * write vectors are executed in order, which specifies the results of overlapping writes * return value: * error: OutOfSpace * error: something else (io error, out of memory, whatever) * (True, old_test_data): the write was accepted (test_vector passed) * (False, old_test_data): the write was rejected (test_vector failed) * both 'accepted' and 'rejected' return the old data that was used for the test_vector comparison. This can be used by the client to detect write collisions, including collisions for which the desired behavior was to overwrite the old version. In addition, the storage server provides several methods to access these share objects: * allocate_mutable_slot(storage_index, sharenums=SetOf(int)) * returns DictOf(int, MutableSlot) * get_mutable_slot(storage_index) * returns DictOf(int, MutableSlot) * or raises KeyError We intend to add an interface which allows small slots to allocate-and-write in a single call, as well as do update or read in a single call. The goal is to allow a reasonably-sized dirnode to be created (or updated, or read) in just one round trip (to all N shareholders in parallel). migrating shares ```````````````` If a share must be migrated from one server to another, two values become invalid: the write enabler (since it was computed for the old server), and the lease renew/cancel tokens. Suppose that a slot was first created on nodeA, and was thus initialized with WE(nodeA) (= H(WEM+nodeA)). Later, for provisioning reasons, the share is moved from nodeA to nodeB. Readers may still be able to find the share in its new home, depending upon how many servers are present in the grid, where the new nodeid lands in the permuted index for this particular storage index, and how many servers the reading client is willing to contact. When a client attempts to write to this migrated share, it will get a "bad write enabler" error, since the WE it computes for nodeB will not match the WE(nodeA) that was embedded in the share. When this occurs, the "bad write enabler" message must include the old nodeid (e.g. nodeA) that was in the share. The client then computes H(nodeB+H(WEM+nodeA)), which is the same as H(nodeB+WE(nodeA)). The client sends this along with the new WE(nodeB), which is H(WEM+nodeB). Note that the client only sends WE(nodeB) to nodeB, never to anyone else. Also note that the client does not send a value to nodeB that would allow the node to impersonate the client to a third node: everything sent to nodeB will include something specific to nodeB in it. The server locally computes H(nodeB+WE(nodeA)), using its own node id and the old write enabler from the share. It compares this against the value supplied by the client. If they match, this serves as proof that the client was able to compute the old write enabler. The server then accepts the client's new WE(nodeB) and writes it into the container. This WE-fixup process requires an extra round trip, and requires the error message to include the old nodeid, but does not require any public key operations on either client or server. Migrating the leases will require a similar protocol. This protocol will be defined concretely at a later date. Code Details ------------ The MutableFileNode class is used to manipulate mutable files (as opposed to ImmutableFileNodes). 
These are initially generated with client.create_mutable_file(), and later recreated from URIs with client.create_node_from_uri(). Instances of this class will contain a URI and a reference to the client (for peer selection and connection). NOTE: this section is out of date. Please see src/allmydata/interfaces.py (the section on IMutableFilesystemNode) for more accurate information. The methods of MutableFileNode are: * download_to_data() -> [deferred] newdata, NotEnoughSharesError * if there are multiple retrieveable versions in the grid, get() returns the first version it can reconstruct, and silently ignores the others. In the future, a more advanced API will signal and provide access to the multiple heads. * update(newdata) -> OK, UncoordinatedWriteError, NotEnoughSharesError * overwrite(newdata) -> OK, UncoordinatedWriteError, NotEnoughSharesError download_to_data() causes a new retrieval to occur, pulling the current contents from the grid and returning them to the caller. At the same time, this call caches information about the current version of the file. This information will be used in a subsequent call to update(), and if another change has occured between the two, this information will be out of date, triggering the UncoordinatedWriteError. update() is therefore intended to be used just after a download_to_data(), in the following pattern:: d = mfn.download_to_data() d.addCallback(apply_delta) d.addCallback(mfn.update) If the update() call raises UCW, then the application can simply return an error to the user ("you violated the Prime Coordination Directive"), and they can try again later. Alternatively, the application can attempt to retry on its own. To accomplish this, the app needs to pause, download the new (post-collision and post-recovery) form of the file, reapply their delta, then submit the update request again. A randomized pause is necessary to reduce the chances of colliding a second time with another client that is doing exactly the same thing:: d = mfn.download_to_data() d.addCallback(apply_delta) d.addCallback(mfn.update) def _retry(f): f.trap(UncoordinatedWriteError) d1 = pause(random.uniform(5, 20)) d1.addCallback(lambda res: mfn.download_to_data()) d1.addCallback(apply_delta) d1.addCallback(mfn.update) return d1 d.addErrback(_retry) Enthusiastic applications can retry multiple times, using a randomized exponential backoff between each. A particularly enthusiastic application can retry forever, but such apps are encouraged to provide a means to the user of giving up after a while. UCW does not mean that the update was not applied, so it is also a good idea to skip the retry-update step if the delta was already applied:: d = mfn.download_to_data() d.addCallback(apply_delta) d.addCallback(mfn.update) def _retry(f): f.trap(UncoordinatedWriteError) d1 = pause(random.uniform(5, 20)) d1.addCallback(lambda res: mfn.download_to_data()) def _maybe_apply_delta(contents): new_contents = apply_delta(contents) if new_contents != contents: return mfn.update(new_contents) d1.addCallback(_maybe_apply_delta) return d1 d.addErrback(_retry) update() is the right interface to use for delta-application situations, like directory nodes (in which apply_delta might be adding or removing child entries from a serialized table). Note that any uncoordinated write has the potential to lose data. 
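The randomized exponential backoff mentioned above could be wrapped up as follows. This is a sketch in the same Deferred style as the snippets above, reusing the same hypothetical ``pause`` and ``apply_delta`` helpers; ``max_tries`` and the delay formula are arbitrary choices::

    import random

    def update_with_backoff(mfn, apply_delta, max_tries=5):
        def _attempt(ignored, try_number):
            d = mfn.download_to_data()
            d.addCallback(apply_delta)
            d.addCallback(mfn.update)
            d.addErrback(_maybe_retry, try_number)
            return d
        def _maybe_retry(f, try_number):
            f.trap(UncoordinatedWriteError)
            if try_number >= max_tries:
                return f   # give up and let the application report the error
            # randomized, roughly exponential pause before trying again
            d1 = pause(random.uniform(0, 5 * 2 ** try_number))
            d1.addCallback(_attempt, try_number + 1)
            return d1
        return _attempt(None, 1)

As with the second snippet above, a more careful version would re-check whether the delta still needs to be applied before re-submitting the update.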
We must do more analysis to be sure, but it appears that two clients who write to the same mutable file at the same time (even if both eventually retry) will, with high probability, result in one client observing UCW and the other silently losing their changes. It is also possible for both clients to observe UCW. The moral of the story is that the Prime Coordination Directive is there for a reason, and that recovery/UCW/retry is not a subsitute for write coordination. overwrite() tells the client to ignore this cached version information, and to unconditionally replace the mutable file's contents with the new data. This should not be used in delta application, but rather in situations where you want to replace the file's contents with completely unrelated ones. When raw files are uploaded into a mutable slot through the Tahoe-LAFS web-API (using POST and the ?mutable=true argument), they are put in place with overwrite(). The peer-selection and data-structure manipulation (and signing/verification) steps will be implemented in a separate class in allmydata/mutable.py . SMDF Slot Format ---------------- This SMDF data lives inside a server-side MutableSlot container. The server is oblivious to this format. This data is tightly packed. In particular, the share data is defined to run all the way to the beginning of the encrypted private key (the encprivkey offset is used both to terminate the share data and to begin the encprivkey). :: # offset size name 1 0 1 version byte, \x00 for this format 2 1 8 sequence number. 2^64-1 must be handled specially, TBD 3 9 32 "R" (root of share hash Merkle tree) 4 41 16 IV (share data is AES(H(readkey+IV)) ) 5 57 18 encoding parameters: 57 1 k 58 1 N 59 8 segment size 67 8 data length (of original plaintext) 6 75 32 offset table: 75 4 (8) signature 79 4 (9) share hash chain 83 4 (10) block hash tree 87 4 (11) share data 91 8 (12) encrypted private key 99 8 (13) EOF 7 107 436ish verification key (2048 RSA key) 8 543ish 256ish signature=RSAsign(sigkey, H(version+seqnum+r+IV+encparm)) 9 799ish (a) share hash chain, encoded as: "".join([pack(">H32s", shnum, hash) for (shnum,hash) in needed_hashes]) 10 (927ish) (b) block hash tree, encoded as: "".join([pack(">32s",hash) for hash in block_hash_tree]) 11 (935ish) LEN share data (no gap between this and encprivkey) 12 ?? 1216ish encrypted private key= AESenc(write-key, RSA-key) 13 ?? -- EOF (a) The share hash chain contains ceil(log(N)) hashes, each 32 bytes long. This is the set of hashes necessary to validate this share's leaf in the share Merkle tree. For N=10, this is 4 hashes, i.e. 128 bytes. (b) The block hash tree contains ceil(length/segsize) hashes, each 32 bytes long. This is the set of hashes necessary to validate any given block of share data up to the per-share root "r". Each "r" is a leaf of the share has tree (with root "R"), from which a minimal subset of hashes is put in the share hash chain in (8). Recovery -------- The first line of defense against damage caused by colliding writes is the Prime Coordination Directive: "Don't Do That". The second line of defense is to keep "S" (the number of competing versions) lower than N/k. If this holds true, at least one competing version will have k shares and thus be recoverable. Note that server unavailability counts against us here: the old version stored on the unavailable server must be included in the value of S. 
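To make the arithmetic concrete, here is the pigeonhole argument in miniature, assuming every one of the N shares ends up assigned to exactly one competing version::

    import math

    def some_version_recoverable(N, k, S):
        # If all N shares are split among S competing versions, the
        # best-represented version holds at least ceil(N/S) shares.
        return math.ceil(N / S) >= k

    # 3-of-10 encoding (k=3, N=10):
    #   S = 3 -> ceil(10/3) = 4 >= 3: at least one version is recoverable
    #   S = 5 -> ceil(10/5) = 2 <  3: no version is guaranteed to reach k shares

If writers crash before placing all of their shares, the assumption above no longer holds, which is the loss scenario noted earlier.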
The third line of defense is our use of testv_and_writev() (described above), which increases the convergence of simultaneous writes: one of the writers will be favored (the one with the highest "R"), and that version is more likely to be accepted than the others. This defense is least effective in the pathological situation where S simultaneous writers are active, the one with the lowest "R" writes to N-k+1 of the shares and then dies, then the one with the next-lowest "R" writes to N-2k+1 of the shares and dies, etc, until the one with the highest "R" writes to k-1 shares and dies. Any other sequencing will allow the highest "R" to write to at least k shares and establish a new revision.

The fourth line of defense is the fact that each client keeps writing until at least one version has N shares. This uses additional servers, if necessary, to make sure that either the client's version or some newer/overriding version is highly available.

The fifth line of defense is the recovery algorithm, which seeks to make sure that at least *one* version is highly available, even if that version is somebody else's.

The write-shares-to-peers algorithm is as follows:

* permute peers according to storage index
* walk through peers, trying to assign one share per peer
* for each peer:
* send testv_and_writev, using "old(seqnum+R) <= our(seqnum+R)" as the test
* this means that we will overwrite any old versions, and we will overwrite simultaneous writers of the same version if our R is higher. We will not overwrite writers using a higher seqnum.
* record the version that each share winds up with. If the write was accepted, this is our own version. If it was rejected, read the old_test_data to find out what version was retained.
* if old_test_data indicates the seqnum was equal or greater than our own, mark the "Simultaneous Writes Detected" flag, which will eventually result in an error being reported to the writer (in their close() call).
* build a histogram of "R" values
* repeat until the histogram indicates that some version (possibly ours) has N shares. Use new servers if necessary.
* If we run out of servers:
* if there are at least shares-of-happiness of any one version, we're happy, so return. (the close() might still get an error)
* not happy, need to reinforce something, goto RECOVERY

Recovery:

* read all shares, count the versions, identify the recoverable ones, discard the unrecoverable ones.
* sort versions: locate max(seqnums), put all versions with that seqnum in the list, sort by number of outstanding shares. Then put our own version. (TODO: put versions with seqnum us ahead of us?).
* for each version:
* attempt to recover that version
* if not possible, remove it from the list, go to next one
* if recovered, start at beginning of peer list, push that version, continue until N shares are placed
* if pushing our own version, bump up the seqnum to one higher than the max seqnum we saw
* if we run out of servers:
* schedule retry and exponential backoff to repeat RECOVERY
* admit defeat after some period? Presumably the client will be shut down eventually, maybe keep trying (once per hour?) until then.

Medium Distributed Mutable Files
================================

These are just like the SDMF case, but:

* We actually take advantage of the Merkle hash tree over the blocks, by reading a single segment of data at a time (and its necessary hashes), to reduce the read-time alacrity.
* We allow arbitrary writes to any range of the file.
* We add more code to first read each segment that a write must modify. This looks exactly like the way a normal filesystem uses a block device, or how a CPU must perform a cache-line fill before modifying a single word. * We might implement some sort of copy-based atomic update server call, to allow multiple writev() calls to appear atomic to any readers. MDMF slots provide fairly efficient in-place edits of very large files (a few GB). Appending data is also fairly efficient. Large Distributed Mutable Files =============================== LDMF slots (not implemented) would use a fundamentally different way to store the file, inspired by Mercurial's "revlog" format. This would enable very efficient insert/remove/replace editing of arbitrary spans. Multiple versions of the file can be retained, in a revision graph that can have multiple heads. Each revision can be referenced by a cryptographic identifier. There are two forms of the URI, one that means "most recent version", and a longer one that points to a specific revision. Metadata can be attached to the revisions, like timestamps, to enable rolling back an entire tree to a specific point in history. LDMF1 provides deltas but tries to avoid dealing with multiple heads. LDMF2 provides explicit support for revision identifiers and branching. TODO ==== improve allocate-and-write or get-writer-buckets API to allow one-call (or maybe two-call) updates. The challenge is in figuring out which shares are on which machines. First cut will have lots of round trips. (eventually) define behavior when seqnum wraps. At the very least make sure it can't cause a security problem. "the slot is worn out" is acceptable. (eventually) define share-migration lease update protocol. Including the nodeid who accepted the lease is useful, we can use the same protocol as we do for updating the write enabler. However we need to know which lease to update.. maybe send back a list of all old nodeids that we find, then try all of them when we accept the update? We now do this in a specially-formatted IndexError exception: "UNABLE to renew non-existent lease. I have leases accepted by " + "nodeids: '12345','abcde','44221' ." confirm that a repairer can regenerate shares without the private key. Hmm, without the write-enabler they won't be able to write those shares to the servers.. although they could add immutable new shares to new servers. tahoe_lafs-1.20.0/docs/specifications/outline.rst0000644000000000000000000002654313615410400016775 0ustar00.. -*- coding: utf-8-with-signature -*- ============================== Specification Document Outline ============================== While we do not yet have a clear set of specification documents for Tahoe (explaining the file formats, so that others can write interoperable implementations), this document is intended to lay out an outline for what these specs ought to contain. Think of this as the ISO 7-Layer Model for Tahoe. We currently imagine 4 documents. 1. `#1: Share Format, Encoding Algorithm`_ 2. `#2: Share Exchange Protocol`_ 3. `#3: Server Selection Algorithm, filecap format`_ 4. `#4: Directory Format`_ #1: Share Format, Encoding Algorithm ==================================== This document will describe the way that files are encrypted and encoded into shares. It will include a specification of the share format, and explain both the encoding and decoding algorithms. It will cover both mutable and immutable files. 
The immutable encoding algorithm, as described by this document, will start with a plaintext series of bytes, encoding parameters "k" and "N", and either an encryption key or a mechanism for deterministically deriving the key from the plaintext (the CHK specification). The algorithm will end with a set of N shares, and a set of values that must be included in the filecap to provide confidentiality (the encryption key) and integrity (the UEB hash). The immutable decoding algorithm will start with the filecap values (key and UEB hash) and "k" shares. It will explain how to validate the shares against the integrity information, how to reverse the erasure-coding, and how to decrypt the resulting ciphertext. It will result in the original plaintext bytes (or some subrange thereof). The sections on mutable files will contain similar information. This document is *not* responsible for explaining the filecap format, since full filecaps may need to contain additional information as described in document #3. Likewise it it not responsible for explaining where to put the generated shares or where to find them again later. It is also not responsible for explaining the access control mechanisms surrounding share upload, download, or modification ("Accounting" is the business of controlling share upload to conserve space, and mutable file shares require some sort of access control to prevent non-writecap holders from destroying shares). We don't yet have a document dedicated to explaining these, but let's call it "Access Control" for now. #2: Share Exchange Protocol =========================== This document explains the wire-protocol used to upload, download, and modify shares on the various storage servers. Given the N shares created by the algorithm described in document #1, and a set of servers who are willing to accept those shares, the protocols in this document will be sufficient to get the shares onto the servers. Likewise, given a set of servers who hold at least k shares, these protocols will be enough to retrieve the shares necessary to begin the decoding process described in document #1. The notion of a "storage index" is used to reference a particular share: the storage index is generated by the encoding process described in document #1. This document does *not* describe how to identify or choose those servers, rather it explains what to do once they have been selected (by the mechanisms in document #3). This document also explains the protocols that a client uses to ask a server whether or not it is willing to accept an uploaded share, and whether it has a share available for download. These protocols will be used by the mechanisms in document #3 to help decide where the shares should be placed. Where cryptographic mechanisms are necessary to implement access-control policy, this document will explain those mechanisms. In the future, Tahoe will be able to use multiple protocols to speak to storage servers. There will be alternative forms of this document, one for each protocol. The first one to be written will describe the Foolscap-based protocol that tahoe currently uses, but we anticipate a subsequent one to describe a more HTTP-based protocol. #3: Server Selection Algorithm, filecap format ============================================== This document has two interrelated purposes. With a deeper understanding of the issues, we may be able to separate these more cleanly in the future. The first purpose is to explain the server selection algorithm. 
Given a set of N shares, where should those shares be uploaded? Given some information stored about a previously-uploaded file, how should a downloader locate and recover at least k shares? Given a previously-uploaded mutable file, how should a modifier locate all (or most of) the shares with a reasonable amount of work? This question implies many things, all of which should be explained in this document: * the notion of a "grid", nominally a set of servers who could potentially hold shares, which might change over time * a way to configure which grid should be used * a way to discover which servers are a part of that grid * a way to decide which servers are reliable enough to be worth sending shares * an algorithm to handle servers which refuse shares * a way for a downloader to locate which servers have shares * a way to choose which shares should be used for download The server-selection algorithm has several obviously competing goals: * minimize the amount of work that must be done during upload * minimize the total storage resources used * avoid "hot spots", balance load among multiple servers * maximize the chance that enough shares will be downloadable later, by uploading lots of shares, and by placing them on reliable servers * minimize the work that the future downloader must do * tolerate temporary server failures, permanent server departure, and new server insertions * minimize the amount of information that must be added to the filecap The server-selection algorithm is defined in some context: some set of expectations about the servers or grid with which it is expected to operate. Different algorithms are appropriate for different situtations, so there will be multiple alternatives of this document. The first version of this document will describe the algorithm that the current (1.3.0) release uses, which is heavily weighted towards the two main use case scenarios for which Tahoe has been designed: the small, stable friendnet, and the allmydata.com managed grid. In both cases, we assume that the storage servers are online most of the time, they are uniformly highly reliable, and that the set of servers does not change very rapidly. The server-selection algorithm for this environment uses a permuted server list to achieve load-balancing, uses all servers identically, and derives the permutation key from the storage index to avoid adding a new field to the filecap. An alternative algorithm could give clients more precise control over share placement, for example by a user who wished to make sure that k+1 shares are located in each datacenter (to allow downloads to take place using only local bandwidth). This algorithm could skip the permuted list and use other mechanisms to accomplish load-balancing (or ignore the issue altogether). It could add additional information to the filecap (like a list of which servers received the shares) in lieu of performing a search at download time, perhaps at the expense of allowing a repairer to move shares to a new server after the initial upload. It might make up for this by storing "location hints" next to each share, to indicate where other shares are likely to be found, and obligating the repairer to update these hints. The second purpose of this document is to explain the format of the file capability string (or "filecap" for short). There are multiple kinds of capabilties (read-write, read-only, verify-only, repaircap, lease-renewal cap, traverse-only, etc). 
There are multiple ways to represent the filecap (compressed binary, human-readable, clickable-HTTP-URL, "tahoe:" URL, etc), but they must all contain enough information to reliably retrieve a file (given some context, of course). It must at least contain the confidentiality and integrity information from document #1 (i.e. the encryption key and the UEB hash). It must also contain whatever additional information the upload-time server-selection algorithm generated that will be required by the downloader. For some server-selection algorithms, the additional information will be minimal. For example, the 1.3.0 release uses the hash of the encryption key as a storage index, and uses the storage index to permute the server list, and uses an Introducer to learn the current list of servers. This allows a "close-enough" list of servers to be compressed into a filecap field that is already required anyways (the encryption key). It also adds k and N to the filecap, to speed up the downloader's search (the downloader knows how many shares it needs, so it can send out multiple queries in parallel). But other server-selection algorithms might require more information. Each variant of this document will explain how to encode that additional information into the filecap, and how to extract and use that information at download time. These two purposes are interrelated. A filecap that is interpreted in the context of the allmydata.com commercial grid, which uses tahoe-1.3.0, implies a specific peer-selection algorithm, a specific Introducer, and therefore a fairly-specific set of servers to query for shares. A filecap which is meant to be interpreted on a different sort of grid would need different information. Some filecap formats can be designed to contain more information (and depend less upon context), such as the way an HTTP URL implies the existence of a single global DNS system. Ideally a tahoe filecap should be able to specify which "grid" it lives in, with enough information to allow a compatible implementation of Tahoe to locate that grid and retrieve the file (regardless of which server-selection algorithm was used for upload). This more-universal format might come at the expense of reliability, however. Tahoe-1.3.0 filecaps do not contain hostnames, because the failure of DNS or an individual host might then impact file availability (however the Introducer contains DNS names or IP addresses). #4: Directory Format ==================== Tahoe directories are a special way of interpreting and managing the contents of a file (either mutable or immutable). These "dirnode" files are basically serialized tables that map child name to filecap/dircap. This document describes the format of these files. Tahoe-1.3.0 directories are "transitively readonly", which is accomplished by applying an additional layer of encryption to the list of child writecaps. The key for this encryption is derived from the containing file's writecap. This document must explain how to derive this key and apply it to the appropriate portion of the table. Future versions of the directory format are expected to contain "deep-traversal caps", which allow verification/repair of files without exposing their plaintext to the repair agent. This document wil be responsible for explaining traversal caps too. Future versions of the directory format will probably contain an index and more advanced data structures (for efficiency and fast lookups), instead of a simple flat list of (childname, childcap). 
This document will also need to describe metadata formats, including what access-control policies are defined for the metadata. tahoe_lafs-1.20.0/docs/specifications/servers-of-happiness.rst0000644000000000000000000002151413615410400021372 0ustar00.. -*- coding: utf-8-with-signature -*- ==================== Servers of Happiness ==================== When you upload a file to a Tahoe-LAFS grid, you expect that it will stay there for a while, and that it will do so even if a few of the peers on the grid stop working, or if something else goes wrong. An upload health metric helps to make sure that this actually happens. An upload health metric is a test that looks at a file on a Tahoe-LAFS grid and says whether or not that file is healthy; that is, whether it is distributed on the grid in such a way as to ensure that it will probably survive in good enough shape to be recoverable, even if a few things go wrong between the time of the test and the time that it is recovered. Our current upload health metric for immutable files is called 'servers-of-happiness'; its predecessor was called 'shares-of-happiness'. shares-of-happiness used the number of encoded shares generated by a file upload to say whether or not it was healthy. If there were more shares than a user-configurable threshold, the file was reported to be healthy; otherwise, it was reported to be unhealthy. In normal situations, the upload process would distribute shares fairly evenly over the peers in the grid, and in that case shares-of-happiness worked fine. However, because it only considered the number of shares, and not where they were on the grid, it could not detect situations where a file was unhealthy because most or all of the shares generated from the file were stored on one or two peers. servers-of-happiness addresses this by extending the share-focused upload health metric to also consider the location of the shares on grid. servers-of-happiness looks at the mapping of peers to the shares that they hold, and compares the cardinality of the largest happy subset of those to a user-configurable threshold. A happy subset of peers has the property that any k (where k is as in k-of-n encoding) peers within the subset can reconstruct the source file. This definition of file health provides a stronger assurance of file availability over time; with 3-of-10 encoding, and happy=7, a healthy file is still guaranteed to be available even if 4 peers fail. Measuring Servers of Happiness ============================== We calculate servers-of-happiness by computing a matching on a bipartite graph that is related to the layout of shares on the grid. One set of vertices is the peers on the grid, and one set of vertices is the shares. An edge connects a peer and a share if the peer will (or does, for existing shares) hold the share. The size of the maximum matching on this graph is the size of the largest happy peer set that exists for the upload. First, note that a bipartite matching of size n corresponds to a happy subset of size n. This is because a bipartite matching of size n implies that there are n peers such that each peer holds a share that no other peer holds. Then any k of those peers collectively hold k distinct shares, and can restore the file. A bipartite matching of size n is not necessary for a happy subset of size n, however (so it is not correct to say that the size of the maximum matching on this graph is the size of the largest happy subset of peers that exists for the upload). 
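A maximum matching for such a server/share graph can be computed with any standard bipartite-matching routine. The following is a minimal, illustrative sketch using a plain augmenting-path search (this is not Tahoe's actual implementation); it also reproduces the worked example discussed next::

    def happiness(server_to_shares):
        # server_to_shares maps a server id to the set of share numbers it
        # holds (or would hold). Returns the size of a maximum matching.
        share_owner = {}   # share number -> server currently matched to it

        def try_assign(server, visited):
            for share in server_to_shares[server]:
                if share in visited:
                    continue
                visited.add(share)
                other = share_owner.get(share)
                # take the share if it is free, or if its current owner
                # can be re-matched to a different share
                if other is None or try_assign(other, visited):
                    share_owner[share] = server
                    return True
            return False

        return sum(1 for server in server_to_shares if try_assign(server, set()))

    # The example below: k = 3, ten peers each hold the same three shares,
    # so the maximum matching has only 3 edges.
    layout = {"server%d" % i: {0, 1, 2} for i in range(10)}
    assert happiness(layout) == 3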
For example, consider a file with k = 3, and suppose that each peer has all three of those pieces. Then, since any peer from the original upload can restore the file, if there are 10 peers holding shares, and the happiness threshold is 7, the upload should be declared happy, because there is a happy subset of size 10, and 10 > 7. However, since a maximum matching on the bipartite graph related to this layout has only 3 edges, Tahoe-LAFS declares the upload unhealthy. Though it is not unhealthy, a share layout like this example is inefficient; for k = 3, and if there are n peers, it corresponds to an expansion factor of 10x. Layouts that are declared healthy by the bipartite graph matching approach have the property that they correspond to uploads that are either already relatively efficient in their utilization of space, or can be made to be so by deleting shares; and that place all of the shares that they generate, enabling redistribution of shares later without having to re-encode the file. Also, it is computationally reasonable to compute a maximum matching in a bipartite graph, and there are well-studied algorithms to do that. Issues ====== The uploader is good at detecting unhealthy upload layouts, but it doesn't always know how to make an unhealthy upload into a healthy upload if it is possible to do so; it attempts to redistribute shares to achieve happiness, but only in certain circumstances. The redistribution algorithm isn't optimal, either, so even in these cases it will not always find a happy layout if one can be arrived at through redistribution. We are investigating improvements to address these issues. We don't use servers-of-happiness for mutable files yet; this fix will likely come in Tahoe-LAFS version 1.13. ============================ Upload Strategy of Happiness ============================ As mentioned above, the uploader is good at detecting instances which do not pass the servers-of-happiness test, but the share distribution algorithm is not always successful in instances where happiness can be achieved. A new placement algorithm designed to pass the servers-of-happiness test, titled 'Upload Strategy of Happiness', is meant to fix these instances where the uploader is unable to achieve happiness. Calculating Share Placements ============================ We calculate share placement like so: 0. Start with an ordered list of servers. Maybe *2N* of them. 1. Query all servers for existing shares. 1a. Query remaining space from all servers. Every server that has enough free space is considered "readwrite" and every server with too little space is "readonly". 2. Construct a bipartite graph G1 of *readonly* servers to pre-existing shares, where an edge exists between an arbitrary readonly server S and an arbitrary share T if and only if S currently holds T. 3. Calculate a maximum matching graph of G1 (a set of S->T edges that has or is-tied-for the highest "happiness score"). There is a clever efficient algorithm for this, named "Ford-Fulkerson". There may be more than one maximum matching for this graph; we choose one of them arbitrarily, but prefer earlier servers. Call this particular placement M1. The placement maps shares to servers, where each share appears at most once, and each server appears at most once. 4. Construct a bipartite graph G2 of readwrite servers to pre-existing shares. Then remove any edge (from G2) that uses a server or a share found in M1. Let an edge exist between server S and share T if and only if S already holds T. 5. 
Calculate a maximum matching graph of G2, call this M2, again preferring earlier servers. 6. Construct a bipartite graph G3 of (only readwrite) servers to shares (some shares may already exist on a server). Then remove (from G3) any servers and shares used in M1 or M2 (note that we retain servers/shares that were in G1/G2 but *not* in the M1/M2 subsets) 7. Calculate a maximum matching graph of G3, call this M3, preferring earlier servers. The final placement table is the union of M1+M2+M3. 8. Renew the shares on their respective servers from M1 and M2. 9. Upload share T to server S if an edge exists between S and T in M3. 10. If any placements from step 9 fail, mark the server as read-only. Go back to step 2 (since we may discover a server is/has-become read-only, or has failed, during step 9). Rationale (Step 4): when we see pre-existing shares on read-only servers, we prefer to rely upon those (rather than the ones on read-write servers), so we can maybe use the read-write servers for new shares. If we picked the read-write server's share, then we couldn't re-use that server for new ones (we only rely upon each server for one share, more or less). Properties of Upload Strategy of Happiness ========================================== The size of the maximum bipartite matching is bounded by the size of the smaller set of vertices. Therefore in a situation where the set of servers is smaller than the set of shares, placement is not generated for a subset of shares. In this case the remaining shares are distributed as evenly as possible across the set of writable servers. If the servers-of-happiness criteria can be met, the upload strategy of happiness guarantees that H shares will be placed on the network. During file repair, if the set of servers is larger than N, the algorithm will only attempt to spread shares over N distinct servers. For both initial file upload and file repair, N should be viewed as the maximum number of distinct servers shares can be placed on, and H as the minimum amount. The uploader will fail if the number of distinct servers is less than H, and it will never attempt to exceed N. tahoe_lafs-1.20.0/docs/specifications/uri.rst0000644000000000000000000002161413615410400016107 0ustar00.. -*- coding: utf-8-with-signature -*- ========== Tahoe URIs ========== 1. `File URIs`_ 1. `CHK URIs`_ 2. `LIT URIs`_ 3. `Mutable File URIs`_ 2. `Directory URIs`_ 3. `Internal Usage of URIs`_ Each file and directory in a Tahoe-LAFS file store is described by a "URI". There are different kinds of URIs for different kinds of objects, and there are different kinds of URIs to provide different kinds of access to those objects. Each URI is a string representation of a "capability" or "cap", and there are read-caps, write-caps, verify-caps, and others. Each URI provides both ``location`` and ``identification`` properties. ``location`` means that holding the URI is sufficient to locate the data it represents (this means it contains a storage index or a lookup key, whatever is necessary to find the place or places where the data is being kept). ``identification`` means that the URI also serves to validate the data: an attacker who wants to trick you into into using the wrong data will be limited in their abilities by the identification properties of the URI. Some URIs are subsets of others. In particular, if you know a URI which allows you to modify some object, you can produce a weaker read-only URI and give it to someone else, and they will be able to read that object but not modify it. 
Directories, for example, have a read-cap which is derived from the write-cap: anyone with read/write access to the directory can produce a limited URI that grants read-only access, but not the other way around.

src/allmydata/uri.py is the main place where URIs are processed. It is the authoritative definition point for all the URI types described herein.

File URIs
=========

The lowest layer of the Tahoe architecture (the "key-value store") is responsible for mapping URIs to data. This is basically a distributed hash table, in which the URI is the key, and some sequence of bytes is the value. There are two kinds of entries in this table: immutable and mutable. For immutable entries, the URI represents a fixed chunk of data. The URI itself is derived from the data when it is uploaded into the grid, and can be used to locate and download that data from the grid at some time in the future. For mutable entries, the URI identifies a "slot" or "container", which can be filled with different pieces of data at different times.

It is important to note that the values referenced by these URIs are just sequences of bytes, and that **no** filenames or other metadata is retained at this layer. The file store layer (which sits above the key-value store layer) is entirely responsible for directories and filenames and the like.

CHK URIs
--------

CHK (Content Hash Keyed) files are immutable sequences of bytes. They are uploaded in a distributed fashion using a "storage index" (for the "location" property), and encrypted using a "read key". A secure hash of the data is computed to help validate the data afterwards (providing the "identification" property). All of these pieces, plus information about the file's size and the number of shares into which it has been distributed, are put into the "CHK" URI. The storage index is derived by hashing the read key (using a tagged SHA-256d hash, then truncated to 128 bits), so it does not need to be physically present in the URI.

The current format for CHK URIs is the concatenation of the following strings::

 URI:CHK:(key):(hash):(needed-shares):(total-shares):(size)

Where (key) is the base32 encoding of the 16-byte AES read key, (hash) is the base32 encoding of the SHA-256 hash of the URI Extension Block, (needed-shares) is an ascii decimal representation of the number of shares required to reconstruct this file, (total-shares) is the same representation of the total number of shares created, and (size) is an ascii decimal representation of the size of the data represented by this URI. All base32 encodings are expressed in lower-case, with the trailing '=' signs removed.

For example, the following is a CHK URI, generated from a previous version of the contents of :doc:`architecture.rst<../architecture>`::

 URI:CHK:ihrbeov7lbvoduupd4qblysj7a:bg5agsdt62jb34hxvxmdsbza6do64f4fg5anxxod2buttbo6udzq:3:10:28733

Historical note: The name "CHK" is somewhat inaccurate and continues to be used for historical reasons. "Content Hash Key" means that the encryption key is derived by hashing the contents, which gives the useful property that encoding the same file twice will result in the same URI. However, this is an optional step: by passing a different flag to the appropriate API call, Tahoe will generate a random encryption key instead of hashing the file: this gives the useful property that the URI or storage index does not reveal anything about the file's contents (except filesize), which improves privacy.
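As an illustration of the fields described above, a CHK URI can be pulled apart mechanically. The sketch below is illustrative only; src/allmydata/uri.py remains the authoritative parser::

    from base64 import b32decode

    def parse_chk_uri(uri):
        prefix, kind, key, ueb_hash, k, n, size = uri.split(":")
        assert (prefix, kind) == ("URI", "CHK")
        def unb32(s):
            # Tahoe writes base32 in lower case with the trailing '=' removed.
            return b32decode(s.upper() + "=" * (-len(s) % 8))
        return {
            "readkey": unb32(key),        # 16-byte AES read key
            "ueb_hash": unb32(ueb_hash),  # SHA-256 hash of the URI Extension Block
            "needed_shares": int(k),
            "total_shares": int(n),
            "size": int(size),
        }

    parsed = parse_chk_uri(
        "URI:CHK:ihrbeov7lbvoduupd4qblysj7a:"
        "bg5agsdt62jb34hxvxmdsbza6do64f4fg5anxxod2buttbo6udzq:3:10:28733")
    assert parsed["needed_shares"] == 3 and parsed["size"] == 28733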
The URI:CHK: prefix really indicates that an immutable file is in use, without saying anything about how the key was derived. LIT URIs -------- LITeral files are also an immutable sequence of bytes, but they are so short that the data is stored inside the URI itself. These are used for files of 55 bytes or shorter, which is the point at which the LIT URI is the same length as a CHK URI would be. LIT URIs do not require an upload or download phase, as their data is stored directly in the URI. The format of a LIT URI is simply a fixed prefix concatenated with the base32 encoding of the file's data:: URI:LIT:bjuw4y3movsgkidbnrwg26lemf2gcl3xmvrc6kropbuhi3lmbi The LIT URI for an empty file is "URI:LIT:", and the LIT URI for a 5-byte file that contains the string "hello" is "URI:LIT:nbswy3dp". Mutable File URIs ----------------- The other kind of DHT entry is the "mutable slot", in which the URI names a container to which data can be placed and retrieved without changing the identity of the container. These slots have write-caps (which allow read/write access), read-caps (which only allow read-access), and verify-caps (which allow a file checker/repairer to confirm that the contents exist, but does not let it decrypt the contents). Mutable slots use public key technology to provide data integrity, and put a hash of the public key in the URI. As a result, the data validation is limited to confirming that the data retrieved matches *some* data that was uploaded in the past, but not _which_ version of that data. The format of the write-cap for mutable files is:: URI:SSK:(writekey):(fingerprint) Where (writekey) is the base32 encoding of the 16-byte AES encryption key that is used to encrypt the RSA private key, and (fingerprint) is the base32 encoded 32-byte SHA-256 hash of the RSA public key. For more details about the way these keys are used, please see :doc:`mutable`. The format for mutable read-caps is:: URI:SSK-RO:(readkey):(fingerprint) The read-cap is just like the write-cap except it contains the other AES encryption key: the one used for encrypting the mutable file's contents. This second key is derived by hashing the writekey, which allows the holder of a write-cap to produce a read-cap, but not the other way around. The fingerprint is the same in both caps. Historical note: the "SSK" prefix is a perhaps-inaccurate reference to "Sub-Space Keys" from the Freenet project, which uses a vaguely similar structure to provide mutable file access. Directory URIs ============== The key-value store layer provides a mapping from URI to data. To turn this into a graph of directories and files, the "file store" layer (which sits on top of the key-value store layer) needs to keep track of "directory nodes", or "dirnodes" for short. :doc:`dirnodes` describes how these work. Dirnodes are contained inside mutable files, and are thus simply a particular way to interpret the contents of these files. As a result, a directory write-cap looks a lot like a mutable-file write-cap:: URI:DIR2:(writekey):(fingerprint) Likewise directory read-caps (which provide read-only access to the directory) look much like mutable-file read-caps:: URI:DIR2-RO:(readkey):(fingerprint) Historical note: the "DIR2" prefix is used because the non-distributed dirnodes in earlier Tahoe releases had already claimed the "DIR" prefix. Internal Usage of URIs ====================== The classes in source:src/allmydata/uri.py are used to pack and unpack these various kinds of URIs. 
Three Interfaces are defined (IURI, IFileURI, and IDirnodeURI) which are implemented by these classes, and string-to-URI-class conversion routines have been registered as adapters, so that code which wants to extract e.g. the size of a CHK or LIT uri can do:: print IFileURI(uri).get_size() If the URI does not represent a CHK or LIT uri (for example, if it was for a directory instead), the adaptation will fail, raising a TypeError inside the IFileURI() call. Several utility methods are provided on these objects. The most important is ``to_string()``, which returns the string form of the URI. Therefore ``IURI(uri).to_string == uri`` is true for any valid URI. See the IURI class in source:src/allmydata/interfaces.py for more details. tahoe_lafs-1.20.0/docs/specifications/url.rst0000644000000000000000000002017513615410400016113 0ustar00URLs ==== The goal of this document is to completely specify the construction and use of the URLs by Tahoe-LAFS for service location. This includes, but is not limited to, the original Foolscap-based URLs. These are not to be confused with the URI-like capabilities Tahoe-LAFS uses to refer to stored data. An attempt is also made to outline the rationale for certain choices about these URLs. The intended audience for this document is Tahoe-LAFS maintainers and other developers interested in interoperating with Tahoe-LAFS or these URLs. .. _furls: Background ---------- Tahoe-LAFS first used Foolscap_ for network communication. Foolscap connection setup takes as an input a Foolscap URL or a *fURL*. A fURL includes three components: * the base32-encoded SHA1 hash of the DER form of an x509v3 certificate * zero or more network addresses [1]_ * an object identifier A Foolscap client tries to connect to each network address in turn. If a connection is established then TLS is negotiated. The server is authenticated by matching its certificate against the hash in the fURL. A matching certificate serves as proof that the handshaking peer is the correct server. This serves as the process by which the client authenticates the server. The client can then exercise further Foolscap functionality using the fURL's object identifier. If the object identifier is an unguessable, secret string then it serves as a capability. This unguessable identifier is sometimes called a `swiss number`_ (or swissnum). The client's use of the swissnum is what allows the server to authorize the client. .. _`swiss number`: http://wiki.erights.org/wiki/Swiss_number .. _NURLs: NURLs ----- The authentication and authorization properties of fURLs are a good fit for Tahoe-LAFS' requirements. These are not inherently tied to the Foolscap protocol itself. In particular they are beneficial to :doc:`http-storage-node-protocol` which uses HTTP instead of Foolscap. It is conceivable they will also be used with WebSockets at some point as well. Continuing to refer to these URLs as fURLs when they are being used for other protocols may cause confusion. Therefore, this document coins the name **NURL** for these URLs. This can be considered to expand to "**N**\ ew URLs" or "Authe\ **N**\ ticating URLs" or "Authorizi\ **N**\ g URLs" as the reader prefers. The anticipated use for a **NURL** will still be to establish a TLS connection to a peer. The protocol run over that TLS connection could be Foolscap though it is more likely to be an HTTP-based protocol (such as GBS). Unlike fURLs, only a single net-loc is included, for consistency with other forms of URLs. 
As a result, multiple NURLs may be available for a single server. Syntax ------ The EBNF for a NURL is as follows:: nurl = tcp-nurl | tor-nurl | i2p-nurl tcp-nurl = "pb://", hash, "@", tcp-loc, "/", swiss-number, [ version1 ] tor-nurl = "pb+tor://", hash, "@", tcp-loc, "/", swiss-number, [ version1 ] i2p-nurl = "pb+i2p://", hash, "@", i2p-loc, "/", swiss-number, [ version1 ] hash = unreserved tcp-loc = hostname, [ ":" port ] hostname = domain | IPv4address | IPv6address i2p-loc = i2p-addr, [ ":" port ] i2p-addr = { unreserved }, ".i2p" swiss-number = segment version1 = "#v=1" See https://tools.ietf.org/html/rfc3986#section-3.3 for the definition of ``segment``. See https://tools.ietf.org/html/rfc2396#appendix-A for the definition of ``unreserved``. See https://tools.ietf.org/html/draft-main-ipaddr-text-rep-02#section-3.1 for the definition of ``IPv4address``. See https://tools.ietf.org/html/draft-main-ipaddr-text-rep-02#section-3.2 for the definition of ``IPv6address``. See https://tools.ietf.org/html/rfc1035#section-2.3.1 for the definition of ``domain``. Versions -------- Though all NURLs are syntactically compatible some semantic differences are allowed. These differences are separated into distinct versions. Version 0 --------- In theory, a Foolscap fURL with a single netloc is considered the canonical definition of a version 0 NURL. Notably, the hash component is defined as the base32-encoded SHA1 hash of the DER form of an x509v3 certificate. A version 0 NURL is identified by the absence of the ``v=1`` fragment. In practice, real world fURLs may have more than one netloc, so lack of version fragment will likely just involve dispatching the fURL to a different parser. Examples ~~~~~~~~ * ``pb://sisi4zenj7cxncgvdog7szg3yxbrnamy@tcp:127.1:34399/xphmwz6lx24rh2nxlinni`` * ``pb://2uxmzoqqimpdwowxr24q6w5ekmxcymby@localhost:47877/riqhpojvzwxujhna5szkn`` Version 1 --------- The hash component of a version 1 NURL differs in three ways from the prior version. 1. The hash function used is SHA-256, to match RFC 7469. The security of SHA1 `continues to be eroded`_; Latacora `SHA-2`_. 2. The hash is computed over the certificate's SPKI instead of the whole certificate. This allows certificate re-generation so long as the public key remains the same. This is useful to allow contact information to be updated or extension of validity period. Use of an SPKI hash has also been `explored by the web community`_ during its flirtation with using it for HTTPS certificate pinning (though this is now largely abandoned). .. note:: *Only* the certificate's keypair is pinned by the SPKI hash. The freedom to change every other part of the certificate is coupled with the fact that all other parts of the certificate contain arbitrary information set by the private key holder. It is neither guaranteed nor expected that a certificate-issuing authority has validated this information. Therefore, *all* certificate fields should be considered within the context of the relationship identified by the SPKI hash. 3. The hash is encoded using urlsafe-base64 (without padding) instead of base32. This provides a more compact representation and minimizes the usability impacts of switching from a 160 bit hash to a 256 bit hash. A version 1 NURL is identified by the presence of the ``v=1`` fragment. 
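The hashing and encoding steps above can be sketched using the ``cryptography`` package (which Tahoe-LAFS already depends on). This is an illustrative outline only: it shows SHA-256 over the DER-encoded SPKI followed by unpadded urlsafe-base64, and the function name is invented for this document. It does not claim to reproduce every implementation detail (for example, any truncation needed to arrive at the hash length seen in the examples below); the Tahoe-LAFS source remains authoritative::

    import base64

    from cryptography import x509
    from cryptography.hazmat.primitives import hashes, serialization

    def version1_hash(cert_pem: bytes) -> str:
        cert = x509.load_pem_x509_certificate(cert_pem)
        # hash only the SubjectPublicKeyInfo, so the rest of the
        # certificate can be regenerated without changing the NURL
        spki = cert.public_key().public_bytes(
            serialization.Encoding.DER,
            serialization.PublicFormat.SubjectPublicKeyInfo,
        )
        digest = hashes.Hash(hashes.SHA256())
        digest.update(spki)
        # urlsafe-base64 without padding, per point 3 above
        return base64.urlsafe_b64encode(digest.finalize()).rstrip(b"=").decode("ascii")
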
Though the length of the hash string (38 bytes) could also be used to differentiate it from a version 0 NURL, there is no guarantee that this will be effective in differentiating it from future versions so this approach should not be used. It is possible for a client to unilaterally upgrade a version 0 NURL to a version 1 NURL. After establishing and authenticating a connection the client will have received a copy of the server's certificate. This is sufficient to compute the new hash and rewrite the NURL to upgrade it to version 1. This provides stronger authentication assurances for future uses but it is not required. Examples ~~~~~~~~ * ``pb://1WUX44xKjKdpGLohmFcBNuIRN-8rlv1Iij_7rQ@tcp:127.1:34399/jhjbc3bjbhk#v=1`` * ``pb://azEu8vlRpnEeYm0DySQDeNY3Z2iJXHC_bsbaAw@localhost:47877/64i4aokv4ej#v=1`` .. _`continues to be eroded`: https://en.wikipedia.org/wiki/SHA-1#Cryptanalysis_and_validation .. _`SHA-2`: https://latacora.micro.blog/2018/04/03/cryptographic-right-answers.html .. _`explored by the web community`: https://www.rfc-editor.org/rfc/rfc7469 .. _Foolscap: https://github.com/warner/foolscap .. [1] ``foolscap.furl.decode_furl`` is taken as the canonical definition of the syntax of a fURL. The **location hints** part of the fURL, as it is referred to in Foolscap, is matched by the regular expression fragment ``([^/]*)``. Since this matches the empty string, no network addresses are required to form a fURL. The supporting code around the regular expression also takes extra steps to allow an empty string to match here. Open Questions -------------- 1. Should we make a hard recommendation that all certificate fields are ignored? The system makes no guarantees about validation of these fields. Is it just an unnecessary risk to let a user see them? 2. Should the version specifier be a query-arg-alike or a fragment-alike? The value is only necessary on the client side which makes it similar to an HTTP URL fragment. The current Tahoe-LAFS configuration parsing code has special handling of the fragment character (``#``) which makes it unusable. However, the configuration parsing code is easily changed. tahoe_lafs-1.20.0/docs/specifications/backends/raic.rst0000644000000000000000000004117613615410400020005 0ustar00.. -*- coding: utf-8-with-signature -*- ============================================================= Redundant Array of Independent Clouds: Share To Cloud Mapping ============================================================= Introduction ============ This document describes a proposed design for the mapping of LAFS shares to objects in a cloud storage service. It also analyzes the costs for each of the functional requirements, including network, disk, storage and API usage costs. Terminology =========== *LAFS share* A Tahoe-LAFS share representing part of a file after encryption and erasure encoding. *LAFS shareset* The set of shares stored by a LAFS storage server for a given storage index. The shares within a shareset are numbered by a small integer. *Cloud storage service* A service such as Amazon S3 `²`_, Rackspace Cloud Files `³`_, Google Cloud Storage `⁴`_, or Windows Azure `⁵`_, that provides cloud storage. *Cloud storage interface* A protocol interface supported by a cloud storage service, such as the S3 interface `⁶`_, the OpenStack Object Storage interface `⁷`_, the Google Cloud Storage interface `⁸`_, or the Azure interface `⁹`_. There may be multiple services implementing a given cloud storage interface. 
In this design, only REST-based APIs `¹⁰`_ over HTTP will be used as interfaces. *Store object* A file-like abstraction provided by a cloud storage service, storing a sequence of bytes. Store objects are mutable in the sense that the contents and metadata of the store object with a given name in a given backend store can be replaced. Store objects are called “blobs” in the Azure interface, and “objects” in the other interfaces. *Cloud backend store* A container for store objects provided by a cloud service. Cloud backend stores are called “buckets” in the S3 and Google Cloud Storage interfaces, and “containers” in the Azure and OpenStack Storage interfaces. Functional Requirements ======================= * *Upload*: a LAFS share can be uploaded to an appropriately configured Tahoe-LAFS storage server and the data is stored to the cloud storage service. * *Scalable shares*: there is no hard limit on the size of LAFS share that can be uploaded. If the cloud storage interface offers scalable files, then this could be implemented by using that feature of the specific cloud storage interface. Alternately, it could be implemented by mapping from the LAFS abstraction of an unlimited-size immutable share to a set of size-limited store objects. * *Streaming upload*: the size of the LAFS share that is uploaded can exceed the amount of RAM and even the amount of direct attached storage on the storage server. I.e., the storage server is required to stream the data directly to the ultimate cloud storage service while processing it, instead of to buffer the data until the client is finished uploading and then transfer the data to the cloud storage service. * *Download*: a LAFS share can be downloaded from an appropriately configured Tahoe-LAFS storage server, and the data is loaded from the cloud storage service. * *Streaming download*: the size of the LAFS share that is downloaded can exceed the amount of RAM and even the amount of direct attached storage on the storage server. I.e. the storage server is required to stream the data directly to the client while processing it, instead of to buffer the data until the cloud storage service is finished serving and then transfer the data to the client. * *Modify*: a LAFS share can have part of its contents modified. If the cloud storage interface offers scalable mutable files, then this could be implemented by using that feature of the specific cloud storage interface. Alternately, it could be implemented by mapping from the LAFS abstraction of an unlimited-size mutable share to a set of size-limited store objects. * *Efficient modify*: the size of the LAFS share being modified can exceed the amount of RAM and even the amount of direct attached storage on the storage server. I.e. the storage server is required to download, patch, and upload only the segment(s) of the share that are being modified, instead of to download, patch, and upload the entire share. * *Tracking leases*: The Tahoe-LAFS storage server is required to track when each share has its lease renewed so that unused shares (shares whose lease has not been renewed within a time limit, e.g. 30 days) can be garbage collected. This does not necessarily require code specific to each cloud storage interface, because the lease tracking can be performed in the storage server's generic component rather than in the component supporting each interface. Mapping ======= This section describes the mapping between LAFS shares and store objects. 
A LAFS share will be split into one or more “chunks” that are each stored in a store object. A LAFS share of size `C` bytes will be stored as `ceiling(C / chunksize)` chunks. The last chunk has a size between 1 and `chunksize` bytes inclusive. (It is not possible for `C` to be zero, because valid shares always have a header, so, there is at least one chunk for each share.) For an existing share, the chunk size is determined by the size of the first chunk. For a new share, it is a parameter that may depend on the storage interface. It is an error for any chunk to be larger than the first chunk, or for any chunk other than the last to be smaller than the first chunk. If a mutable share with total size less than the default chunk size for the storage interface is being modified, the new contents are split using the default chunk size. *Rationale*: this design allows the `chunksize` parameter to be changed for new shares written via a particular storage interface, without breaking compatibility with existing stored shares. All cloud storage interfaces return the sizes of store objects with requests to list objects, and so the size of the first chunk can be determined without an additional request. The name of the store object for chunk `i` > 0 of a LAFS share with storage index `STORAGEINDEX` and share number `SHNUM`, will be shares/`ST`/`STORAGEINDEX`/`SHNUM.i` where `ST` is the first two characters of `STORAGEINDEX`. When `i` is 0, the `.0` is omitted. *Rationale*: this layout maintains compatibility with data stored by the prototype S3 backend, for which Least Authority Enterprises has existing customers. This prototype always used a single store object to store each share, with name shares/`ST`/`STORAGEINDEX`/`SHNUM` By using the same prefix “shares/`ST`/`STORAGEINDEX`/” for old and new layouts, the storage server can obtain a list of store objects associated with a given shareset without having to know the layout in advance, and without having to make multiple API requests. This also simplifies sharing of test code between the disk and cloud backends. Mutable and immutable shares will be “chunked” in the same way. Rationale for Chunking ---------------------- Limiting the amount of data received or sent in a single request has the following advantages: * It is unnecessary to write separate code to take advantage of the “large object” features of each cloud storage interface, which differ significantly in their design. * Data needed for each PUT request can be discarded after it completes. If a PUT request fails, it can be retried while only holding the data for that request in memory. Costs ===== In this section we analyze the costs of the proposed design in terms of network, disk, memory, cloud storage, and API usage. Network usage—bandwidth and number-of-round-trips ------------------------------------------------- When a Tahoe-LAFS storage client allocates a new share on a storage server, the backend will request a list of the existing store objects with the appropriate prefix. This takes one HTTP request in the common case, but may take more for the S3 interface, which has a limit of 1000 objects returned in a single “GET Bucket” request. If the share is to be read, the client will make a number of calls each specifying the offset and length of the required span of bytes. On the first request that overlaps a given chunk of the share, the server will make an HTTP GET request for that store object. 
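To make the request pattern concrete, here is an illustrative sketch of how a requested byte span maps to chunk indices and to the store-object names defined in the Mapping section. The function name, the placeholder storage index, and the chunk size are invented for this example and do not come from the Tahoe-LAFS code base::

    def store_objects_for_span(storage_index, shnum, offset, length, chunksize):
        """Names of the store objects holding bytes [offset, offset + length)."""
        first = offset // chunksize
        last = (offset + length - 1) // chunksize
        prefix = "shares/%s/%s/%d" % (storage_index[:2], storage_index, shnum)
        # chunk 0 omits the ".i" suffix, matching the prototype S3 layout
        return [prefix if i == 0 else "%s.%d" % (prefix, i)
                for i in range(first, last + 1)]

    # e.g. with 2 MiB chunks, a 3 MiB read starting at offset 1 MiB touches
    # chunks 0 and 1, and therefore two store objects
    print(store_objects_for_span("exampleindex", 3, 1024**2, 3 * 1024**2, 2 * 1024**2))
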
The server may also speculatively make GET requests for store objects that are likely to be needed soon (which can be predicted since reads are normally sequential), in order to reduce latency. Each read will be satisfied as soon as the corresponding data is available, without waiting for the rest of the chunk, in order to minimize read latency. All four cloud storage interfaces support GET requests using the Range HTTP header. This could be used to optimize reads where the Tahoe-LAFS storage client requires only part of a share. If the share is to be written, the server will make an HTTP PUT request for each chunk that has been completed. Tahoe-LAFS clients only write immutable shares sequentially, and so we can rely on that property to simplify the implementation. When modifying shares of an existing mutable file, the storage server will be able to make PUT requests only for chunks that have changed. (Current Tahoe-LAFS v1.9 clients will not take advantage of this ability, but future versions will probably do so for MDMF files.) In some cases, it may be necessary to retry a request (see the `Structure of Implementation`_ section below). In the case of a PUT request, at the point at which a retry is needed, the new chunk contents to be stored will still be in memory and so this is not problematic. In the absence of retries, the maximum number of GET requests that will be made when downloading a file, or the maximum number of PUT requests when uploading or modifying a file, will be equal to the number of chunks in the file. If the new mutable share content has fewer chunks than the old content, then the remaining store objects for old chunks must be deleted (using one HTTP request each). When reading a share, the backend must tolerate the case where these store objects have not been deleted successfully. The last write to a share will be reported as successful only when all corresponding HTTP PUTs and DELETEs have completed successfully. Disk usage (local to the storage server) ---------------------------------------- It is never necessary for the storage server to write the content of share chunks to local disk, either when they are read or when they are written. Each chunk is held only in memory. A proposed change to the Tahoe-LAFS storage server implementation uses a sqlite database to store metadata about shares. In that case the same database would be used for the cloud backend. This would enable lease tracking to be implemented in the same way for disk and cloud backends. Memory usage ------------ The use of chunking simplifies bounding the memory usage of the storage server when handling files that may be larger than memory. However, this depends on limiting the number of chunks that are simultaneously held in memory. Multiple chunks can be held in memory either because of pipelining of requests for a single share, or because multiple shares are being read or written (possibly by multiple clients). For immutable shares, the Tahoe-LAFS storage protocol requires the client to specify in advance the maximum amount of data it will write. Also, a cooperative client (including all existing released versions of the Tahoe-LAFS code) will limit the amount of data that is pipelined, currently to 50 KiB. Since the chunk size will be greater than that, it is possible to ensure that for each allocation, the maximum chunk data memory usage is the lesser of two chunks, and the allocation size. (There is some additional overhead but it is small compared to the chunk data.) 
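A toy sketch of the bookkeeping this implies, combining the per-allocation bound just derived (the lesser of two chunks and the allocation size) with the delay-or-deny decision described in the next paragraph; the names here are invented for illustration and this is not the actual storage-server code::

    def may_admit_allocation(allocation_size, chunksize, reserved, memory_budget):
        # worst-case chunk-data memory for one allocation:
        # the lesser of two chunks and the allocation size
        bound = min(2 * chunksize, allocation_size)
        # admit only if the worst case still fits within the budget;
        # otherwise the server would delay (or possibly deny) the allocation
        return reserved + bound <= memory_budget
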
If the maximum memory usage of a new allocation would exceed the memory available, the allocation can be delayed or possibly denied, so that the total memory usage is bounded. It is not clear that the existing protocol allows allocations for mutable shares to be bounded in general; this may be addressed in a future protocol change. The above discussion assumes that clients do not maliciously send large messages as a denial-of-service attack. Foolscap (the protocol layer underlying the Tahoe-LAFS storage protocol) does not attempt to resist denial of service. Storage ------- The storage requirements, including not-yet-collected garbage shares, are the same as for the Tahoe-LAFS disk backend. That is, the total size of cloud objects stored is equal to the total size of shares that the disk backend would store. Erasure coding causes the size of shares for each file to be a factor `shares.total` / `shares.needed` times the file size, plus overhead that is logarithmic in the file size `¹¹`_. API usage --------- Cloud storage backends typically charge a small fee per API request. The number of requests to the cloud storage service for various operations is discussed under “network usage” above. Structure of Implementation =========================== A generic “cloud backend”, based on the prototype S3 backend but with support for chunking as described above, will be written. An instance of the cloud backend can be attached to one of several “cloud interface adapters”, one for each cloud storage interface. These adapters will operate only on chunks, and need not distinguish between mutable and immutable shares. They will be a relatively “thin” abstraction layer over the HTTP APIs of each cloud storage interface, similar to the S3Bucket abstraction in the prototype. For some cloud storage services it may be necessary to transparently retry requests in order to recover from transient failures. (Although the erasure coding may enable a file to be retrieved even when shares are not stored by or not readable from all cloud storage services used in a Tahoe-LAFS grid, it may be desirable to retry cloud storage service requests in order to improve overall reliability.) Support for this will be implemented in the generic cloud backend, and used whenever a cloud storage adaptor reports a transient failure. Our experience with the prototype suggests that it is necessary to retry on transient failures for Amazon's S3 service. There will also be a “mock” cloud interface adaptor, based on the prototype's MockS3Bucket. This allows tests of the generic cloud backend to be run without a connection to a real cloud service. The mock adaptor will be able to simulate transient and non-transient failures. Known Issues ============ This design worsens a known “write hole” issue in Tahoe-LAFS when updating the contents of mutable files. An update to a mutable file can require changing the contents of multiple chunks, and if the client fails or is disconnected during the operation the resulting state of the store objects for that share may be inconsistent—no longer containing all of the old version, but not yet containing all of the new version. A mutable share can be left in an inconsistent state even by the existing Tahoe-LAFS disk backend if it fails during a write, but that has a smaller chance of occurrence because the current client behavior leads to mutable shares being written to disk in a single system call. 
The best fix for this issue probably requires changing the Tahoe-LAFS storage protocol, perhaps by extending it to use a two-phase or three-phase commit (ticket #1755). References =========== ¹ omitted .. _²: ² “Amazon S3” Amazon (2012) https://aws.amazon.com/s3/ .. _³: ³ “Rackspace Cloud Files” Rackspace (2012) https://www.rackspace.com/cloud/cloud_hosting_products/files/ .. _⁴: ⁴ “Google Cloud Storage” Google (2012) https://developers.google.com/storage/ .. _⁵: ⁵ “Windows Azure Storage” Microsoft (2012) https://www.windowsazure.com/en-us/develop/net/fundamentals/cloud-storage/ .. _⁶: ⁶ “Amazon Simple Storage Service (Amazon S3) API Reference: REST API” Amazon (2012) http://docs.amazonwebservices.com/AmazonS3/latest/API/APIRest.html .. _⁷: ⁷ “OpenStack Object Storage” openstack.org (2012) http://openstack.org/projects/storage/ .. _⁸: ⁸ “Google Cloud Storage Reference Guide” Google (2012) https://developers.google.com/storage/docs/reference-guide .. _⁹: ⁹ “Windows Azure Storage Services REST API Reference” Microsoft (2012) http://msdn.microsoft.com/en-us/library/windowsazure/dd179355.aspx .. _¹⁰: ¹⁰ “Representational state transfer” English Wikipedia (2012) https://en.wikipedia.org/wiki/Representational_state_transfer .. _¹¹: ¹¹ “Performance costs for some common operations” tahoe-lafs.org (2012) :doc:`../../performance` tahoe_lafs-1.20.0/integration/README0000644000000000000000000000051213615410400014020 0ustar00Install: pip install -e .[test] run: py.test -s -v integration/ If you want to keep the created temp-dir around: py.test --keep-tempdir -v integration/ The fixtures also set up a "flogtool gather" process and dump all the logs from all the running processes (introducer, 5 storage nodes, alice, bob) to a tempfile. tahoe_lafs-1.20.0/integration/__init__.py0000644000000000000000000000000113615410400015242 0ustar00 tahoe_lafs-1.20.0/integration/conftest.py0000644000000000000000000004011213615410400015337 0ustar00""" Ported to Python 3. """ from __future__ import annotations import os import sys import shutil from attr import frozen from time import sleep from os import mkdir, environ from os.path import join, exists from tempfile import mkdtemp from eliot import ( to_file, log_call, ) from twisted.python.filepath import FilePath from twisted.python.procutils import which from twisted.internet.defer import DeferredList, succeed from twisted.internet.error import ( ProcessExitedAlready, ProcessTerminated, ) import pytest import pytest_twisted from typing import Mapping from .util import ( _MagicTextProtocol, _DumpOutputProtocol, _ProcessExitedProtocol, _create_node, _tahoe_runner_optional_coverage, await_client_ready, block_with_timeout, ) from .grid import ( create_flog_gatherer, create_grid, ) from allmydata.node import read_config from allmydata.util.iputil import allocate_tcp_port # No reason for HTTP requests to take longer than four minutes in the # integration tests. See allmydata/scripts/common_http.py for usage. 
os.environ["__TAHOE_CLI_HTTP_TIMEOUT"] = "240" # Make Foolscap logging go into Twisted logging, so that integration test logs # include extra information # (https://github.com/warner/foolscap/blob/latest-release/doc/logging.rst): os.environ["FLOGTOTWISTED"] = "1" # pytest customization hooks def pytest_addoption(parser): parser.addoption( "--keep-tempdir", action="store_true", dest="keep", help="Keep the tmpdir with the client directories (introducer, etc)", ) parser.addoption( "--coverage", action="store_true", dest="coverage", help="Collect coverage statistics", ) parser.addoption( "--force-foolscap", action="store_true", default=False, dest="force_foolscap", help=("If set, force Foolscap only for the storage protocol. " + "Otherwise HTTP will be used.") ) parser.addoption( "--runslow", action="store_true", default=False, dest="runslow", help="If set, run tests marked as slow.", ) def pytest_collection_modifyitems(session, config, items): if not config.option.runslow: # The --runslow option was not given; keep only collected items not # marked as slow. items[:] = [ item for item in items if item.get_closest_marker("slow") is None ] @pytest.fixture(autouse=True, scope='session') def eliot_logging(): with open("integration.eliot.json", "w") as f: to_file(f) yield # I've mostly defined these fixtures from "easiest" to "most # complicated", and the dependencies basically go "down the # page". They're all session-scoped which has the "pro" that we only # set up the grid once, but the "con" that each test has to be a # little careful they're not stepping on toes etc :/ @pytest.fixture(scope='session') @log_call(action_type=u"integration:reactor", include_result=False) def reactor(): # this is a fixture in case we might want to try different # reactors for some reason. from twisted.internet import reactor as _reactor return _reactor @pytest.fixture(scope='session') @log_call(action_type=u"integration:port_allocator", include_result=False) def port_allocator(reactor): # these will appear basically random, which can make especially # manual debugging harder but we're re-using code instead of # writing our own...so, win? def allocate(): port = allocate_tcp_port() return succeed(port) return allocate @pytest.fixture(scope='session') @log_call(action_type=u"integration:temp_dir", include_args=[]) def temp_dir(request) -> str: """ Invoke like 'py.test --keep-tempdir ...' 
to avoid deleting the temp-dir """ tmp = mkdtemp(prefix="tahoe") if request.config.getoption('keep'): print("\nWill retain tempdir '{}'".format(tmp)) # I'm leaving this in and always calling it so that the tempdir # path is (also) printed out near the end of the run def cleanup(): if request.config.getoption('keep'): print("Keeping tempdir '{}'".format(tmp)) else: try: shutil.rmtree(tmp, ignore_errors=True) except Exception as e: print("Failed to remove tmpdir: {}".format(e)) request.addfinalizer(cleanup) return tmp @pytest.fixture(scope='session') @log_call(action_type=u"integration:flog_binary", include_args=[]) def flog_binary(): return which('flogtool')[0] @pytest.fixture(scope='session') @log_call(action_type=u"integration:flog_gatherer", include_args=[]) def flog_gatherer(reactor, temp_dir, flog_binary, request): fg = pytest_twisted.blockon( create_flog_gatherer(reactor, request, temp_dir, flog_binary) ) return fg @pytest.fixture(scope='session') @log_call(action_type=u"integration:grid", include_args=[]) def grid(reactor, request, temp_dir, flog_gatherer, port_allocator): """ Provides a new Grid with a single Introducer and flog-gathering process. Notably does _not_ provide storage servers; use the storage_nodes fixture if your tests need a Grid that can be used for puts / gets. """ g = pytest_twisted.blockon( create_grid(reactor, request, temp_dir, flog_gatherer, port_allocator) ) return g @pytest.fixture(scope='session') def introducer(grid): return grid.introducer @pytest.fixture(scope='session') @log_call(action_type=u"integration:introducer:furl", include_args=["temp_dir"]) def introducer_furl(introducer, temp_dir): return introducer.furl @pytest.fixture @log_call( action_type=u"integration:tor:introducer", include_args=["temp_dir", "flog_gatherer"], include_result=False, ) def tor_introducer(reactor, temp_dir, flog_gatherer, request, tor_network): intro_dir = join(temp_dir, 'introducer_tor') print("making Tor introducer in {}".format(intro_dir)) print("(this can take tens of seconds to allocate Onion address)") if not exists(intro_dir): mkdir(intro_dir) done_proto = _ProcessExitedProtocol() _tahoe_runner_optional_coverage( done_proto, reactor, request, ( 'create-introducer', '--tor-control-port', tor_network.client_control_endpoint, '--hide-ip', '--listen=tor', intro_dir, ), ) pytest_twisted.blockon(done_proto.done) # adjust a few settings config = read_config(intro_dir, "tub.port") config.set_config("node", "nickname", "introducer-tor") config.set_config("node", "web.port", "4561") config.set_config("node", "log_gatherer.furl", flog_gatherer.furl) # "tahoe run" is consistent across Linux/macOS/Windows, unlike the old # "start" command. 
protocol = _MagicTextProtocol('introducer running', "tor_introducer") transport = _tahoe_runner_optional_coverage( protocol, reactor, request, ( 'run', intro_dir, ), ) def cleanup(): try: transport.signalProcess('TERM') block_with_timeout(protocol.exited, reactor) except ProcessExitedAlready: pass request.addfinalizer(cleanup) print("Waiting for introducer to be ready...") pytest_twisted.blockon(protocol.magic_seen) print("Introducer ready.") return transport @pytest.fixture def tor_introducer_furl(tor_introducer, temp_dir): furl_fname = join(temp_dir, 'introducer_tor', 'private', 'introducer.furl') while not exists(furl_fname): print("Don't see {} yet".format(furl_fname)) sleep(.1) furl = open(furl_fname, 'r').read() print(f"Found Tor introducer furl: {furl} in {furl_fname}") return furl @pytest.fixture(scope='session') @log_call( action_type=u"integration:storage_nodes", include_args=["grid"], include_result=False, ) def storage_nodes(grid): nodes_d = [] # start all 5 nodes in parallel for x in range(5): nodes_d.append(grid.add_storage_node()) nodes_status = pytest_twisted.blockon(DeferredList(nodes_d)) for ok, value in nodes_status: assert ok, "Storage node creation failed: {}".format(value) return grid.storage_servers @pytest.fixture(scope='session') @log_call(action_type=u"integration:alice", include_args=[], include_result=False) def alice(reactor, request, grid, storage_nodes): """ :returns grid.Client: the associated instance for Alice """ alice = pytest_twisted.blockon(grid.add_client("alice")) pytest_twisted.blockon(alice.add_sftp(reactor, request)) print(f"Alice pid: {alice.process.transport.pid}") return alice @pytest.fixture(scope='session') @log_call(action_type=u"integration:bob", include_args=[], include_result=False) def bob(reactor, temp_dir, introducer_furl, flog_gatherer, storage_nodes, request): process = pytest_twisted.blockon( _create_node( reactor, request, temp_dir, introducer_furl, flog_gatherer, "bob", web_port="tcp:9981:interface=localhost", storage=False, ) ) pytest_twisted.blockon(await_client_ready(process)) return process @pytest.fixture(scope='session') @pytest.mark.skipif(sys.platform.startswith('win'), 'Tor tests are unstable on Windows') def chutney(reactor, temp_dir: str) -> tuple[str, dict[str, str]]: """ Install the Chutney software that is required to run a small local Tor grid. (Chutney lacks the normal "python stuff" so we can't just declare it in Tox or similar dependencies) """ # Try to find Chutney already installed in the environment. try: import chutney except ImportError: # Nope, we'll get our own in a moment. pass else: # We already have one, just use it. return ( # from `checkout/lib/chutney/__init__.py` we want to get back to # `checkout` because that's the parent of the directory with all # of the network definitions. So, great-grand-parent. FilePath(chutney.__file__).parent().parent().parent().path, # There's nothing to add to the environment. {}, ) chutney_dir = join(temp_dir, 'chutney') mkdir(chutney_dir) missing = [exe for exe in ["tor", "tor-gencert"] if not which(exe)] if missing: pytest.skip(f"Some command-line tools not found: {missing}") # XXX yuck! should add a setup.py to chutney so we can at least # "pip install " and/or depend on chutney in "pip # install -e .[dev]" (i.e. 
in the 'dev' extra) # # https://trac.torproject.org/projects/tor/ticket/20343 proto = _DumpOutputProtocol(None) reactor.spawnProcess( proto, 'git', ( 'git', 'clone', 'https://gitlab.torproject.org/tpo/core/chutney.git', chutney_dir, ), env=environ, ) pytest_twisted.blockon(proto.done) # XXX: Here we reset Chutney to a specific revision known to work, # since there are no stability guarantees or releases yet. proto = _DumpOutputProtocol(None) reactor.spawnProcess( proto, 'git', ( 'git', '-C', chutney_dir, 'reset', '--hard', 'c4f6789ad2558dcbfeb7d024c6481d8112bfb6c2' ), env=environ, ) pytest_twisted.blockon(proto.done) return chutney_dir, {"PYTHONPATH": join(chutney_dir, "lib")} @frozen class ChutneyTorNetwork: """ Represents a running Chutney (tor) network. Returned by the "tor_network" fixture. """ dir: FilePath environ: Mapping[str, str] client_control_port: int @property def client_control_endpoint(self) -> str: return "tcp:localhost:{}".format(self.client_control_port) @pytest.fixture(scope='session') @pytest.mark.skipif(sys.platform.startswith('win'), reason='Tor tests are unstable on Windows') def tor_network(reactor, temp_dir, chutney, request): """ Build a basic Tor network. Instantiate the "networks/basic" Chutney configuration for a local Tor network. This provides a small, local Tor network that can run v3 Onion Services. It has 3 authorities, 5 relays and 2 clients. The 'chutney' fixture pins a Chutney git qrevision, so things shouldn't change. This network has two clients which are the only nodes with valid SocksPort configuration ("008c" and "009c" 9008 and 9009) The control ports start at 8000 (so the ControlPort for the client nodes are 8008 and 8009). :param chutney: The root directory of a Chutney checkout and a dict of additional environment variables to set so a Python process can use it. :return: None """ chutney_root, chutney_env = chutney basic_network = join(chutney_root, 'networks', 'basic') env = environ.copy() env.update(chutney_env) env.update({ # default is 60, probably too short for reliable automated use. "CHUTNEY_START_TIME": "1200", }) chutney_argv = (sys.executable, '-m', 'chutney.TorNet') def chutney(argv): proto = _DumpOutputProtocol(None) reactor.spawnProcess( proto, sys.executable, chutney_argv + argv, path=join(chutney_root), env=env, ) return proto.done # now, as per Chutney's README, we have to create the network pytest_twisted.blockon(chutney(("configure", basic_network))) # before we start the network, ensure we will tear down at the end def cleanup(): print("Tearing down Chutney Tor network") try: block_with_timeout(chutney(("stop", basic_network)), reactor) except ProcessTerminated: # If this doesn't exit cleanly, that's fine, that shouldn't fail # the test suite. pass request.addfinalizer(cleanup) pytest_twisted.blockon(chutney(("start", basic_network))) # Wait for the nodes to "bootstrap" - ie, form a network among themselves. 
# Successful bootstrap is reported with a message something like: # # Everything bootstrapped after 151 sec # Bootstrap finished: 151 seconds # Node status: # test000a : 100, done , Done # test001a : 100, done , Done # test002a : 100, done , Done # test003r : 100, done , Done # test004r : 100, done , Done # test005r : 100, done , Done # test006r : 100, done , Done # test007r : 100, done , Done # test008c : 100, done , Done # test009c : 100, done , Done # Published dir info: # test000a : 100, all nodes , desc md md_cons ns_cons , Dir info cached # test001a : 100, all nodes , desc md md_cons ns_cons , Dir info cached # test002a : 100, all nodes , desc md md_cons ns_cons , Dir info cached # test003r : 100, all nodes , desc md md_cons ns_cons , Dir info cached # test004r : 100, all nodes , desc md md_cons ns_cons , Dir info cached # test005r : 100, all nodes , desc md md_cons ns_cons , Dir info cached # test006r : 100, all nodes , desc md md_cons ns_cons , Dir info cached # test007r : 100, all nodes , desc md md_cons ns_cons , Dir info cached pytest_twisted.blockon(chutney(("wait_for_bootstrap", basic_network))) # print some useful stuff try: pytest_twisted.blockon(chutney(("status", basic_network))) except ProcessTerminated: print("Chutney.TorNet status failed (continuing)") # the "8008" comes from configuring "networks/basic" in chutney # and then examining "net/nodes/008c/torrc" for ControlPort value return ChutneyTorNetwork( chutney_root, chutney_env, 8008, ) tahoe_lafs-1.20.0/integration/grid.py0000644000000000000000000003737313615410400014456 0ustar00""" Classes which directly represent various kinds of Tahoe processes that co-operate to for "a Grid". These methods and objects are used by conftest.py fixtures but may also be used as direct helpers for tests that don't want to (or can't) rely on 'the' global grid as provided by fixtures like 'alice' or 'storage_servers'. """ from os import mkdir, listdir from os.path import join, exists from json import loads from tempfile import mktemp from time import sleep from eliot import ( log_call, ) from foolscap.furl import ( decode_furl, ) from twisted.python.procutils import which from twisted.internet.defer import ( inlineCallbacks, returnValue, Deferred, ) from twisted.internet.task import ( deferLater, ) from twisted.internet.interfaces import ( IProcessTransport, IProcessProtocol, ) from twisted.internet.error import ProcessTerminated from allmydata.util.attrs_provides import ( provides, ) from allmydata.node import read_config from .util import ( _CollectOutputProtocol, _MagicTextProtocol, _DumpOutputProtocol, _ProcessExitedProtocol, _run_node, _cleanup_tahoe_process, _tahoe_runner_optional_coverage, TahoeProcess, await_client_ready, generate_ssh_key, cli, reconfigure, _create_node, ) import attr import pytest_twisted # currently, we pass a "request" around a bunch but it seems to only # be for addfinalizer() calls. # - is "keeping" a request like that okay? What if it's a session-scoped one? # (i.e. in Grid etc) # - maybe limit to "a callback to hang your cleanup off of" (instead of request)? @attr.s class FlogGatherer(object): """ Flog Gatherer process. 
""" process = attr.ib( validator=provides(IProcessTransport) ) protocol = attr.ib( validator=provides(IProcessProtocol) ) furl = attr.ib() @inlineCallbacks def create_flog_gatherer(reactor, request, temp_dir, flog_binary): out_protocol = _CollectOutputProtocol() gather_dir = join(temp_dir, 'flog_gather') reactor.spawnProcess( out_protocol, flog_binary, ( 'flogtool', 'create-gatherer', '--location', 'tcp:localhost:3117', '--port', '3117', gather_dir, ) ) yield out_protocol.done twistd_protocol = _MagicTextProtocol("Gatherer waiting at", "gatherer") twistd_process = reactor.spawnProcess( twistd_protocol, which('twistd')[0], ( 'twistd', '--nodaemon', '--python', join(gather_dir, 'gatherer.tac'), ), path=gather_dir, ) yield twistd_protocol.magic_seen def cleanup(): _cleanup_tahoe_process(twistd_process, twistd_protocol.exited) flog_file = mktemp('.flog_dump') flog_protocol = _DumpOutputProtocol(open(flog_file, 'w')) flog_dir = join(temp_dir, 'flog_gather') flogs = [x for x in listdir(flog_dir) if x.endswith('.flog')] print("Dumping {} flogtool logfiles to '{}'".format(len(flogs), flog_file)) for flog_path in flogs: reactor.spawnProcess( flog_protocol, flog_binary, ( 'flogtool', 'dump', join(temp_dir, 'flog_gather', flog_path) ), ) print("Waiting for flogtool to complete") try: pytest_twisted.blockon(flog_protocol.done) except ProcessTerminated as e: print("flogtool exited unexpectedly: {}".format(str(e))) print("Flogtool completed") request.addfinalizer(cleanup) with open(join(gather_dir, 'log_gatherer.furl'), 'r') as f: furl = f.read().strip() returnValue( FlogGatherer( protocol=twistd_protocol, process=twistd_process, furl=furl, ) ) @attr.s class StorageServer(object): """ Represents a Tahoe Storage Server """ process = attr.ib( validator=attr.validators.instance_of(TahoeProcess) ) protocol = attr.ib( validator=provides(IProcessProtocol) ) @inlineCallbacks def restart(self, reactor, request): """ re-start our underlying process by issuing a TERM, waiting and then running again. await_client_ready() will be done as well Note that self.process and self.protocol will be new instances after this. """ self.process.transport.signalProcess('TERM') yield self.protocol.exited self.process = yield _run_node( reactor, self.process.node_dir, request, None, ) self.protocol = self.process.transport.proto yield await_client_ready(self.process) @inlineCallbacks def create_storage_server(reactor, request, temp_dir, introducer, flog_gatherer, name, web_port, needed=2, happy=3, total=4): """ Create a new storage server """ node_process = yield _create_node( reactor, request, temp_dir, introducer.furl, flog_gatherer, name, web_port, storage=True, needed=needed, happy=happy, total=total, ) storage = StorageServer( process=node_process, # node_process is a TahoeProcess. its transport is an # IProcessTransport. in practice, this means it is a # twisted.internet._baseprocess.BaseProcess. BaseProcess records the # process protocol as its proto attribute. protocol=node_process.transport.proto, ) returnValue(storage) @attr.s class Client(object): """ Represents a Tahoe client """ process = attr.ib( validator=attr.validators.instance_of(TahoeProcess) ) protocol = attr.ib( validator=provides(IProcessProtocol) ) request = attr.ib() # original request, for addfinalizer() ## XXX convenience? or confusion? 
# @property # def node_dir(self): # return self.process.node_dir @inlineCallbacks def reconfigure_zfec(self, reactor, zfec_params, convergence=None, max_segment_size=None): """ Reconfigure the ZFEC parameters for this node """ # XXX this is a stop-gap to keep tests running "as is" # -> we should fix the tests so that they create a new client # in the grid with the required parameters, instead of # re-configuring Alice (or whomever) rtn = yield Deferred.fromCoroutine( reconfigure(reactor, self.request, self.process, zfec_params, convergence, max_segment_size) ) return rtn @inlineCallbacks def restart(self, reactor, request, servers=1): """ re-start our underlying process by issuing a TERM, waiting and then running again. :param int servers: number of server connections we will wait for before being 'ready' Note that self.process and self.protocol will be new instances after this. """ # XXX similar to above, can we make this return a new instance # instead of mutating? self.process.transport.signalProcess('TERM') yield self.protocol.exited process = yield _run_node( reactor, self.process.node_dir, request, None, ) self.process = process self.protocol = self.process.transport.proto yield await_client_ready(self.process, minimum_number_of_servers=servers) @inlineCallbacks def add_sftp(self, reactor, request): """ """ # if other things need to add or change configuration, further # refactoring could be useful here (i.e. move reconfigure # parts to their own functions) # XXX why do we need an alias? # 1. Create a new RW directory cap: cli(self.process, "create-alias", "test") rwcap = loads(cli(self.process, "list-aliases", "--json"))["test"]["readwrite"] # 2. Enable SFTP on the node: host_ssh_key_path = join(self.process.node_dir, "private", "ssh_host_rsa_key") sftp_client_key_path = join(self.process.node_dir, "private", "ssh_client_rsa_key") accounts_path = join(self.process.node_dir, "private", "accounts") with open(join(self.process.node_dir, "tahoe.cfg"), "a") as f: f.write( ("\n\n[sftpd]\n" "enabled = true\n" "port = tcp:8022:interface=127.0.0.1\n" "host_pubkey_file = {ssh_key_path}.pub\n" "host_privkey_file = {ssh_key_path}\n" "accounts.file = {accounts_path}\n").format( ssh_key_path=host_ssh_key_path, accounts_path=accounts_path, ) ) generate_ssh_key(host_ssh_key_path) # 3. Add a SFTP access file with an SSH key for auth. generate_ssh_key(sftp_client_key_path) # Pub key format is "ssh-rsa ". We want the key. with open(sftp_client_key_path + ".pub") as pubkey_file: ssh_public_key = pubkey_file.read().strip().split()[1] with open(accounts_path, "w") as f: f.write( "alice-key ssh-rsa {ssh_public_key} {rwcap}\n".format( rwcap=rwcap, ssh_public_key=ssh_public_key, ) ) # 4. Restart the node with new SFTP config. print("restarting for SFTP") yield self.restart(reactor, request) print("restart done") # XXX i think this is broken because we're "waiting for ready" during first bootstrap? or something? 
@inlineCallbacks def create_client(reactor, request, temp_dir, introducer, flog_gatherer, name, web_port, needed=2, happy=3, total=4): """ Create a new storage server """ from .util import _create_node node_process = yield _create_node( reactor, request, temp_dir, introducer.furl, flog_gatherer, name, web_port, storage=False, needed=needed, happy=happy, total=total, ) returnValue( Client( process=node_process, protocol=node_process.transport.proto, request=request, ) ) @attr.s class Introducer(object): """ Reprsents a running introducer """ process = attr.ib( validator=attr.validators.instance_of(TahoeProcess) ) protocol = attr.ib( validator=provides(IProcessProtocol) ) furl = attr.ib() def _validate_furl(furl_fname): """ Opens and validates a fURL, ensuring location hints. :returns: the furl :raises: ValueError if no location hints """ while not exists(furl_fname): print("Don't see {} yet".format(furl_fname)) sleep(.1) furl = open(furl_fname, 'r').read() tubID, location_hints, name = decode_furl(furl) if not location_hints: # If there are no location hints then nothing can ever possibly # connect to it and the only thing that can happen next is something # will hang or time out. So just give up right now. raise ValueError( "Introducer ({!r}) fURL has no location hints!".format( furl, ), ) return furl @inlineCallbacks @log_call( action_type=u"integration:introducer", include_args=["temp_dir", "flog_gatherer"], include_result=False, ) def create_introducer(reactor, request, temp_dir, flog_gatherer, port): """ Run a new Introducer and return an Introducer instance. """ intro_dir = join(temp_dir, 'introducer{}'.format(port)) if not exists(intro_dir): mkdir(intro_dir) done_proto = _ProcessExitedProtocol() _tahoe_runner_optional_coverage( done_proto, reactor, request, ( 'create-introducer', '--listen=tcp', '--hostname=localhost', intro_dir, ), ) yield done_proto.done config = read_config(intro_dir, "tub.port") config.set_config("node", "nickname", f"introducer-{port}") config.set_config("node", "web.port", f"{port}") config.set_config("node", "log_gatherer.furl", flog_gatherer.furl) # on windows, "tahoe start" means: run forever in the foreground, # but on linux it means daemonize. "tahoe run" is consistent # between platforms. protocol = _MagicTextProtocol('introducer running', "introducer") transport = _tahoe_runner_optional_coverage( protocol, reactor, request, ( 'run', intro_dir, ), ) def clean(): return _cleanup_tahoe_process(transport, protocol.exited) request.addfinalizer(clean) yield protocol.magic_seen furl_fname = join(intro_dir, 'private', 'introducer.furl') while not exists(furl_fname): print("Don't see {} yet".format(furl_fname)) yield deferLater(reactor, .1, lambda: None) furl = _validate_furl(furl_fname) returnValue( Introducer( process=TahoeProcess(transport, intro_dir), protocol=protocol, furl=furl, ) ) @attr.s class Grid(object): """ Represents an entire Tahoe Grid setup A Grid includes an Introducer, Flog Gatherer and some number of Storage Servers. Optionally includes Clients. 
""" _reactor = attr.ib() _request = attr.ib() _temp_dir = attr.ib() _port_allocator = attr.ib() introducer = attr.ib() flog_gatherer = attr.ib() storage_servers = attr.ib(factory=list) clients = attr.ib(factory=dict) @storage_servers.validator def check(self, attribute, value): for server in value: if not isinstance(server, StorageServer): raise ValueError( "storage_servers must be StorageServer" ) @inlineCallbacks def add_storage_node(self): """ Creates a new storage node, returns a StorageServer instance (which will already be added to our .storage_servers list) """ port = yield self._port_allocator() print("make {}".format(port)) name = 'node{}'.format(port) web_port = 'tcp:{}:interface=localhost'.format(port) server = yield create_storage_server( self._reactor, self._request, self._temp_dir, self.introducer, self.flog_gatherer, name, web_port, ) self.storage_servers.append(server) returnValue(server) @inlineCallbacks def add_client(self, name, needed=2, happy=3, total=4): """ Create a new client node """ port = yield self._port_allocator() web_port = 'tcp:{}:interface=localhost'.format(port) client = yield create_client( self._reactor, self._request, self._temp_dir, self.introducer, self.flog_gatherer, name, web_port, needed=needed, happy=happy, total=total, ) self.clients[name] = client yield await_client_ready(client.process) returnValue(client) # A grid is now forever tied to its original 'request' which is where # it must hang finalizers off of. The "main" one is a session-level # fixture so it'll live the life of the tests but it could be # per-function Grid too. @inlineCallbacks def create_grid(reactor, request, temp_dir, flog_gatherer, port_allocator): """ Create a new grid. This will have one Introducer but zero storage-servers or clients; those must be added by a test or subsequent fixtures. """ intro_port = yield port_allocator() introducer = yield create_introducer(reactor, request, temp_dir, flog_gatherer, intro_port) grid = Grid( reactor, request, temp_dir, port_allocator, introducer, flog_gatherer, ) returnValue(grid) tahoe_lafs-1.20.0/integration/test_aaa_aardvark.py0000644000000000000000000000111013615410400017141 0ustar00# So these dummy tests run first and instantiate the pre-requisites # first (e.g. introducer) and therefore print "something" on the # console as we go (a . or the test-name in "-v"/verbose mode) # You can safely skip any of these tests, it'll just appear to "take # longer" to start the first test as the fixtures get built def test_create_flogger(flog_gatherer): print("Created flog_gatherer") def test_create_introducer(introducer): print("Created introducer") def test_create_storage(storage_nodes): print("Created {} storage nodes".format(len(storage_nodes))) tahoe_lafs-1.20.0/integration/test_get_put.py0000644000000000000000000000762313615410400016232 0ustar00""" Integration tests for getting and putting files, including reading from stdin and stdout. 
""" from subprocess import Popen, PIPE, check_output, check_call import pytest from twisted.internet import reactor from twisted.internet.threads import blockingCallFromThread from .util import run_in_thread, cli DATA = b"abc123 this is not utf-8 decodable \xff\x00\x33 \x11" try: DATA.decode("utf-8") except UnicodeDecodeError: pass # great, what we want else: raise ValueError("BUG, the DATA string was decoded from UTF-8") @pytest.fixture(scope="session") def get_put_alias(alice): cli(alice.process, "create-alias", "getput") def read_bytes(path): with open(path, "rb") as f: return f.read() @run_in_thread def test_put_from_stdin(alice, get_put_alias, tmpdir): """ It's possible to upload a file via `tahoe put`'s STDIN, and then download it to a file. """ tempfile = str(tmpdir.join("file")) p = Popen( ["tahoe", "--node-directory", alice.process.node_dir, "put", "-", "getput:fromstdin"], stdin=PIPE ) p.stdin.write(DATA) p.stdin.close() assert p.wait() == 0 cli(alice.process, "get", "getput:fromstdin", tempfile) assert read_bytes(tempfile) == DATA @run_in_thread def test_get_to_stdout(alice, get_put_alias, tmpdir): """ It's possible to upload a file, and then download it to stdout. """ tempfile = tmpdir.join("file") with tempfile.open("wb") as f: f.write(DATA) cli(alice.process, "put", str(tempfile), "getput:tostdout") p = Popen( ["tahoe", "--node-directory", alice.process.node_dir, "get", "getput:tostdout", "-"], stdout=PIPE ) assert p.stdout.read() == DATA assert p.wait() == 0 @run_in_thread def test_large_file(alice, get_put_alias, tmp_path): """ It's possible to upload and download a larger file. We avoid stdin/stdout since that's flaky on Windows. """ tempfile = tmp_path / "file" with tempfile.open("wb") as f: f.write(DATA * 1_000_000) cli(alice.process, "put", str(tempfile), "getput:largefile") outfile = tmp_path / "out" check_call( ["tahoe", "--node-directory", alice.process.node_dir, "get", "getput:largefile", str(outfile)], ) assert outfile.read_bytes() == tempfile.read_bytes() @run_in_thread def test_upload_download_immutable_different_default_max_segment_size(alice, get_put_alias, tmpdir, request): """ Tahoe-LAFS used to have a default max segment size of 128KB, and is now 1MB. Test that an upload created when 128KB was the default can be downloaded with 1MB as the default (i.e. old uploader, new downloader), and vice versa, (new uploader, old downloader). """ tempfile = tmpdir.join("file") large_data = DATA * 100_000 assert len(large_data) > 2 * 1024 * 1024 with tempfile.open("wb") as f: f.write(large_data) def set_segment_size(segment_size): return blockingCallFromThread( reactor, lambda: alice.reconfigure_zfec( reactor, (1, 1, 1), None, max_segment_size=segment_size ) ) # 1. Upload file 1 with default segment size set to 1MB set_segment_size(1024 * 1024) cli(alice.process, "put", str(tempfile), "getput:seg1024kb") # 2. Download file 1 with default segment size set to 128KB set_segment_size(128 * 1024) assert large_data == check_output( ["tahoe", "--node-directory", alice.process.node_dir, "get", "getput:seg1024kb", "-"] ) # 3. Upload file 2 with default segment size set to 128KB cli(alice.process, "put", str(tempfile), "getput:seg128kb") # 4. 
Download file 2 with default segment size set to 1MB set_segment_size(1024 * 1024) assert large_data == check_output( ["tahoe", "--node-directory", alice.process.node_dir, "get", "getput:seg128kb", "-"] ) tahoe_lafs-1.20.0/integration/test_grid_manager.py0000644000000000000000000002665113615410400017204 0ustar00import sys import json from os.path import join from cryptography.hazmat.primitives.serialization import ( Encoding, PublicFormat, ) from twisted.internet.utils import ( getProcessOutputAndValue, ) from twisted.internet.defer import ( inlineCallbacks, returnValue, ) from allmydata.crypto import ed25519 from allmydata.util import base32 from allmydata.util import configutil from . import util from .grid import ( create_grid, ) import pytest_twisted @inlineCallbacks def _run_gm(reactor, request, *args, **kwargs): """ Run the grid-manager process, passing all arguments as extra CLI args. :returns: all process output """ if request.config.getoption('coverage'): base_args = ("-b", "-m", "coverage", "run", "-m", "allmydata.cli.grid_manager") else: base_args = ("-m", "allmydata.cli.grid_manager") output, errput, exit_code = yield getProcessOutputAndValue( sys.executable, base_args + args, reactor=reactor, **kwargs ) if exit_code != 0: raise util.ProcessFailed( RuntimeError("Exit code {}".format(exit_code)), output + errput, ) returnValue(output) @pytest_twisted.inlineCallbacks def test_create_certificate(reactor, request): """ The Grid Manager produces a valid, correctly-signed certificate. """ gm_config = yield _run_gm(reactor, request, "--config", "-", "create") privkey_bytes = json.loads(gm_config)['private_key'].encode('ascii') privkey, pubkey = ed25519.signing_keypair_from_string(privkey_bytes) # Note that zara + her key here are arbitrary and don't match any # "actual" clients in the test-grid; we're just checking that the # Grid Manager signs this properly. 
gm_config = yield _run_gm( reactor, request, "--config", "-", "add", "zara", "pub-v0-kzug3ut2m7ziihf3ndpqlquuxeie4foyl36wn54myqc4wmiwe4ga", stdinBytes=gm_config, ) zara_cert_bytes = yield _run_gm( reactor, request, "--config", "-", "sign", "zara", "1", stdinBytes=gm_config, ) zara_cert = json.loads(zara_cert_bytes) # confirm that zara's certificate is made by the Grid Manager # (.verify returns None on success, raises exception on error) pubkey.verify( base32.a2b(zara_cert['signature'].encode('ascii')), zara_cert['certificate'].encode('ascii'), ) @pytest_twisted.inlineCallbacks def test_remove_client(reactor, request): """ A Grid Manager can add and successfully remove a client """ gm_config = yield _run_gm( reactor, request, "--config", "-", "create", ) gm_config = yield _run_gm( reactor, request, "--config", "-", "add", "zara", "pub-v0-kzug3ut2m7ziihf3ndpqlquuxeie4foyl36wn54myqc4wmiwe4ga", stdinBytes=gm_config, ) gm_config = yield _run_gm( reactor, request, "--config", "-", "add", "yakov", "pub-v0-kvxhb3nexybmipkrar2ztfrwp4uxxsmrjzkpzafit3ket4u5yldq", stdinBytes=gm_config, ) assert "zara" in json.loads(gm_config)['storage_servers'] assert "yakov" in json.loads(gm_config)['storage_servers'] gm_config = yield _run_gm( reactor, request, "--config", "-", "remove", "zara", stdinBytes=gm_config, ) assert "zara" not in json.loads(gm_config)['storage_servers'] assert "yakov" in json.loads(gm_config)['storage_servers'] @pytest_twisted.inlineCallbacks def test_remove_last_client(reactor, request): """ A Grid Manager can remove all clients """ gm_config = yield _run_gm( reactor, request, "--config", "-", "create", ) gm_config = yield _run_gm( reactor, request, "--config", "-", "add", "zara", "pub-v0-kzug3ut2m7ziihf3ndpqlquuxeie4foyl36wn54myqc4wmiwe4ga", stdinBytes=gm_config, ) assert "zara" in json.loads(gm_config)['storage_servers'] gm_config = yield _run_gm( reactor, request, "--config", "-", "remove", "zara", stdinBytes=gm_config, ) # there are no storage servers left at all now assert "storage_servers" not in json.loads(gm_config) @pytest_twisted.inlineCallbacks def test_add_remove_client_file(reactor, request, temp_dir): """ A Grid Manager can add and successfully remove a client (when keeping data on disk) """ gmconfig = join(temp_dir, "gmtest") gmconfig_file = join(temp_dir, "gmtest", "config.json") yield _run_gm( reactor, request, "--config", gmconfig, "create", ) yield _run_gm( reactor, request, "--config", gmconfig, "add", "zara", "pub-v0-kzug3ut2m7ziihf3ndpqlquuxeie4foyl36wn54myqc4wmiwe4ga", ) yield _run_gm( reactor, request, "--config", gmconfig, "add", "yakov", "pub-v0-kvxhb3nexybmipkrar2ztfrwp4uxxsmrjzkpzafit3ket4u5yldq", ) assert "zara" in json.load(open(gmconfig_file, "r"))['storage_servers'] assert "yakov" in json.load(open(gmconfig_file, "r"))['storage_servers'] yield _run_gm( reactor, request, "--config", gmconfig, "remove", "zara", ) assert "zara" not in json.load(open(gmconfig_file, "r"))['storage_servers'] assert "yakov" in json.load(open(gmconfig_file, "r"))['storage_servers'] @pytest_twisted.inlineCallbacks def _test_reject_storage_server(reactor, request, temp_dir, flog_gatherer, port_allocator): """ A client with happines=2 fails to upload to a Grid when it is using Grid Manager and there is only 1 storage server with a valid certificate. 
""" grid = yield create_grid(reactor, request, temp_dir, flog_gatherer, port_allocator) storage0 = yield grid.add_storage_node() _ = yield grid.add_storage_node() gm_config = yield _run_gm( reactor, request, "--config", "-", "create", ) gm_privkey_bytes = json.loads(gm_config)['private_key'].encode('ascii') gm_privkey, gm_pubkey = ed25519.signing_keypair_from_string(gm_privkey_bytes) # create certificate for the first storage-server pubkey_fname = join(storage0.process.node_dir, "node.pubkey") with open(pubkey_fname, 'r') as f: pubkey_str = f.read().strip() gm_config = yield _run_gm( reactor, request, "--config", "-", "add", "storage0", pubkey_str, stdinBytes=gm_config, ) assert json.loads(gm_config)['storage_servers'].keys() == {'storage0'} print("inserting certificate") cert = yield _run_gm( reactor, request, "--config", "-", "sign", "storage0", "1", stdinBytes=gm_config, ) print(cert) yield util.run_tahoe( reactor, request, "--node-directory", storage0.process.node_dir, "admin", "add-grid-manager-cert", "--name", "default", "--filename", "-", stdin=cert, ) # re-start this storage server yield storage0.restart(reactor, request) # now only one storage-server has the certificate .. configure # diana to have the grid-manager certificate diana = yield grid.add_client("diana", needed=2, happy=2, total=2) config = configutil.get_config(join(diana.process.node_dir, "tahoe.cfg")) config.add_section("grid_managers") config.set("grid_managers", "test", str(ed25519.string_from_verifying_key(gm_pubkey), "ascii")) with open(join(diana.process.node_dir, "tahoe.cfg"), "w") as f: config.write(f) yield diana.restart(reactor, request, servers=2) # try to put something into the grid, which should fail (because # diana has happy=2 but should only find storage0 to be acceptable # to upload to) try: yield util.run_tahoe( reactor, request, "--node-directory", diana.process.node_dir, "put", "-", stdin=b"some content\n" * 200, ) assert False, "Should get a failure" except util.ProcessFailed as e: if b'UploadUnhappinessError' in e.output: # We're done! We've succeeded. return assert False, "Failed to see one of out of two servers" @pytest_twisted.inlineCallbacks def _test_accept_storage_server(reactor, request, temp_dir, flog_gatherer, port_allocator): """ Successfully upload to a Grid Manager enabled Grid. 
""" grid = yield create_grid(reactor, request, temp_dir, flog_gatherer, port_allocator) happy0 = yield grid.add_storage_node() happy1 = yield grid.add_storage_node() gm_config = yield _run_gm( reactor, request, "--config", "-", "create", ) gm_privkey_bytes = json.loads(gm_config)['private_key'].encode('ascii') gm_privkey, gm_pubkey = ed25519.signing_keypair_from_string(gm_privkey_bytes) # create certificates for all storage-servers servers = ( ("happy0", happy0), ("happy1", happy1), ) for st_name, st in servers: pubkey_fname = join(st.process.node_dir, "node.pubkey") with open(pubkey_fname, 'r') as f: pubkey_str = f.read().strip() gm_config = yield _run_gm( reactor, request, "--config", "-", "add", st_name, pubkey_str, stdinBytes=gm_config, ) assert json.loads(gm_config)['storage_servers'].keys() == {'happy0', 'happy1'} # add the certificates from the grid-manager to the storage servers print("inserting storage-server certificates") for st_name, st in servers: cert = yield _run_gm( reactor, request, "--config", "-", "sign", st_name, "1", stdinBytes=gm_config, ) yield util.run_tahoe( reactor, request, "--node-directory", st.process.node_dir, "admin", "add-grid-manager-cert", "--name", "default", "--filename", "-", stdin=cert, ) # re-start the storage servers yield happy0.restart(reactor, request) yield happy1.restart(reactor, request) # configure freya (a client) to have the grid-manager certificate freya = yield grid.add_client("freya", needed=2, happy=2, total=2) config = configutil.get_config(join(freya.process.node_dir, "tahoe.cfg")) config.add_section("grid_managers") config.set("grid_managers", "test", str(ed25519.string_from_verifying_key(gm_pubkey), "ascii")) with open(join(freya.process.node_dir, "tahoe.cfg"), "w") as f: config.write(f) yield freya.restart(reactor, request, servers=2) # confirm that Freya will upload to the GridManager-enabled Grid yield util.run_tahoe( reactor, request, "--node-directory", freya.process.node_dir, "put", "-", stdin=b"some content\n" * 200, ) @pytest_twisted.inlineCallbacks def test_identity(reactor, request, temp_dir): """ Dump public key to CLI """ gm_config = join(temp_dir, "test_identity") yield _run_gm( reactor, request, "--config", gm_config, "create", ) # ask the CLI for the grid-manager pubkey pubkey = yield _run_gm( reactor, request, "--config", gm_config, "public-identity", ) alleged_pubkey = ed25519.verifying_key_from_string(pubkey.strip()) # load the grid-manager pubkey "ourselves" with open(join(gm_config, "config.json"), "r") as f: real_config = json.load(f) real_privkey, real_pubkey = ed25519.signing_keypair_from_string( real_config["private_key"].encode("ascii"), ) # confirm the CLI told us the correct thing alleged_bytes = alleged_pubkey.public_bytes(Encoding.Raw, PublicFormat.Raw) real_bytes = real_pubkey.public_bytes(Encoding.Raw, PublicFormat.Raw) assert alleged_bytes == real_bytes, "Keys don't match" tahoe_lafs-1.20.0/integration/test_i2p.py0000644000000000000000000001544613615410400015257 0ustar00""" Integration tests for I2P support. """ import sys from os.path import join, exists from os import mkdir, environ from time import sleep from shutil import which from eliot import log_call import pytest import pytest_twisted from . 
import util from twisted.python.filepath import ( FilePath, ) from twisted.internet.error import ProcessExitedAlready from allmydata.test.common import ( write_introducer, ) from allmydata.node import read_config from allmydata.util.iputil import allocate_tcp_port if which("docker") is None: pytest.skip('Skipping I2P tests since Docker is unavailable', allow_module_level=True) # Docker on Windows machines sometimes expects Windows-y Docker images, so just # don't bother. if sys.platform.startswith('win'): pytest.skip('Skipping I2P tests on Windows', allow_module_level=True) @pytest.fixture def i2p_network(reactor, temp_dir, request): """Fixture to start up local i2pd.""" proto = util._MagicTextProtocol("ephemeral keys", "i2pd") reactor.spawnProcess( proto, which("docker"), ( "docker", "run", "-p", "7656:7656", "purplei2p/i2pd:release-2.45.1", # Bad URL for reseeds, so it can't talk to other routers. "--reseed.urls", "http://localhost:1/", # Make sure we see the "ephemeral keys message" "--log=stdout", "--loglevel=info" ), env=environ, ) def cleanup(): try: proto.transport.signalProcess("INT") util.block_with_timeout(proto.exited, reactor) except ProcessExitedAlready: pass request.addfinalizer(cleanup) util.block_with_timeout(proto.magic_seen, reactor, timeout=30) @pytest.fixture @log_call( action_type=u"integration:i2p:introducer", include_args=["temp_dir", "flog_gatherer"], include_result=False, ) def i2p_introducer(reactor, temp_dir, flog_gatherer, request): intro_dir = join(temp_dir, 'introducer_i2p') print("making introducer", intro_dir) if not exists(intro_dir): mkdir(intro_dir) done_proto = util._ProcessExitedProtocol() util._tahoe_runner_optional_coverage( done_proto, reactor, request, ( 'create-introducer', '--listen=i2p', intro_dir, ), ) pytest_twisted.blockon(done_proto.done) # over-write the config file with our stuff config = read_config(intro_dir, "tub.port") config.set_config("node", "nickname", "introducer_i2p") config.set_config("node", "web.port", "4563") config.set_config("node", "log_gatherer.furl", flog_gatherer) # "tahoe run" is consistent across Linux/macOS/Windows, unlike the old # "start" command. protocol = util._MagicTextProtocol('introducer running', "introducer") transport = util._tahoe_runner_optional_coverage( protocol, reactor, request, ( 'run', intro_dir, ), ) def cleanup(): try: transport.signalProcess('TERM') util.block_with_timeout(protocol.exited, reactor) except ProcessExitedAlready: pass request.addfinalizer(cleanup) pytest_twisted.blockon(protocol.magic_seen) return transport @pytest.fixture def i2p_introducer_furl(i2p_introducer, temp_dir): furl_fname = join(temp_dir, 'introducer_i2p', 'private', 'introducer.furl') while not exists(furl_fname): print("Don't see {} yet".format(furl_fname)) sleep(.1) furl = open(furl_fname, 'r').read() return furl @pytest_twisted.inlineCallbacks @pytest.mark.skip("I2P tests are not functioning at all, for unknown reasons") def test_i2p_service_storage(reactor, request, temp_dir, flog_gatherer, i2p_network, i2p_introducer_furl): web_port0 = allocate_tcp_port() web_port1 = allocate_tcp_port() yield _create_anonymous_node(reactor, 'carol_i2p', web_port0, request, temp_dir, flog_gatherer, i2p_network, i2p_introducer_furl) yield _create_anonymous_node(reactor, 'dave_i2p', web_port1, request, temp_dir, flog_gatherer, i2p_network, i2p_introducer_furl) # ensure both nodes are connected to "a grid" by uploading # something via carol, and retrieve it using dave. 
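# (Rough CLI-level equivalent of what follows -- a sketch, where <temp_dir>
# stands for the fixture's temporary directory and <cap> for the capability
# printed by the upload:
#
#   tahoe -d <temp_dir>/carol_i2p put gold     # prints a capability
#   tahoe -d <temp_dir>/dave_i2p  get <cap>    # prints the file contents
#
# except that the spawnProcess calls below invoke
# "python -m allmydata.scripts.runner" directly instead of the "tahoe"
# console script.)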
gold_path = join(temp_dir, "gold") with open(gold_path, "w") as f: f.write( "The object-capability model is a computer security model. A " "capability describes a transferable right to perform one (or " "more) operations on a given object." ) # XXX could use treq or similar to POST these to their respective # WUIs instead ... proto = util._CollectOutputProtocol() reactor.spawnProcess( proto, sys.executable, ( sys.executable, '-b', '-m', 'allmydata.scripts.runner', '-d', join(temp_dir, 'carol_i2p'), 'put', gold_path, ), env=environ, ) yield proto.done cap = proto.output.getvalue().strip().split()[-1] print("TEH CAP!", cap) proto = util._CollectOutputProtocol(capture_stderr=False) reactor.spawnProcess( proto, sys.executable, ( sys.executable, '-b', '-m', 'allmydata.scripts.runner', '-d', join(temp_dir, 'dave_i2p'), 'get', cap, ), env=environ, ) yield proto.done dave_got = proto.output.getvalue().strip() assert dave_got == open(gold_path, 'rb').read().strip() @pytest_twisted.inlineCallbacks def _create_anonymous_node(reactor, name, web_port, request, temp_dir, flog_gatherer, i2p_network, introducer_furl): node_dir = FilePath(temp_dir).child(name) print("creating", node_dir.path) node_dir.makedirs() proto = util._DumpOutputProtocol(None) reactor.spawnProcess( proto, sys.executable, ( sys.executable, '-b', '-m', 'allmydata.scripts.runner', 'create-node', '--nickname', name, '--introducer', introducer_furl, '--hide-ip', '--listen', 'i2p', node_dir.path, ), env=environ, ) yield proto.done # Which services should this client connect to? write_introducer(node_dir, "default", introducer_furl) with node_dir.child('tahoe.cfg').open('w') as f: node_config = ''' [node] nickname = %(name)s web.port = %(web_port)s web.static = public_html log_gatherer.furl = %(log_furl)s [i2p] enabled = true [client] shares.needed = 1 shares.happy = 1 shares.total = 2 ''' % { 'name': name, 'web_port': web_port, 'log_furl': flog_gatherer, } node_config = node_config.encode("utf-8") f.write(node_config) print("running") yield util._run_node(reactor, node_dir.path, request, None) print("okay, launched") tahoe_lafs-1.20.0/integration/test_servers_of_happiness.py0000644000000000000000000000234413615410400021005 0ustar00""" Ported to Python 3. """ import sys from os.path import join from os import environ from . import util import pytest_twisted @pytest_twisted.inlineCallbacks def test_upload_immutable(reactor, temp_dir, introducer_furl, flog_gatherer, storage_nodes, request): edna = yield util._create_node( reactor, request, temp_dir, introducer_furl, flog_gatherer, "edna", web_port="tcp:9983:interface=localhost", storage=False, needed=3, happy=7, total=10, ) yield util.await_client_ready(edna) node_dir = join(temp_dir, 'edna') # upload a file, which should fail because we have don't have 7 # storage servers (but happiness is set to 7) proto = util._CollectOutputProtocol() reactor.spawnProcess( proto, sys.executable, [ sys.executable, '-b', '-m', 'allmydata.scripts.runner', '-d', node_dir, 'put', __file__, ], env=environ, ) try: yield proto.done assert False, "should raise exception" except util.ProcessFailed as e: assert b"UploadUnhappinessError" in e.output output = proto.output.getvalue() assert b"shares could be placed on only" in output tahoe_lafs-1.20.0/integration/test_sftp.py0000644000000000000000000001126313615410400015532 0ustar00""" It's possible to create/rename/delete files and directories in Tahoe-LAFS using SFTP. These tests use Paramiko, rather than Twisted's Conch, because: 1. 
It's a different implementation, so we're not testing Conch against itself. 2. Its API is much simpler to use. """ import os.path from posixpath import join from stat import S_ISDIR from paramiko import SSHClient from paramiko.client import AutoAddPolicy from paramiko.sftp_client import SFTPClient from paramiko.ssh_exception import AuthenticationException from paramiko.rsakey import RSAKey import pytest from .util import generate_ssh_key, run_in_thread def connect_sftp(connect_args): """Create an SFTP client.""" client = SSHClient() client.set_missing_host_key_policy(AutoAddPolicy) client.connect("localhost", port=8022, look_for_keys=False, allow_agent=False, **connect_args) sftp = SFTPClient.from_transport(client.get_transport()) def rmdir(path, delete_root=True): for f in sftp.listdir_attr(path=path): childpath = join(path, f.filename) if S_ISDIR(f.st_mode): rmdir(childpath) else: sftp.remove(childpath) if delete_root: sftp.rmdir(path) # Delete any files left over from previous tests :( rmdir("/", delete_root=False) return sftp @run_in_thread def test_bad_account_password_ssh_key(alice, tmpdir): """ Can't login with unknown username, any password, or wrong SSH pub key. """ # Any password, wrong username: for u, p in [("alice-key", "wrong"), ("someuser", "password")]: with pytest.raises(AuthenticationException): connect_sftp(connect_args={ "username": u, "password": p, }) another_key = os.path.join(str(tmpdir), "ssh_key") generate_ssh_key(another_key) good_key = RSAKey(filename=os.path.join(alice.process.node_dir, "private", "ssh_client_rsa_key")) bad_key = RSAKey(filename=another_key) # Wrong key: with pytest.raises(AuthenticationException): connect_sftp(connect_args={ "username": "alice-key", "pkey": bad_key, }) # Wrong username: with pytest.raises(AuthenticationException): connect_sftp(connect_args={ "username": "someoneelse", "pkey": good_key, }) def sftp_client_key(client): """ :return RSAKey: the RSA client key associated with this grid.Client """ # XXX move to Client / grid.py? return RSAKey( filename=os.path.join(client.process.node_dir, "private", "ssh_client_rsa_key"), ) @run_in_thread def test_ssh_key_auth(alice): """It's possible to login authenticating with SSH public key.""" key = sftp_client_key(alice) sftp = connect_sftp(connect_args={ "username": "alice-key", "pkey": key }) assert sftp.listdir() == [] @run_in_thread def test_read_write_files(alice): """It's possible to upload and download files.""" sftp = connect_sftp(connect_args={ "username": "alice-key", "pkey": sftp_client_key(alice), }) with sftp.file("myfile", "wb") as f: f.write(b"abc") f.write(b"def") with sftp.file("myfile", "rb") as f: assert f.read(4) == b"abcd" assert f.read(2) == b"ef" assert f.read(1) == b"" @run_in_thread def test_directories(alice): """ It's possible to create, list directories, and create and remove files in them. 
""" sftp = connect_sftp(connect_args={ "username": "alice-key", "pkey": sftp_client_key(alice), }) assert sftp.listdir() == [] sftp.mkdir("childdir") assert sftp.listdir() == ["childdir"] with sftp.file("myfile", "wb") as f: f.write(b"abc") assert sorted(sftp.listdir()) == ["childdir", "myfile"] sftp.chdir("childdir") assert sftp.listdir() == [] with sftp.file("myfile2", "wb") as f: f.write(b"def") assert sftp.listdir() == ["myfile2"] sftp.chdir(None) # root with sftp.file("childdir/myfile2", "rb") as f: assert f.read() == b"def" sftp.remove("myfile") assert sftp.listdir() == ["childdir"] sftp.rmdir("childdir") assert sftp.listdir() == [] @run_in_thread def test_rename(alice): """Directories and files can be renamed.""" sftp = connect_sftp(connect_args={ "username": "alice-key", "pkey": sftp_client_key(alice), }) sftp.mkdir("dir") filepath = join("dir", "file") with sftp.file(filepath, "wb") as f: f.write(b"abc") sftp.rename(filepath, join("dir", "file2")) sftp.rename("dir", "dir2") with sftp.file(join("dir2", "file2"), "rb") as f: assert f.read() == b"abc" tahoe_lafs-1.20.0/integration/test_streaming_logs.py0000644000000000000000000000674713615410400017606 0ustar00""" Ported to Python 3. """ from six import ensure_text import json from os.path import ( join, ) from urllib.parse import ( urlsplit, ) import attr from twisted.internet.defer import ( Deferred, ) from twisted.internet.endpoints import ( HostnameEndpoint, ) import treq from autobahn.twisted.websocket import ( WebSocketClientFactory, WebSocketClientProtocol, ) from allmydata.client import ( read_config, ) from allmydata.web.private import ( SCHEME, ) from allmydata.util.eliotutil import ( inline_callbacks, ) import pytest_twisted def _url_to_endpoint(reactor, url): netloc = urlsplit(url).netloc host, port = netloc.split(":") return HostnameEndpoint(reactor, host, int(port)) class _StreamingLogClientProtocol(WebSocketClientProtocol): def onOpen(self): self.factory.on_open.callback(self) def onMessage(self, payload, isBinary): if self.on_message is None: # Already did our job, ignore it return on_message = self.on_message self.on_message = None on_message.callback(payload) def onClose(self, wasClean, code, reason): self.on_close.callback(reason) def _connect_client(reactor, api_auth_token, ws_url): factory = WebSocketClientFactory( url=ws_url, headers={ "Authorization": "{} {}".format(str(SCHEME, "ascii"), api_auth_token), } ) factory.protocol = _StreamingLogClientProtocol factory.on_open = Deferred() endpoint = _url_to_endpoint(reactor, ws_url) return endpoint.connect(factory) def _race(left, right): """ Wait for the first result from either of two Deferreds. Any result, success or failure, causes the return Deferred to fire. It fires with either a Left or a Right instance depending on whether the left or right argument fired first. The Deferred that loses the race is cancelled and any result it eventually produces is discarded. 
""" racing = [True] def got_result(result, which): if racing: racing.pop() loser = which.pick(left, right) loser.cancel() finished.callback(which(result)) finished = Deferred() left.addBoth(got_result, Left) right.addBoth(got_result, Right) return finished @attr.s class Left(object): value = attr.ib() @classmethod def pick(cls, left, right): return left @attr.s class Right(object): value = attr.ib() @classmethod def pick(cls, left, right): return right @inline_callbacks def _test_streaming_logs(reactor, temp_dir, alice): cfg = read_config(join(temp_dir, "alice"), "portnum") node_url = cfg.get_config_from_file("node.url") api_auth_token = cfg.get_private_config("api_auth_token") ws_url = ensure_text(node_url).replace("http://", "ws://") log_url = ws_url + "private/logs/v1" print("Connecting to {}".format(log_url)) client = yield _connect_client(reactor, api_auth_token, log_url) print("Connected.") client.on_close = Deferred() client.on_message = Deferred() # Capture this now before on_message perhaps goes away. racing = _race(client.on_close, client.on_message) # Provoke _some_ log event. yield treq.get(node_url) result = yield racing assert isinstance(result, Right) json.loads(result.value) @pytest_twisted.inlineCallbacks def test_streaming_logs(reactor, temp_dir, alice): yield _test_streaming_logs(reactor, temp_dir, alice) tahoe_lafs-1.20.0/integration/test_tor.py0000644000000000000000000001404613615410400015364 0ustar00""" Ported to Python 3. """ import sys from os.path import join from os import environ import pytest import pytest_twisted from . import util from twisted.python.filepath import ( FilePath, ) from allmydata.test.common import ( write_introducer, ) from allmydata.client import read_config from allmydata.util.deferredutil import async_to_deferred # see "conftest.py" for the fixtures (e.g. "tor_network") # XXX: Integration tests that involve Tor do not run reliably on # Windows. They are skipped for now, in order to reduce CI noise. # # https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3347 if sys.platform.startswith('win'): pytest.skip('Skipping Tor tests on Windows', allow_module_level=True) @pytest.mark.skipif(sys.version_info[:2] > (3, 11), reason='Chutney still does not support 3.12') @pytest_twisted.inlineCallbacks def test_onion_service_storage(reactor, request, temp_dir, flog_gatherer, tor_network, tor_introducer_furl): """ Two nodes and an introducer all configured to use Tahoe. The two nodes can talk to the introducer and each other: we upload to one node, read from the other. """ carol = yield _create_anonymous_node(reactor, 'carol', 8100, request, temp_dir, flog_gatherer, tor_network, tor_introducer_furl, 2) dave = yield _create_anonymous_node(reactor, 'dave', 8101, request, temp_dir, flog_gatherer, tor_network, tor_introducer_furl, 2) yield util.await_client_ready(carol, minimum_number_of_servers=2, timeout=600) yield util.await_client_ready(dave, minimum_number_of_servers=2, timeout=600) yield upload_to_one_download_from_the_other(reactor, temp_dir, carol, dave) @async_to_deferred async def upload_to_one_download_from_the_other(reactor, temp_dir, upload_to: util.TahoeProcess, download_from: util.TahoeProcess): """ Ensure both nodes are connected to "a grid" by uploading something via one node, and retrieve it using the other. """ gold_path = join(temp_dir, "gold") with open(gold_path, "w") as f: f.write( "The object-capability model is a computer security model. A " "capability describes a transferable right to perform one (or " "more) operations on a given object." 
) # XXX could use treq or similar to POST these to their respective # WUIs instead ... proto = util._CollectOutputProtocol() reactor.spawnProcess( proto, sys.executable, ( sys.executable, '-b', '-m', 'allmydata.scripts.runner', '-d', upload_to.node_dir, 'put', gold_path, ), env=environ, ) await proto.done cap = proto.output.getvalue().strip().split()[-1] print("capability: {}".format(cap)) proto = util._CollectOutputProtocol(capture_stderr=False) reactor.spawnProcess( proto, sys.executable, ( sys.executable, '-b', '-m', 'allmydata.scripts.runner', '-d', download_from.node_dir, 'get', cap, ), env=environ, ) await proto.done download_got = proto.output.getvalue().strip() assert download_got == open(gold_path, 'rb').read().strip() @pytest_twisted.inlineCallbacks def _create_anonymous_node(reactor, name, web_port, request, temp_dir, flog_gatherer, tor_network, introducer_furl, shares_total: int) -> util.TahoeProcess: node_dir = FilePath(temp_dir).child(name) if node_dir.exists(): raise RuntimeError( "A node already exists in '{}'".format(node_dir) ) print(f"creating {node_dir.path} with introducer {introducer_furl}") node_dir.makedirs() proto = util._DumpOutputProtocol(None) reactor.spawnProcess( proto, sys.executable, ( sys.executable, '-b', '-m', 'allmydata.scripts.runner', 'create-node', '--nickname', name, '--webport', str(web_port), '--introducer', introducer_furl, '--hide-ip', '--tor-control-port', tor_network.client_control_endpoint, '--listen', 'tor', '--shares-needed', '1', '--shares-happy', '1', '--shares-total', str(shares_total), node_dir.path, ), env=environ, ) yield proto.done # Which services should this client connect to? write_introducer(node_dir, "default", introducer_furl) util.basic_node_configuration(request, flog_gatherer.furl, node_dir.path) config = read_config(node_dir.path, "tub.port") config.set_config("tor", "onion", "true") config.set_config("tor", "onion.external_port", "3457") config.set_config("tor", "control.port", tor_network.client_control_endpoint) config.set_config("tor", "onion.private_key_file", "private/tor_onion.privkey") print("running") result = yield util._run_node(reactor, node_dir.path, request, None) print("okay, launched") return result @pytest.mark.skipif(sys.version_info[:2] > (3, 11), reason='Chutney still does not support 3.12') @pytest.mark.skipif(sys.platform.startswith('darwin'), reason='This test has issues on macOS') @pytest_twisted.inlineCallbacks def test_anonymous_client(reactor, request, temp_dir, flog_gatherer, tor_network, introducer_furl): """ A normal node (normie) and a normal introducer are configured, and one node (anonymoose) which is configured to be anonymous by talking via Tor. Anonymoose should be able to communicate with normie. TODO how to ensure that anonymoose is actually using Tor? """ normie = yield util._create_node( reactor, request, temp_dir, introducer_furl, flog_gatherer, "normie", web_port="tcp:9989:interface=localhost", storage=True, needed=1, happy=1, total=1, ) yield util.await_client_ready(normie) anonymoose = yield _create_anonymous_node(reactor, 'anonymoose', 8102, request, temp_dir, flog_gatherer, tor_network, introducer_furl, 1) yield util.await_client_ready(anonymoose, minimum_number_of_servers=1, timeout=1200) yield upload_to_one_download_from_the_other(reactor, temp_dir, normie, anonymoose) tahoe_lafs-1.20.0/integration/test_vectors.py0000644000000000000000000000773113615410400016250 0ustar00""" Verify certain results against test vectors with well-known results. 
""" from __future__ import annotations from functools import partial from typing import AsyncGenerator, Iterator from itertools import starmap, product from attrs import evolve from pytest import mark from pytest_twisted import ensureDeferred from . import vectors from .vectors import parameters from .util import upload from .grid import Client @mark.parametrize('convergence', parameters.CONVERGENCE_SECRETS) def test_convergence(convergence): """ Convergence secrets are 16 bytes. """ assert isinstance(convergence, bytes), "Convergence secret must be bytes" assert len(convergence) == 16, "Convergence secret must by 16 bytes" @mark.slow @mark.parametrize('case,expected', vectors.capabilities.items()) @ensureDeferred async def test_capability(reactor, request, alice, case, expected): """ The capability that results from uploading certain well-known data with certain well-known parameters results in exactly the previously computed value. """ # rewrite alice's config to match params and convergence await alice.reconfigure_zfec( reactor, (1, case.params.required, case.params.total), case.convergence, case.segment_size) # upload data in the correct format actual = upload(alice, case.fmt, case.data) # compare the resulting cap to the expected result assert actual == expected @ensureDeferred async def skiptest_generate(reactor, request, alice): """ This is a helper for generating the test vectors. You can re-generate the test vectors by fixing the name of the test and running it. Normally this test doesn't run because it ran once and we captured its output. Other tests run against that output and we want them to run against the results produced originally, not a possibly ever-changing set of outputs. """ space = starmap( # segment_size could be a parameter someday but it's not easy to vary # using the Python implementation so it isn't one for now. partial(vectors.Case, segment_size=parameters.SEGMENT_SIZE), product( parameters.ZFEC_PARAMS, parameters.CONVERGENCE_SECRETS, parameters.OBJECT_DESCRIPTIONS, parameters.FORMATS, ), ) iterresults = generate(reactor, request, alice, space) results = [] async for result in iterresults: # Accumulate the new result results.append(result) # Then rewrite the whole output file with the new accumulator value. # This means that if we fail partway through, we will still have # recorded partial results -- instead of losing them all. vectors.save_capabilities(results) async def generate( reactor, request, alice: Client, cases: Iterator[vectors.Case], ) -> AsyncGenerator[[vectors.Case, str], None]: """ Generate all of the test vectors using the given node. :param reactor: The reactor to use to restart the Tahoe-LAFS node when it needs to be reconfigured. :param request: The pytest request object to use to arrange process cleanup. :param format: The name of the encryption/data format to use. :param alice: The Tahoe-LAFS node to use to generate the test vectors. :param case: The inputs for which to generate a value. :return: The capability for the case. """ # Share placement doesn't affect the resulting capability. For maximum # reliability of this generator, be happy if we can put shares anywhere happy = 1 for case in cases: await alice.reconfigure_zfec( reactor, (happy, case.params.required, case.params.total), case.convergence, case.segment_size ) # Give the format a chance to make an RSA key if it needs it. 
case = evolve(case, fmt=case.fmt.customize()) cap = upload(alice.process, case.fmt, case.data) yield case, cap tahoe_lafs-1.20.0/integration/test_web.py0000644000000000000000000006261613615410400015343 0ustar00""" These tests were originally written to achieve some level of coverage for the WebAPI functionality during Python3 porting (there aren't many tests of the Web API period). Most of the tests have cursory asserts and encode 'what the WebAPI did at the time of testing' -- not necessarily a cohesive idea of what the WebAPI *should* do in every situation. It's not clear the latter exists anywhere, however. """ from __future__ import annotations import time from base64 import urlsafe_b64encode from urllib.parse import unquote as url_unquote, quote as url_quote from cryptography.hazmat.primitives.serialization import load_pem_private_key from twisted.internet.threads import deferToThread from twisted.python.filepath import FilePath import allmydata.uri from allmydata.crypto.rsa import ( create_signing_keypair, der_string_from_signing_key, PrivateKey, PublicKey, ) from allmydata.mutable.common import derive_mutable_keys from allmydata.util import jsonbytes as json from . import util from .util import run_in_thread import requests import html5lib from bs4 import BeautifulSoup import pytest_twisted DATA_PATH = FilePath(__file__).parent().sibling("src").child("allmydata").child("test").child("data") @run_in_thread def test_index(alice): """ we can download the index file """ util.web_get(alice.process, u"") @run_in_thread def test_index_json(alice): """ we can download the index file as json """ data = util.web_get(alice.process, u"", params={u"t": u"json"}) # it should be valid json json.loads(data) @run_in_thread def test_upload_download(alice): """ upload a file, then download it via readcap """ FILE_CONTENTS = u"some contents" readcap = util.web_post( alice.process, u"uri", data={ u"t": u"upload", u"format": u"mdmf", }, files={ u"file": FILE_CONTENTS, }, ) readcap = readcap.strip() data = util.web_get( alice.process, u"uri", params={ u"uri": readcap, u"filename": u"boom", } ) assert str(data, "utf-8") == FILE_CONTENTS @run_in_thread def test_put(alice): """ use PUT to create a file """ FILE_CONTENTS = b"added via PUT" * 20 resp = requests.put( util.node_url(alice.process.node_dir, u"uri"), data=FILE_CONTENTS, ) cap = allmydata.uri.from_string(resp.text.strip().encode('ascii')) cfg = alice.process.get_config() assert isinstance(cap, allmydata.uri.CHKFileURI) assert cap.size == len(FILE_CONTENTS) assert cap.total_shares == int(cfg.get_config("client", "shares.total")) assert cap.needed_shares == int(cfg.get_config("client", "shares.needed")) @run_in_thread def test_helper_status(storage_nodes): """ successfully GET the /helper_status page """ url = util.node_url(storage_nodes[0].process.node_dir, "helper_status") resp = requests.get(url) assert resp.status_code >= 200 and resp.status_code < 300 dom = BeautifulSoup(resp.content, "html5lib") assert str(dom.h1.string) == u"Helper Status" @run_in_thread def test_deep_stats(alice): """ create a directory, do deep-stats on it and prove the /operations/ URIs work """ resp = requests.post( util.node_url(alice.process.node_dir, "uri"), params={ "format": "sdmf", "t": "mkdir", "redirect_to_result": "true", }, ) assert resp.status_code >= 200 and resp.status_code < 300 # when creating a directory, we'll be re-directed to a URL # containing our writecap.. 
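# (For illustration only -- the exact URL shape is assumed, not asserted
# by this test -- the redirect looks something like
#   http://127.0.0.1:<port>/uri/URI%3ADIR2%3A<writekey>%3A<fingerprint>/
# so after url-unquoting, everything from "URI:DIR2:" onwards is the
# directory writecap we extract below.)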
uri = url_unquote(resp.url) assert 'URI:DIR2:' in uri dircap = uri[uri.find("URI:DIR2:"):].rstrip('/') dircap_uri = util.node_url(alice.process.node_dir, "uri/{}".format(url_quote(dircap))) # POST a file into this directory FILE_CONTENTS = u"a file in a directory" resp = requests.post( dircap_uri, data={ u"t": u"upload", }, files={ u"file": FILE_CONTENTS, }, ) resp.raise_for_status() # confirm the file is in the directory resp = requests.get( dircap_uri, params={ u"t": u"json", }, ) d = json.loads(resp.content) k, data = d assert k == u"dirnode" assert len(data['children']) == 1 k, child = list(data['children'].values())[0] assert k == u"filenode" assert child['size'] == len(FILE_CONTENTS) # perform deep-stats on it... resp = requests.post( dircap_uri, data={ u"t": u"start-deep-stats", u"ophandle": u"something_random", }, ) assert resp.status_code >= 200 and resp.status_code < 300 # confirm we get information from the op .. after its done tries = 10 while tries > 0: tries -= 1 resp = requests.get( util.node_url(alice.process.node_dir, u"operations/something_random"), ) d = json.loads(resp.content) if d['size-literal-files'] == len(FILE_CONTENTS): print("stats completed successfully") break else: print("{} != {}; waiting".format(d['size-literal-files'], len(FILE_CONTENTS))) time.sleep(.5) @run_in_thread def test_status(alice): """ confirm we get something sensible from /status and the various sub-types """ # upload a file # (because of the nature of the integration-tests, we can only # assert things about "our" file because we don't know what other # operations may have happened in the grid before our test runs). FILE_CONTENTS = u"all the Important Data of alice\n" * 1200 resp = requests.put( util.node_url(alice.process.node_dir, u"uri"), data=FILE_CONTENTS, ) cap = resp.text.strip() print("Uploaded data, cap={}".format(cap)) resp = requests.get( util.node_url(alice.process.node_dir, u"uri/{}".format(url_quote(cap))), ) print("Downloaded {} bytes of data".format(len(resp.content))) assert str(resp.content, "ascii") == FILE_CONTENTS resp = requests.get( util.node_url(alice.process.node_dir, "status"), ) dom = html5lib.parse(resp.content) hrefs = [ a.get('href') for a in dom.iter(u'{http://www.w3.org/1999/xhtml}a') ] found_upload = False found_download = False for href in hrefs: if href == u"/" or not href: continue resp = requests.get(util.node_url(alice.process.node_dir, href)) if href.startswith(u"/status/up"): assert b"File Upload Status" in resp.content if b"Total Size: %d" % (len(FILE_CONTENTS),) in resp.content: found_upload = True elif href.startswith(u"/status/down"): assert b"File Download Status" in resp.content if b"Total Size: %d" % (len(FILE_CONTENTS),) in resp.content: found_download = True # download the specialized event information resp = requests.get( util.node_url(alice.process.node_dir, u"{}/event_json".format(href)), ) js = json.loads(resp.content) # there's usually just one "read" operation, but this can handle many .. total_bytes = sum([st['bytes_returned'] for st in js['read']], 0) assert total_bytes == len(FILE_CONTENTS) assert found_upload, "Failed to find the file we uploaded in the status-page" assert found_download, "Failed to find the file we downloaded in the status-page" @pytest_twisted.ensureDeferred async def test_directory_deep_check(reactor, request, alice): """ use deep-check and confirm the result pages work """ # Make sure the node is configured compatibly with expectations of this # test. 
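# (The check/deep-check assertions further down expect exactly 4 good
# shares on 4 share hosts with happiness 4, so the encoding parameters
# are pinned to 2-of-4 here rather than relying on whatever the fixture
# happened to configure.)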
happy = 3 required = 2 total = 4 await alice.reconfigure_zfec(reactor, (happy, required, total), convergence=None) await deferToThread(_test_directory_deep_check_blocking, alice) def _test_directory_deep_check_blocking(alice): # create a directory resp = requests.post( util.node_url(alice.process.node_dir, u"uri"), params={ u"t": u"mkdir", u"redirect_to_result": u"true", } ) # get json information about our directory dircap_url = resp.url resp = requests.get( dircap_url, params={u"t": u"json"}, ) # Just verify it is valid JSON. json.loads(resp.content) # upload a file of pangrams into the directory FILE_CONTENTS = u"Sphinx of black quartz, judge my vow.\n" * (2048*10) resp = requests.post( dircap_url, params={ u"t": u"upload", u"upload-chk": u"upload-chk", }, files={ u"file": FILE_CONTENTS, } ) cap0 = resp.content print("Uploaded data0, cap={}".format(cap0)) # a different pangram FILE_CONTENTS = u"The five boxing wizards jump quickly.\n" * (2048*10) resp = requests.post( dircap_url, params={ u"t": u"upload", u"upload-chk": u"upload-chk", }, files={ u"file": FILE_CONTENTS, } ) cap1 = resp.content print("Uploaded data1, cap={}".format(cap1)) resp = requests.get( util.node_url(alice.process.node_dir, u"uri/{}".format(url_quote(cap0))), params={u"t": u"info"}, ) def check_repair_data(checkdata): assert checkdata["healthy"] assert checkdata["count-happiness"] == 4 assert checkdata["count-good-share-hosts"] == 4 assert checkdata["count-shares-good"] == 4 assert checkdata["count-corrupt-shares"] == 0 assert checkdata["list-corrupt-shares"] == [] # do a "check" (once for HTML, then with JSON for easier asserts) resp = requests.post( dircap_url, params={ u"t": u"check", u"return_to": u".", u"verify": u"true", } ) resp = requests.post( dircap_url, params={ u"t": u"check", u"return_to": u".", u"verify": u"true", u"output": u"JSON", } ) check_repair_data(json.loads(resp.content)["results"]) # "check and repair" resp = requests.post( dircap_url, params={ u"t": u"check", u"return_to": u".", u"verify": u"true", u"repair": u"true", } ) resp = requests.post( dircap_url, params={ u"t": u"check", u"return_to": u".", u"verify": u"true", u"repair": u"true", u"output": u"JSON", } ) check_repair_data(json.loads(resp.content)["post-repair-results"]["results"]) # start a "deep check and repair" resp = requests.post( dircap_url, params={ u"t": u"start-deep-check", u"return_to": u".", u"verify": u"on", u"repair": u"on", u"output": u"JSON", u"ophandle": u"deadbeef", } ) deepcheck_uri = resp.url data = json.loads(resp.content) tries = 10 while not data['finished'] and tries > 0: tries -= 1 time.sleep(0.5) print("deep-check not finished, reloading") resp = requests.get(deepcheck_uri, params={u"output": "JSON"}) data = json.loads(resp.content) print("deep-check finished") assert data[u"stats"][u"count-immutable-files"] == 1 assert data[u"stats"][u"count-literal-files"] == 0 assert data[u"stats"][u"largest-immutable-file"] == 778240 assert data[u"count-objects-checked"] == 2 # also get the HTML version resp = requests.post( dircap_url, params={ u"t": u"start-deep-check", u"return_to": u".", u"verify": u"on", u"repair": u"on", u"ophandle": u"definitely_random", } ) deepcheck_uri = resp.url # if the operations isn't done, there's an
<h2> tag with the # reload link; otherwise there's only an <h1>
tag..wait up to 5 # seconds for this to respond properly. for _ in range(5): resp = requests.get(deepcheck_uri) dom = BeautifulSoup(resp.content, "html5lib") if dom.h1 and u'Results' in str(dom.h1.string): break if dom.h2 and dom.h2.a and u"Reload" in str(dom.h2.a.string): dom = None time.sleep(1) assert dom is not None, "Operation never completed" @run_in_thread def test_storage_info(storage_nodes): """ retrieve and confirm /storage URI for one storage node """ storage0 = storage_nodes[0] requests.get( util.node_url(storage0.process.node_dir, u"storage"), ) @run_in_thread def test_storage_info_json(storage_nodes): """ retrieve and confirm /storage?t=json URI for one storage node """ storage0 = storage_nodes[0] resp = requests.get( util.node_url(storage0.process.node_dir, u"storage"), params={u"t": u"json"}, ) data = json.loads(resp.content) assert data[u"stats"][u"storage_server.reserved_space"] == 1000000000 @run_in_thread def test_introducer_info(introducer): """ retrieve and confirm /introducer URI for the introducer """ resp = requests.get( util.node_url(introducer.process.node_dir, u""), ) assert b"Introducer" in resp.content resp = requests.get( util.node_url(introducer.process.node_dir, u""), params={u"t": u"json"}, ) data = json.loads(resp.content) assert "announcement_summary" in data assert "subscription_summary" in data @run_in_thread def test_mkdir_with_children(alice): """ create a directory using ?t=mkdir-with-children """ # create a file to put in our directory FILE_CONTENTS = u"some file contents\n" * 500 resp = requests.put( util.node_url(alice.process.node_dir, u"uri"), data=FILE_CONTENTS, ) filecap = resp.content.strip() # create a (sub) directory to put in our directory resp = requests.post( util.node_url(alice.process.node_dir, u"uri"), params={ u"t": u"mkdir", } ) # (we need both the read-write and read-only URIs I guess) dircap = resp.content dircap_obj = allmydata.uri.from_string(dircap) dircap_ro = dircap_obj.get_readonly().to_string() # create json information about our directory meta = { "a_file": [ "filenode", { "ro_uri": filecap, "metadata": { "ctime": 1202777696.7564139, "mtime": 1202777696.7564139, "tahoe": { "linkcrtime": 1202777696.7564139, "linkmotime": 1202777696.7564139 } } } ], "some_subdir": [ "dirnode", { "rw_uri": dircap, "ro_uri": dircap_ro, "metadata": { "ctime": 1202778102.7589991, "mtime": 1202778111.2160511, "tahoe": { "linkcrtime": 1202777696.7564139, "linkmotime": 1202777696.7564139 } } } ] } # create a new directory with one file and one sub-dir (all-at-once) resp = util.web_post( alice.process, u"uri", params={u"t": "mkdir-with-children"}, data=json.dumps(meta), ) assert resp.startswith(b"URI:DIR2") cap = allmydata.uri.from_string(resp) assert isinstance(cap, allmydata.uri.DirectoryURI) @run_in_thread def test_mkdir_with_random_private_key(alice): """ Create a new directory with ?t=mkdir&private-key=... using a randomly-generated RSA private key. The writekey and fingerprint derived from the provided RSA key should match those of the newly-created directory capability. """ privkey, pubkey = create_signing_keypair(2048) writekey, _, fingerprint = derive_mutable_keys((pubkey, privkey)) # The "private-key" parameter takes a DER-encoded RSA private key # encoded in URL-safe base64; PEM blocks are not supported. 
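# (Encoding pipeline, as a sketch of what the next two lines do:
#   RSA private key -> DER bytes via der_string_from_signing_key()
#                   -> URL-safe base64 via urlsafe_b64encode()
#                   -> ASCII string sent as the "private-key" parameter.)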
privkey_der = der_string_from_signing_key(privkey) privkey_encoded = urlsafe_b64encode(privkey_der).decode("ascii") resp = util.web_post( alice.process, u"uri", params={ u"t": "mkdir", u"private-key": privkey_encoded, }, ) assert resp.startswith(b"URI:DIR2") dircap = allmydata.uri.from_string(resp) assert isinstance(dircap, allmydata.uri.DirectoryURI) # DirectoryURI objects lack 'writekey' and 'fingerprint' attributes # so extract them from the enclosed WriteableSSKFileURI object. filecap = dircap.get_filenode_cap() assert isinstance(filecap, allmydata.uri.WriteableSSKFileURI) assert (writekey, fingerprint) == (filecap.writekey, filecap.fingerprint) @run_in_thread def test_mkdir_with_known_private_key(alice): """ Create a new directory with ?t=mkdir&private-key=... using a known-in-advance RSA private key. The writekey and fingerprint derived from the provided RSA key should match those of the newly-created directory capability. In addition, because the writekey and fingerprint are derived deterministically, given the same RSA private key, the resultant directory capability should always be the same. """ # Generated with `openssl genrsa -out openssl-rsa-2048-3.txt 2048` pempath = DATA_PATH.child("openssl-rsa-2048-3.txt") privkey = load_pem_private_key(pempath.getContent(), password=None) assert isinstance(privkey, PrivateKey) pubkey = privkey.public_key() assert isinstance(pubkey, PublicKey) writekey, _, fingerprint = derive_mutable_keys((pubkey, privkey)) # The "private-key" parameter takes a DER-encoded RSA private key # encoded in URL-safe base64; PEM blocks are not supported. privkey_der = der_string_from_signing_key(privkey) privkey_encoded = urlsafe_b64encode(privkey_der).decode("ascii") resp = util.web_post( alice.process, u"uri", params={ u"t": "mkdir", u"private-key": privkey_encoded, }, ) assert resp.startswith(b"URI:DIR2") dircap = allmydata.uri.from_string(resp) assert isinstance(dircap, allmydata.uri.DirectoryURI) # DirectoryURI objects lack 'writekey' and 'fingerprint' attributes # so extract them from the enclosed WriteableSSKFileURI object. filecap = dircap.get_filenode_cap() assert isinstance(filecap, allmydata.uri.WriteableSSKFileURI) assert (writekey, fingerprint) == (filecap.writekey, filecap.fingerprint) assert resp == b"URI:DIR2:3oo7j7f7qqxnet2z2lf57ucup4:cpktmsxlqnd5yeekytxjxvff5e6d6fv7py6rftugcndvss7tzd2a" @run_in_thread def test_mkdir_with_children_and_random_private_key(alice): """ Create a new directory with ?t=mkdir-with-children&private-key=... using a randomly-generated RSA private key. The writekey and fingerprint derived from the provided RSA key should match those of the newly-created directory capability. 
""" # create a file to put in our directory FILE_CONTENTS = u"some file contents\n" * 500 resp = requests.put( util.node_url(alice.process.node_dir, u"uri"), data=FILE_CONTENTS, ) filecap = resp.content.strip() # create a (sub) directory to put in our directory resp = requests.post( util.node_url(alice.process.node_dir, u"uri"), params={ u"t": u"mkdir", } ) # (we need both the read-write and read-only URIs I guess) dircap = resp.content dircap_obj = allmydata.uri.from_string(dircap) dircap_ro = dircap_obj.get_readonly().to_string() # create json information about our directory meta = { "a_file": [ "filenode", { "ro_uri": filecap, "metadata": { "ctime": 1202777696.7564139, "mtime": 1202777696.7564139, "tahoe": { "linkcrtime": 1202777696.7564139, "linkmotime": 1202777696.7564139 } } } ], "some_subdir": [ "dirnode", { "rw_uri": dircap, "ro_uri": dircap_ro, "metadata": { "ctime": 1202778102.7589991, "mtime": 1202778111.2160511, "tahoe": { "linkcrtime": 1202777696.7564139, "linkmotime": 1202777696.7564139 } } } ] } privkey, pubkey = create_signing_keypair(2048) writekey, _, fingerprint = derive_mutable_keys((pubkey, privkey)) # The "private-key" parameter takes a DER-encoded RSA private key # encoded in URL-safe base64; PEM blocks are not supported. privkey_der = der_string_from_signing_key(privkey) privkey_encoded = urlsafe_b64encode(privkey_der).decode("ascii") # create a new directory with one file and one sub-dir (all-at-once) # with the supplied RSA private key resp = util.web_post( alice.process, u"uri", params={ u"t": "mkdir-with-children", u"private-key": privkey_encoded, }, data=json.dumps(meta), ) assert resp.startswith(b"URI:DIR2") dircap = allmydata.uri.from_string(resp) assert isinstance(dircap, allmydata.uri.DirectoryURI) # DirectoryURI objects lack 'writekey' and 'fingerprint' attributes # so extract them from the enclosed WriteableSSKFileURI object. filecap = dircap.get_filenode_cap() assert isinstance(filecap, allmydata.uri.WriteableSSKFileURI) assert (writekey, fingerprint) == (filecap.writekey, filecap.fingerprint) @run_in_thread def test_mkdir_with_children_and_known_private_key(alice): """ Create a new directory with ?t=mkdir-with-children&private-key=... using a known-in-advance RSA private key. The writekey and fingerprint derived from the provided RSA key should match those of the newly-created directory capability. In addition, because the writekey and fingerprint are derived deterministically, given the same RSA private key, the resultant directory capability should always be the same. 
""" # create a file to put in our directory FILE_CONTENTS = u"some file contents\n" * 500 resp = requests.put( util.node_url(alice.process.node_dir, u"uri"), data=FILE_CONTENTS, ) filecap = resp.content.strip() # create a (sub) directory to put in our directory resp = requests.post( util.node_url(alice.process.node_dir, u"uri"), params={ u"t": u"mkdir", } ) # (we need both the read-write and read-only URIs I guess) dircap = resp.content dircap_obj = allmydata.uri.from_string(dircap) dircap_ro = dircap_obj.get_readonly().to_string() # create json information about our directory meta = { "a_file": [ "filenode", { "ro_uri": filecap, "metadata": { "ctime": 1202777696.7564139, "mtime": 1202777696.7564139, "tahoe": { "linkcrtime": 1202777696.7564139, "linkmotime": 1202777696.7564139 } } } ], "some_subdir": [ "dirnode", { "rw_uri": dircap, "ro_uri": dircap_ro, "metadata": { "ctime": 1202778102.7589991, "mtime": 1202778111.2160511, "tahoe": { "linkcrtime": 1202777696.7564139, "linkmotime": 1202777696.7564139 } } } ] } # Generated with `openssl genrsa -out openssl-rsa-2048-4.txt 2048` pempath = DATA_PATH.child("openssl-rsa-2048-4.txt") privkey = load_pem_private_key(pempath.getContent(), password=None) assert isinstance(privkey, PrivateKey) pubkey = privkey.public_key() assert isinstance(pubkey, PublicKey) writekey, _, fingerprint = derive_mutable_keys((pubkey, privkey)) # The "private-key" parameter takes a DER-encoded RSA private key # encoded in URL-safe base64; PEM blocks are not supported. privkey_der = der_string_from_signing_key(privkey) privkey_encoded = urlsafe_b64encode(privkey_der).decode("ascii") # create a new directory with one file and one sub-dir (all-at-once) # with the supplied RSA private key resp = util.web_post( alice.process, u"uri", params={ u"t": "mkdir-with-children", u"private-key": privkey_encoded, }, data=json.dumps(meta), ) assert resp.startswith(b"URI:DIR2") dircap = allmydata.uri.from_string(resp) assert isinstance(dircap, allmydata.uri.DirectoryURI) # DirectoryURI objects lack 'writekey' and 'fingerprint' attributes # so extract them from the enclosed WriteableSSKFileURI object. filecap = dircap.get_filenode_cap() assert isinstance(filecap, allmydata.uri.WriteableSSKFileURI) assert (writekey, fingerprint) == (filecap.writekey, filecap.fingerprint) assert resp == b"URI:DIR2:ppwzpwrd37xi7tpribxyaa25uy:imdws47wwpzfkc5vfllo4ugspb36iit4cqps6ttuhaouc66jb2da" tahoe_lafs-1.20.0/integration/util.py0000644000000000000000000006623713615410400014507 0ustar00""" General functionality useful for the implementation of integration tests. 
""" from __future__ import annotations from contextlib import contextmanager from typing import Any from typing_extensions import Literal from tempfile import NamedTemporaryFile import sys import time import json from os import mkdir, environ from os.path import exists, join, basename from io import StringIO, BytesIO from subprocess import check_output from twisted.python.filepath import ( FilePath, ) from twisted.internet.defer import Deferred, succeed from twisted.internet.protocol import ProcessProtocol from twisted.internet.error import ProcessExitedAlready, ProcessDone from twisted.internet.threads import deferToThread from twisted.internet.interfaces import IProcessTransport, IReactorProcess from attrs import frozen, evolve import requests from cryptography.hazmat.primitives.asymmetric import rsa from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives.serialization import ( Encoding, PrivateFormat, NoEncryption, ) from paramiko.rsakey import RSAKey from boltons.funcutils import wraps from allmydata.util import base32 from allmydata.util.configutil import ( get_config, set_config, write_config, ) from allmydata import client from allmydata.interfaces import DEFAULT_IMMUTABLE_MAX_SEGMENT_SIZE import pytest_twisted def block_with_timeout(deferred, reactor, timeout=120): """Block until Deferred has result, but timeout instead of waiting forever.""" deferred.addTimeout(timeout, reactor) return pytest_twisted.blockon(deferred) class _ProcessExitedProtocol(ProcessProtocol): """ Internal helper that .callback()s on self.done when the process exits (for any reason). """ def __init__(self): self.done = Deferred() def processEnded(self, reason): self.done.callback(None) class ProcessFailed(Exception): """ A subprocess has failed. :ivar ProcessTerminated reason: the original reason from .processExited :ivar StringIO output: all stdout and stderr collected to this point. """ def __init__(self, reason, output): self.reason = reason self.output = output def __str__(self): return ":\n{}".format(self.reason, self.output) class _CollectOutputProtocol(ProcessProtocol): """ Internal helper. Collects all output (stdout + stderr) into self.output, and callback's on done with all of it after the process exits (for any reason). """ def __init__(self, capture_stderr=True, stdin=None): self.done = Deferred() self.output = BytesIO() self.capture_stderr = capture_stderr self._stdin = stdin def connectionMade(self): if self._stdin is not None: self.transport.write(self._stdin) self.transport.closeStdin() def processEnded(self, reason): if not self.done.called: self.done.callback(self.output.getvalue()) def processExited(self, reason): if not isinstance(reason.value, ProcessDone): self.done.errback(ProcessFailed(reason, self.output.getvalue())) def outReceived(self, data): self.output.write(data) def errReceived(self, data): if self.capture_stderr: self.output.write(data) class _DumpOutputProtocol(ProcessProtocol): """ Internal helper. """ def __init__(self, f): self.done = Deferred() self._out = f if f is not None else sys.stdout def processEnded(self, reason): if not self.done.called: self.done.callback(None) def processExited(self, reason): if not isinstance(reason.value, ProcessDone): self.done.errback(reason) def outReceived(self, data): data = str(data, sys.stdout.encoding) self._out.write(data) def errReceived(self, data): data = str(data, sys.stdout.encoding) self._out.write(data) class _MagicTextProtocol(ProcessProtocol): """ Internal helper. 
Monitors all stdout looking for a magic string, and then .callback()s on self.done and .errback's if the process exits """ def __init__(self, magic_text: str, name: str) -> None: self.magic_seen = Deferred() self.name = f"{name}: " self.exited = Deferred() self._magic_text = magic_text self._output = StringIO() def processEnded(self, reason): self.exited.callback(None) def outReceived(self, data): data = str(data, sys.stdout.encoding) for line in data.splitlines(): sys.stdout.write(self.name + line + "\n") self._output.write(data) if not self.magic_seen.called and self._magic_text in self._output.getvalue(): print("Saw '{}' in the logs".format(self._magic_text)) self.magic_seen.callback(self) def errReceived(self, data): data = str(data, sys.stderr.encoding) for line in data.splitlines(): sys.stdout.write(self.name + line + "\n") def _cleanup_process_async(transport: IProcessTransport) -> None: """ If the given process transport seems to still be associated with a running process, send a SIGTERM to that process. :param transport: The transport to use. :raise: ``ValueError`` if ``allow_missing`` is ``False`` and the transport has no process. """ if transport.pid is None: # in cases of "restart", we will have registered a finalizer # that will kill the process -- but already explicitly killed # it (and then ran again) due to the "restart". So, if the # process is already killed, our job is done. print("Process already cleaned up and that's okay.") return print("signaling {} with TERM".format(transport.pid)) try: transport.signalProcess('TERM') except ProcessExitedAlready: # The transport object thought it still had a process but the real OS # process has already exited. That's fine. We accomplished what we # wanted to. pass def _cleanup_tahoe_process(tahoe_transport, exited): """ Terminate the given process with a kill signal (SIGTERM on POSIX, TerminateProcess on Windows). :param tahoe_transport: The `IProcessTransport` representing the process. :param exited: A `Deferred` which fires when the process has exited. :return: After the process has exited. """ from twisted.internet import reactor _cleanup_process_async(tahoe_transport) print(f"signaled, blocking on exit {exited}") block_with_timeout(exited, reactor) print("exited, goodbye") def run_tahoe(reactor, request, *args, **kwargs): """ Helper to run tahoe with optional coverage. :returns: a Deferred that fires when the command is done (or a ProcessFailed exception if it exits non-zero) """ stdin = kwargs.get("stdin", None) protocol = _CollectOutputProtocol(stdin=stdin) process = _tahoe_runner_optional_coverage(protocol, reactor, request, args) process.exited = protocol.done return protocol.done def _tahoe_runner_optional_coverage(proto, reactor, request, other_args): """ Internal helper. Calls spawnProcess with `-m allmydata.scripts.runner` and `other_args`, optionally inserting a `--coverage` option if the `request` indicates we should. """ if request.config.getoption('coverage', False): args = [sys.executable, '-b', '-m', 'coverage', 'run', '-m', 'allmydata.scripts.runner', '--coverage'] else: args = [sys.executable, '-b', '-m', 'allmydata.scripts.runner'] args += other_args return reactor.spawnProcess( proto, sys.executable, args, env=environ, ) class TahoeProcess(object): """ A running Tahoe process, with associated information. 
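Wraps the ``IProcessTransport`` returned by spawnProcess together with the node directory. A minimal usage sketch (names as used elsewhere in this module):

    proc = TahoeProcess(transport, node_dir)
    cfg = proc.get_config()   # read this node's configuration via allmydata.client
    proc.kill()               # terminate the process and block until it exits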
""" def __init__(self, process_transport, node_dir): self._process_transport = process_transport # IProcessTransport instance self._node_dir = node_dir # path @property def transport(self): return self._process_transport @property def node_dir(self): return self._node_dir def get_config(self): return client.read_config( self._node_dir, u"portnum", ) def kill(self): """ Kill the process, block until it's done. Does nothing if the process is already stopped (or never started). """ print(f"TahoeProcess.kill({self.transport.pid} / {self.node_dir})") _cleanup_tahoe_process(self.transport, self.transport.exited) def kill_async(self): """ Kill the process, return a Deferred that fires when it's done. Does nothing if the process is already stopped (or never started). """ print(f"TahoeProcess.kill_async({self.transport.pid} / {self.node_dir})") _cleanup_process_async(self.transport) return self.transport.exited def restart_async(self, reactor: IReactorProcess, request: Any) -> Deferred: """ Stop and then re-start the associated process. :return: A Deferred that fires after the new process is ready to handle requests. """ d = self.kill_async() d.addCallback(lambda ignored: _run_node(reactor, self.node_dir, request, None)) def got_new_process(proc): # Grab the new transport since the one we had before is no longer # valid after the stop/start cycle. self._process_transport = proc.transport d.addCallback(got_new_process) return d def __str__(self): return "".format(self._node_dir) def _run_node(reactor, node_dir, request, magic_text): """ Run a tahoe process from its node_dir. :returns: a TahoeProcess for this node """ if magic_text is None: magic_text = "client running" protocol = _MagicTextProtocol(magic_text, basename(node_dir)) # "tahoe run" is consistent across Linux/macOS/Windows, unlike the old # "start" command. transport = _tahoe_runner_optional_coverage( protocol, reactor, request, [ '--eliot-destination', 'file:{}/logs/eliot.json'.format(node_dir), 'run', node_dir, ], ) transport.exited = protocol.exited tahoe_process = TahoeProcess( transport, node_dir, ) request.addfinalizer(tahoe_process.kill) d = protocol.magic_seen d.addCallback(lambda ignored: tahoe_process) return d def basic_node_configuration(request, flog_gatherer, node_dir: str): """ Setup common configuration options for a node, given a ``pytest`` request fixture. 
""" config_path = join(node_dir, 'tahoe.cfg') config = get_config(config_path) set_config( config, u'node', u'log_gatherer.furl', flog_gatherer, ) force_foolscap = request.config.getoption("force_foolscap") assert force_foolscap in (True, False) set_config( config, 'storage', 'force_foolscap', str(force_foolscap), ) set_config( config, 'client', 'force_foolscap', str(force_foolscap), ) write_config(FilePath(config_path), config) def _create_node(reactor, request, temp_dir, introducer_furl, flog_gatherer, name, web_port, storage=True, magic_text=None, needed=2, happy=3, total=4): """ Helper to create a single node, run it and return the instance spawnProcess returned (ITransport) """ node_dir = join(temp_dir, name) if web_port is None: web_port = '' if exists(node_dir): created_d = succeed(None) else: print("creating: {}".format(node_dir)) mkdir(node_dir) done_proto = _ProcessExitedProtocol() args = [ 'create-node', '--nickname', name, '--introducer', introducer_furl, '--hostname', 'localhost', '--listen', 'tcp', '--webport', web_port, '--shares-needed', str(needed), '--shares-happy', str(happy), '--shares-total', str(total), '--helper', ] if not storage: args.append('--no-storage') args.append(node_dir) _tahoe_runner_optional_coverage(done_proto, reactor, request, args) created_d = done_proto.done def created(_): basic_node_configuration(request, flog_gatherer.furl, node_dir) created_d.addCallback(created) d = Deferred() d.callback(None) d.addCallback(lambda _: created_d) d.addCallback(lambda _: _run_node(reactor, node_dir, request, magic_text)) return d class UnwantedFilesException(Exception): """ While waiting for some files to appear, some undesired files appeared instead (or in addition). """ def __init__(self, waiting, unwanted): super(UnwantedFilesException, self).__init__( u"While waiting for '{}', unwanted files appeared: {}".format( waiting, u', '.join(unwanted), ) ) class ExpectedFileMismatchException(Exception): """ A file or files we wanted weren't found within the timeout. """ def __init__(self, path, timeout): super(ExpectedFileMismatchException, self).__init__( u"Contents of '{}' mismatched after {}s".format(path, timeout), ) class ExpectedFileUnfoundException(Exception): """ A file or files we expected to find didn't appear within the timeout. """ def __init__(self, path, timeout): super(ExpectedFileUnfoundException, self).__init__( u"Didn't find '{}' after {}s".format(path, timeout), ) class FileShouldVanishException(Exception): """ A file or files we expected to disappear did not within the timeout """ def __init__(self, path, timeout): super(FileShouldVanishException, self).__init__( u"'{}' still exists after {}s".format(path, timeout), ) def run_in_thread(f): """Decorator for integration tests that runs code in a thread. Because we're using pytest_twisted, tests that rely on the reactor are expected to return a Deferred and use async APIs so the reactor can run. In the case of the integration test suite, it launches nodes in the background using Twisted APIs. The nodes stdout and stderr is read via Twisted code. If the reactor doesn't run, reads don't happen, and eventually the buffers fill up, and the nodes block when they try to flush logs. We can switch to Twisted APIs (treq instead of requests etc.), but sometimes it's easier or expedient to just have a blocking test. So this decorator allows you to run the test in a thread, and the reactor can keep running in the main thread. See https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3597 for tracking bug. 
""" @wraps(f) def test(*args, **kwargs): return deferToThread(lambda: f(*args, **kwargs)) return test def await_file_contents(path, contents, timeout=15, error_if=None): """ wait up to `timeout` seconds for the file at `path` (any path-like object) to have the exact content `contents`. :param error_if: if specified, a list of additional paths; if any of these paths appear an Exception is raised. """ start_time = time.time() while time.time() - start_time < timeout: print(" waiting for '{}'".format(path)) if error_if and any([exists(p) for p in error_if]): raise UnwantedFilesException( waiting=path, unwanted=[p for p in error_if if exists(p)], ) if exists(path): try: with open(path, 'r') as f: current = f.read() except IOError: print("IOError; trying again") else: if current == contents: return True print(" file contents still mismatched") print(" wanted: {}".format(contents.replace('\n', ' '))) print(" got: {}".format(current.replace('\n', ' '))) time.sleep(1) if exists(path): raise ExpectedFileMismatchException(path, timeout) raise ExpectedFileUnfoundException(path, timeout) def await_files_exist(paths, timeout=15, await_all=False): """ wait up to `timeout` seconds for any of the paths to exist; when any exist, a list of all found filenames is returned. Otherwise, an Exception is raised """ start_time = time.time() while time.time() - start_time < timeout: print(" waiting for: {}".format(' '.join(paths))) found = [p for p in paths if exists(p)] print("found: {}".format(found)) if await_all: if len(found) == len(paths): return found else: if len(found) > 0: return found time.sleep(1) if await_all: nice_paths = ' and '.join(paths) else: nice_paths = ' or '.join(paths) raise ExpectedFileUnfoundException(nice_paths, timeout) def await_file_vanishes(path, timeout=10): start_time = time.time() while time.time() - start_time < timeout: print(" waiting for '{}' to vanish".format(path)) if not exists(path): return time.sleep(1) raise FileShouldVanishException(path, timeout) def cli(node, *argv): """ Run a tahoe CLI subcommand for a given node in a blocking manner, returning the output. """ arguments = ["tahoe", '--node-directory', node.node_dir] return check_output(arguments + list(argv)) def node_url(node_dir, uri_fragment): """ Create a fully qualified URL by reading config from `node_dir` and adding the `uri_fragment` """ with open(join(node_dir, "node.url"), "r") as f: base = f.read().strip() url = base + uri_fragment return url def _check_status(response): """ Check the response code is a 2xx (raise an exception otherwise) """ if response.status_code < 200 or response.status_code >= 300: raise ValueError( "Expected a 2xx code, got {}".format(response.status_code) ) def web_get(tahoe, uri_fragment, **kwargs): """ Make a GET request to the webport of `tahoe` (a `TahoeProcess`, usually from a fixture (e.g. `alice`). This will look like: `http://localhost:/`. All `kwargs` are passed on to `requests.get` """ url = node_url(tahoe.node_dir, uri_fragment) resp = requests.get(url, **kwargs) _check_status(resp) return resp.content def web_post(tahoe, uri_fragment, **kwargs): """ Make a POST request to the webport of `node` (a `TahoeProcess, usually from a fixture e.g. `alice`). This will look like: `http://localhost:/`. 
All `kwargs` are passed on to `requests.post` """ url = node_url(tahoe.node_dir, uri_fragment) resp = requests.post(url, **kwargs) _check_status(resp) return resp.content @run_in_thread def await_client_ready(tahoe, timeout=20, liveness=60*2, minimum_number_of_servers=1): """ Uses the status API to wait for a client-type node (in `tahoe`, a `TahoeProcess` instance usually from a fixture e.g. `alice`) to be 'ready'. A client is deemed ready if: - it answers `http:///statistics/?t=json/` - there is at least one storage-server connected (configurable via ``minimum_number_of_servers``) - every storage-server has a "last_received_data" and it is within the last `liveness` seconds We will try for up to `timeout` seconds for the above conditions to be true. Otherwise, an exception is raised """ start = time.time() while (time.time() - start) < float(timeout): try: data = web_get(tahoe, u"", params={u"t": u"json"}) js = json.loads(data) except Exception as e: print("waiting because '{}'".format(e)) time.sleep(1) continue servers = js['servers'] if len(servers) < minimum_number_of_servers: print(f"waiting because {servers} is fewer than required ({minimum_number_of_servers})") time.sleep(1) continue now = time.time() server_times = [ server['last_received_data'] for server in servers if server['last_received_data'] is not None ] print( f"Now: {time.ctime(now)}\n" f"Liveness required: {liveness}\n" f"Server last-received-data: {[time.ctime(s) for s in server_times]}\n" f"Server ages: {[now - s for s in server_times]}\n" ) # check that all times are 'recent enough' (it's OK if _some_ servers # are down, we just want to make sure a sufficient number are up) alive = [t for t in server_times if now - t <= liveness] if len(alive) < minimum_number_of_servers: print( f"waiting because we found {len(alive)} servers " f"and want {minimum_number_of_servers}" ) time.sleep(1) continue # we have a status with at least one server, and all servers # have been contacted recently return True # we only fall out of the loop when we've timed out raise RuntimeError( "Waited {} seconds for {} to be 'ready' but it never was".format( timeout, tahoe, ) ) def generate_ssh_key(path): """Create a new SSH private/public key pair.""" key = RSAKey.generate(2048) key.write_private_key_file(path) with open(path + ".pub", "wb") as f: s = "%s %s" % (key.get_name(), key.get_base64()) f.write(s.encode("ascii")) @frozen class CHK: """ Represent the CHK encoding sufficiently to run a ``tahoe put`` command using it. """ kind = "chk" max_shares = 256 def customize(self) -> CHK: # Nothing to do. return self @classmethod def load(cls, params: None) -> CHK: assert params is None return cls() def to_json(self) -> None: return None @contextmanager def to_argv(self) -> None: yield [] @frozen class SSK: """ Represent the SSK encodings (SDMF and MDMF) sufficiently to run a ``tahoe put`` command using one of them. """ kind = "ssk" # SDMF and MDMF encode share counts (N and k) into the share itself as an # unsigned byte. They could have encoded (share count - 1) to fit the # full range supported by ZFEC into the unsigned byte - but they don't. # So 256 is inaccessible to those formats and we set the upper bound at # 255. max_shares = 255 name: Literal["sdmf", "mdmf"] key: None | bytes @classmethod def load(cls, params: dict) -> SSK: assert params.keys() == {"format", "mutable", "key"} return cls(params["format"], params["key"].encode("ascii")) def customize(self) -> SSK: """ Return an SSK with a newly generated random RSA key. 
""" return evolve(self, key=generate_rsa_key()) def to_json(self) -> dict[str, str]: return { "format": self.name, "mutable": None, "key": self.key.decode("ascii"), } @contextmanager def to_argv(self) -> None: with NamedTemporaryFile() as f: f.write(self.key) f.flush() yield [f"--format={self.name}", "--mutable", f"--private-key-path={f.name}"] def upload(alice: TahoeProcess, fmt: CHK | SSK, data: bytes) -> str: """ Upload the given data to the given node. :param alice: The node to upload to. :param fmt: The name of the format for the upload. CHK, SDMF, or MDMF. :param data: The data to upload. :return: The capability for the uploaded data. """ with NamedTemporaryFile() as f: f.write(data) f.flush() with fmt.to_argv() as fmt_argv: argv = [alice.process, "put"] + fmt_argv + [f.name] return cli(*argv).decode("utf-8").strip() async def reconfigure(reactor, request, node: TahoeProcess, params: tuple[int, int, int], convergence: None | bytes, max_segment_size: None | int = None) -> None: """ Reconfigure a Tahoe-LAFS node with different ZFEC parameters and convergence secret. TODO This appears to have issues on Windows. If the current configuration is different from the specified configuration, the node will be restarted so it takes effect. :param reactor: A reactor to use to restart the process. :param request: The pytest request object to use to arrange process cleanup. :param node: The Tahoe-LAFS node to reconfigure. :param params: The ``happy``, ``needed``, and ``total`` ZFEC encoding parameters. :param convergence: If given, the convergence secret. If not given, the existing convergence secret will be left alone. :return: ``None`` after the node configuration has been rewritten, the node has been restarted, and the node is ready to provide service. """ happy, needed, total = params config = node.get_config() changed = False cur_happy = int(config.get_config("client", "shares.happy")) cur_needed = int(config.get_config("client", "shares.needed")) cur_total = int(config.get_config("client", "shares.total")) if (happy, needed, total) != (cur_happy, cur_needed, cur_total): changed = True config.set_config("client", "shares.happy", str(happy)) config.set_config("client", "shares.needed", str(needed)) config.set_config("client", "shares.total", str(total)) if convergence is not None: cur_convergence = config.get_private_config("convergence").encode("ascii") if base32.a2b(cur_convergence) != convergence: changed = True config.write_private_config("convergence", base32.b2a(convergence)) if max_segment_size is not None: cur_segment_size = int(config.get_config("client", "shares._max_immutable_segment_size_for_testing", DEFAULT_IMMUTABLE_MAX_SEGMENT_SIZE)) if cur_segment_size != max_segment_size: changed = True config.set_config( "client", "shares._max_immutable_segment_size_for_testing", str(max_segment_size) ) if changed: # restart the node print(f"Restarting {node.node_dir} for ZFEC reconfiguration") await node.restart_async(reactor, request) print("Restarted. Waiting for ready state.") await await_client_ready(node) print("Ready.") else: print("Config unchanged, not restarting.") def generate_rsa_key() -> bytes: """ Generate a 2048 bit RSA key suitable for use with SSKs. 
""" return rsa.generate_private_key( public_exponent=65537, key_size=2048, backend=default_backend() ).private_bytes( encoding=Encoding.PEM, format=PrivateFormat.TraditionalOpenSSL, encryption_algorithm=NoEncryption(), ) tahoe_lafs-1.20.0/integration/vectors/__init__.py0000644000000000000000000000060313615410400016737 0ustar00__all__ = [ "DATA_PATH", "CURRENT_VERSION", "MAX_SHARES", "Case", "Sample", "SeedParam", "encode_bytes", "save_capabilities", "capabilities", ] from .vectors import ( DATA_PATH, CURRENT_VERSION, Case, Sample, SeedParam, encode_bytes, save_capabilities, capabilities, ) from .parameters import ( MAX_SHARES, ) tahoe_lafs-1.20.0/integration/vectors/model.py0000644000000000000000000000272513615410400016307 0ustar00""" Simple data type definitions useful in the definition/verification of test vectors. """ from __future__ import annotations from attrs import frozen # CHK have a max of 256 shares. SDMF / MDMF have a max of 255 shares! # Represent max symbolically and resolve it when we know what format we're # dealing with. MAX_SHARES = "max" @frozen class Sample: """ Some instructions for building a long byte string. :ivar seed: Some bytes to repeat some times to produce the string. :ivar length: The length of the desired byte string. """ seed: bytes length: int @frozen class Param: """ Some ZFEC parameters. """ required: int total: int @frozen class SeedParam: """ Some ZFEC parameters, almost. :ivar required: The number of required shares. :ivar total: Either the number of total shares or the constant ``MAX_SHARES`` to indicate that the total number of shares should be the maximum number supported by the object format. """ required: int total: int | str def realize(self, max_total: int) -> Param: """ Create a ``Param`` from this object's values, possibly substituting the given real value for total if necessary. :param max_total: The value to use to replace ``MAX_SHARES`` if necessary. """ if self.total == MAX_SHARES: return Param(self.required, max_total) return Param(self.required, self.total) tahoe_lafs-1.20.0/integration/vectors/parameters.py0000644000000000000000000000527113615410400017351 0ustar00""" Define input parameters for test vector generation. :ivar CONVERGENCE_SECRETS: Convergence secrets. :ivar SEGMENT_SIZE: The single segment size that the Python implementation currently supports without a lot of refactoring. :ivar OBJECT_DESCRIPTIONS: Small objects with instructions which can be expanded into a possibly large byte string. These are intended to be used as plaintext inputs. :ivar ZFEC_PARAMS: Input parameters to ZFEC. :ivar FORMATS: Encoding/encryption formats. """ from __future__ import annotations from hashlib import sha256 from .model import MAX_SHARES from .vectors import Sample, SeedParam from ..util import CHK, SSK def digest(bs: bytes) -> bytes: """ Digest bytes to bytes. """ return sha256(bs).digest() def hexdigest(bs: bytes) -> str: """ Digest bytes to text. """ return sha256(bs).hexdigest() # Just a couple convergence secrets. The only thing we do with this value is # feed it into a tagged hash. It certainly makes a difference to the output # but the hash should destroy any structure in the input so it doesn't seem # like there's a reason to test a lot of different values. CONVERGENCE_SECRETS: list[bytes] = [ b"aaaaaaaaaaaaaaaa", digest(b"Hello world")[:16], ] SEGMENT_SIZE: int = 128 * 1024 # Exercise at least a handful of different sizes, trying to cover: # # 1. Some cases smaller than one "segment" (128k). 
# This covers shrinking of some parameters to match data size. # This includes one case of the smallest possible CHK. # # 2. Some cases right on the edges of integer segment multiples. # Because boundaries are tricky. # # 4. Some cases that involve quite a few segments. # This exercises merkle tree construction more thoroughly. # # See ``stretch`` for construction of the actual test data. OBJECT_DESCRIPTIONS: list[Sample] = [ # The smallest possible. 55 bytes and smaller are LIT. Sample(b"a", 56), Sample(b"a", 1024), Sample(b"c", 4096), Sample(digest(b"foo"), SEGMENT_SIZE - 1), Sample(digest(b"bar"), SEGMENT_SIZE + 1), Sample(digest(b"baz"), SEGMENT_SIZE * 16 - 1), Sample(digest(b"quux"), SEGMENT_SIZE * 16 + 1), Sample(digest(b"bazquux"), SEGMENT_SIZE * 32), Sample(digest(b"foobar"), SEGMENT_SIZE * 64 - 1), Sample(digest(b"barbaz"), SEGMENT_SIZE * 64 + 1), ] ZFEC_PARAMS: list[SeedParam] = [ SeedParam(1, 1), SeedParam(1, 3), SeedParam(2, 3), SeedParam(3, 10), SeedParam(71, 255), SeedParam(101, MAX_SHARES), ] FORMATS: list[CHK | SSK] = [ CHK(), # These start out unaware of a key but various keys will be supplied # during generation. SSK(name="sdmf", key=None), SSK(name="mdmf", key=None), ] tahoe_lafs-1.20.0/integration/vectors/test_vectors.yaml0000755000000000000000000217272213615410400020257 0ustar00vector: - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:CHK:yzxcoagbetwet65ltjpbqyli3m:6b7inuiha2xdtgqzd55i6aeggutnxzr6qfwpv2ep5xlln6pgef7a:1:1:56 format: kind: chk params: null sample: length: 56 seed: YQ== zfec: required: 1 segmentSize: 131072 total: 1 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:SSK:fs6ul2fju2fvb2cfx7gt6ngycm:hncpinwszbggrurbvuaaexnftk3j5wfr7473pj2g734mo2isxlbq format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEA1I0X8E9USJxPRJmD6l3cjlyGYi9hXxJFb5km5/J7elPbYPP3 DhdHmJcELYP5HxGBmfLavCBvFDO6nVA3TDwCPrI/7KpiY7uHzZkLgsLA8M45NaJE eUgACBESZcNioUqYLNHvYKLOqTDV+JwyQ9oWUNONd2jg3LQ+e4oyVwvxEZ41P5cM u9wJI8OO/G7FItCL2Ts1OgjmNWEz6KN7MjU/2UsNfa2eK6mlZ3Wi1oprhmfCrWHu +hjevqW46Qp/ddCCkBQCHKcV5ZsbBVxq6vqrYClUYa6Y5jzevMK8euuT+tA289sx jXpbY3eXaggWdeDIoDquOumCCkVxhoj3dvUKsQIDAQABAoIBAAIAsFSN0sv6WQ7a 6XDIYJ8gxQ1gx+iW6fuStFikIsC00JDZy56g3oZUCfCJ2UuPJSr3rFLwdUt570yz KEo6GIVRtaN7uYCaED4CLqcVQa8jKkvUkxOXd5Sb4JH/5MqDQurNMZW2Av96G9ID Wr/j6qjpTWBuJww9UIdmdnH2hVd2oz12+6Y/6nlrE2iGPDkQMPnkKXRb7xeaXJOq l6003hA4JRtzzS1uBb7cRuvyW/oOouBBxoP49a8UUoetgOMNDvVX4/16lRY3K6Vj VfserJz2R7QYKcfCJAe54VImGGhvq0Q76kfKsbX0xZ5fGFgS8LyAaZYyR6M3V88+ qmUT2WkCgYEA1mI7uL+NEn3zRjjkpqqO1tmKfZVDayQ5bpOtJG44qpmv+eihBuu7 S7V2waf46SwZAdUyXYxj+u0Dfnwre53tx7jdrntKNP9o1i8b3pZW13wv/IWq2bcA UFAhSlFjw9qj9nVFYHnqhygKGq+EbzkILp2eQUstjoWM4xCo1bRMMN0CgYEA/c/K YwVm6nyK6jMAK5zGWstliTPYkkSU79BvdbwXayIVp8CeDYPWpZxtqVQtoNvQwA9C 8K2PuHrHFH3a16siXPrto0hoC3oXyyKKmqbeZLpafg1ngQfieVYS0A0qt6cVx+Sz 3gy7W7xeHfBSBbDPJR/G4gI88+9GVJdCVAfK2eUCgYB5ZaD56gZBfW7fyeG4ewZt pTwmBvrpVdbrxdYatguCl4qt0kw09hHWOkioOqzZpO34OrjNfm0zLzl2S2v4ESMP oKBvaENKJYNBHeYDMlC0rw8hSLPJmzYjRGzFf7cltc55Bkkl64Ohy0uFdvRgYwQ+ GWT/Bkoi1X9FKS7h7LnkRQKBgQDF/jZvEGO8P/NNxwM3AlFpuok2go9LatyURxDr 0xKhrDEgb43cFSB4iJKzKMt/VHp/mGgrv/kBfCWYwqTY4NMpnUWLvowLh+7Ps95T ziBmi0jUVDiN20y8Qnzid6L/KQRArxPxABWX9lWlHTee4NJ2r1dCL2TFFb7Tdjtz ubBwUQKBgQCnEaEgoZ2Rqp3P7TzQjSzPlHlHBMXW28sEdXprdlwicU18fjDj5+og iozu9orcAD8AOGBNueKErSWiXhp+MuY4AvJuJZPV3gkMKKYc83HKWN05Xh8rkcZ3 KIHCyp3EBdtI+YDWvLISSqvFqCYtBR5v7eU9Ri2gOVljtmgJ8lARAA== -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 56 seed: YQ== zfec: required: 1 segmentSize: 131072 total: 1 - convergence: 
YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:MDMF:tn2ekpkmearz7k3bivm3ikuz3i:wcqgst36kymoirczlkok5pqdekt5lgsyfw3oh7ecoro3rj5aiyya format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEogIBAAKCAQEAxMTk4iBj9yi+NBdRamEGlVB+/Vptm6uWdSZIF5CmliQHr0Ev KlYaiMxlFR+1RiV3LA7tzUbeJtHUsiaefAxtZFf4gtCKNRj1vC8lhbcH7NKxY9vr 4QMhNJ4NxYpC8qqcnNrlwULMw+eYLb2NUhuI2NOPKDjs1dgwypIEej0/m/Z7r+NB xO6dKx4NYOnrQBoLNfgN7KlHCruhG4LcZxaMkuUD38brtxkd25/PlQ9tm77ODz9G WrU6/G5LSzTgWqrCQ6ww3UXfk9hfNqXob3loaTw2U+2M5gy73UduhHqSkyskNHgt WJRUyU9YC4K40IDtiJAlJJ3cShfVNDWCAIjKuQIDAQABAoIBAADRMS16j/UMpMQ8 8o1xYLXJs0qkZP8iq8nJwmk3+bvMONdH+y+pqDY4Ob/oNU2uGybCMHJL9eE4ZDHn NNJJZOzn6/Qdye0lhjkQAw+2mQr+kauwqUlHxOFd5KsU2L6plGPsXsw6KvUx/DD4 cA1OvaHqOFZ6Qgrd+SSQ4wGKST4sZSwX5ZtMX2+o0eghkK1W5fMgbxSvMOWBOwqk uUnYeyFECxvntRhv0clYwvqfNetf4SMFcPVp+pCY7gaZH2rr3tVk3lSHWFI0pz4s lLVtINuK2jeg8cfhZhnlWHSVW8C/F3xryNZCnacq4UgeoYwje8swez4/gCi0PyYY MbufpP8CgYEA8qLO9guRyFd9S4jgmVmu1TQLN8nX8sDOfkmiT7+V3oDJm/3HNRXV nX9UD/n2bpydEvjMCF7Mh4FaQvSFqSLTSBDoAY+4KD4KTecz3JD5yugBWbT7hWpp 0jgUOW18w+2HFyLsEYFyW1xpIa1iZT9w/R031nGeH+q1CJcGfYRNXN8CgYEAz5tb wAShRkugicR3jO8geYzuDV0JOAsZnTdef0OLrTYoOL6V5TX1YNnSGsv8vTs0sF08 PGVQqEdZrYtgCJ3wehs30xBoE5+CZdAo8k810x18TYK7Zx5ZF8VjKT4LcW3SPD5I 7T0TUeOKZRWgF0V4uRMj2qH+4fX4fjkoSDkYM2cCgYBYaf0yaSrZLxBIGvuExcpQ hGNmE9Xt7lYQbLKJjs2Ew7czcXlKncc2WfR+0d37lnQiOqjWj/zFj9wdM88Uv8zv oMF5+C3p9Bl7I7mhMO7lAj+jubBBgHJJGQg9mOjy2DX2t1IAWwQZyIXCsNR/Amwg v6neKY6uIK+RDr9ds30hTwKBgC8Svv8PDbJuu3wBfEoMfoSRG/kTu19lxO0M/PRG UIl52izjqgFK9tR7D1TcI/aUUiIbQek/38YIR6E+FQxfI4PMYCAPfEnWxS5owKAQ rdesu96nYe7DxtfI/e8ADoAtspnOVaLVUmgi++JnwOEF85WjbWHJkY2SxEF6nFOj /oTJAoGAbhFazz1ZFuMJ8TwKo847lkC3TqWEvMzvoFgOLej6eqiZ93LtZG2+CG84 eBDDyAVnNaHUa/HrX62B3Si6WN/vxsI9x191kaRUkiBBaqTg6TkruQwdFHMlFFHH Vg7Pqd88N5H5gMl6+i4c/RgXw/vlzFkTgc3TwtUjwAWGQs8eAVw= -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 56 seed: YQ== zfec: required: 1 segmentSize: 131072 total: 1 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:CHK:mri732rh3meyh4drikau3a24ba:6tsj5wvcp6szdhmrbu5bea57wduoza64y6nd2lm7aleqpsxjm5la:1:1:1024 format: kind: chk params: null sample: length: 1024 seed: YQ== zfec: required: 1 segmentSize: 131072 total: 1 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:SSK:kwn44kjzh5s6lqyaeh3d74ziye:fammlnqo37yrfilvn4xwralire36de7ogpusp2uprtirwlpdbtca format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEAvXGjmoIhKgrA08l9upgUZdyMOgOrH2rayM6/7tuaNjWzXfBL 048vgjD+PyWUCat9+Y/ZXO7RybLSSJ0z3tK+7177gaBBPffwp1ltlVR0mFEnpzRE 0wOzhn8+DBcrOV6819f2TcL1PLjtX0RnbHoVpQaMhacwE9laXSGvvDEliPRSTZGF a2p4DmkoBaTILKT9ZYjUdYvWT0VjtpK0aj1kkBtAvJFfnfKw+SXEnuRb49QWlfZy wuQdAZKqpHv4lm8DNDXviU8mXqU1N67D2Nn+TIZhZIMIILlinFKvbQDgI74sLoMI PxnuSZELZkRF4SaV+oNIjOV1FUaPlhs7mVy98wIDAQABAoIBAAIVmYrDDBUizImR 2dFeEhLE1zf7k0X3OGWlhtxSs3aXYjTDd+0wb4HX+RQqU1+68LKCZjWx3NX4rKOi DBVw7bThJZTPsOMnWiTOdgJbYOn1WWZ+8wmte1z1Kvw3YLxfKqFdVnnP8LY0ohCX C1CT7NZFrxjlDnxdHYxWAceaTUpYms5+vw/XwNUtypjjqeU7yd5lZ8Oi3kRlxgDE tVJq96pzahruy7Xo+QOrqDd55zTzfm1lXA5ZQNMtSKOumSoJ9+/NTGkz9WYDOzUy jBJDzKYP2hE6moUdDOUqK0sWPFmcRvMcVgxlg9DRuNFWzqM12jTzEaFqnBpRUoJ7 KuNBBlECgYEAxUo+GfuPB4QuWy3TlvbheSOP8JN/wJm6YgsisncroYdIibR9r0BW dsaQmJEPKjsn86qIIT10AtNXlieRg8GTtW+1E3GPDpkJYnqcQZ5D/R2AJPNK7Acd JeOAjZOWVoD79D8sC6w98+akURaO7TrTwXEQpSIrl+k0aR/RQCThuycCgYEA9dGs AWA4RVU++eFyXiu3fc08V7Asgh2h991FUWckP3/kRM8P9wLBJkBvolbQ6OA+kpKy IatQ02fCw77ksathgv5X7dRwBq+ziBX4VSch/zZGXiQTxJdjDEDFvCOzmXmuTVgw ZXSXaQkTlOci+aknV4CHZ/uZl0IRa2gX5u6x1lUCgYBE+3+ZUCcjpqkawnxARdRy 
qeeTY8+AhX/w9hnMsvRzhzzqwUxM8b8JysYWQmo+Bu8iONdeYAFnV4RBgVZU7mN6 RjPXN5agsQvh/iMSoob9QspioRrqSlZ7v/9cAWXIm1L9hPUeo7wJwvRjUfLpqe8O rTz3sGnztNvZggGFXx/6cwKBgQDg2CT1qTYvLNcKpwz+WAxhVF2yc2FyrnodBtbF q4r7ThbUXXVj4bAcNeomWjSCHcL+PJIUu+eVRx5d/3idjn4F3HE/CAZkB0g23Kml 8cJl9xYMPAGc2z/s0D5NZXZ8llE5S8YQtsFbgMLZe0WBiRiEL/sqwHbvZK4cST07 rO8bdQKBgEvcv+EJfffxdmzgRaZQaxxLdvrFH63ArPa/CnTMsltQUHifZD8H5F/Z MaMXAN+tbbwcE3uId0UcwsSflJOCoHkI7fly08FYTUCzyWLcTrrntP256SU7bybf 1tD2fzeoHns+FePq5qSkmXyw9bKC4WaP7PYEHr8RZ9+z4tmmo1GU -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 1024 seed: YQ== zfec: required: 1 segmentSize: 131072 total: 1 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:MDMF:uuln4c3nhnggvyge4trhl3d6pa:gzzewapuozilqyr7jo26dxv4hmip5nalhwsztkxxu3dcrew3k6wq format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEogIBAAKCAQEApavd9AgytDZu/VS2b253N8/2MWMUJJsAV+dXcMl1lXlD0yN9 Ljxay3m+ZIc3jJPqj5nDhxFz1Yd9HI0pJP224jJjlVHif9SpXzH9FVAsGIizYDMd ozHIZVctpX2ZbcQUbsskiGGnWwrDCrBDruVq+XnPbbGUpTMSFsXAafuKlvBG/zu2 e9TyEYPeenCz2+9A/Y4wIcK9z+dbKoof58R3XyQO20v4oD1cb2ohQreE778iEYJj gQCjO/EiYz2ATiEOlPrdD+R4NjFxqzfh8SZWrIDyKho7bMavvBM0N7Fa20n5630r Czu6jZNVgqK5qS46otsQN9XCUB9F37IVZbIwYQIDAQABAoIBABzHki9QFETPlvDN nEKXyUCKER3LtSZVwdXDY6J9cL52WiSty9NyGyCxRbSPc8QpNuxavQdz7fAoUQDa ec0KARDiyX9ZQfRMZF7b33fqHTrm1mhOAOZGYeZO4mhW/QX/M8B6bB5//lNXt4Ge FKfnhTGQ6kqHOIgJKumHUVFn58+n5khCgM6TKTOc6A4wqSfvF8Mt0eAlj26rPwUx sil+5uj+KMf/Z2Hb0/KBONLmRY2/dIwjNa41sxQ/DnW0Dae4PSKU0IojV3qz6mVF upfeZEVcod3Kn5jbHvkL5VpP2tFkGz+v9RPEQ8Ipt53GzaWMvMzKJ+nKKhp77IYL TBGp9SUCgYEAtf+ri1c29XMhUENSTfSKfihxBdlABVfrVCeVb+5TnB9NyGxLdC1W bFX9kn1pURR3d/0MJvgr5oLt7pFO3FmJ6cAeBYMadoxu0mKZZldnh9m69+hZ73ih im9VXLzKvwzVO7lpC83UMgJXyGs1EVQQ/fpCDlq8wEGhH/zIlrq/IpUCgYEA6Qis xm5d9lmhhM7rGWXjzcWDI5Jla1SGe2bNDwTyXMFoG+W7ENXtO5bdgRRaiByrb0mh X+9r5PI241MkseQIHL8B1yEgsxExj7aebfujspeAeSA1IznaZOSFWOxG/L0xpYQZ G6dlk5YTv/d7nKpZ0AV7z1rvOER+KLhpWsIWT50CgYB8fTG90h5JrkKvQB1gLVE+ EjOdKIleHlFd8uWI9qBCPjdaTJkgEpL276rPNPGBAFrnvDM/xKlit1RAxywGFUj8 lujyJEdJp612QvNiyY6Loib9w/UglIcKxjTBhnG4VPLWM3DjHhtzSZ75/DsEa1hk IXZlkzNg3oNz+djLIVn0MQKBgGUfTZJCHGYlmPB/tgdsboFBn1mVUotTv5PXFU7a L63Og7XvZ9CzdGGyYuZ7hZmhD0eYpP1zcNeFwAm/b6H+OQL4Y/0NtBngcShS78b8 NpnuImLtdgGWPk4f2DmxNlDAbMXNX/PfHTYBHwrjgvGF/rlFV+ewJzS8jB5xf85R gMsFAoGAB1V5eES3Ud9t38YeEcc4zi7amA7oiLZgAlMStROTxBripUuw5aFvzila S09EunEasURmykYqdzUoMAsIBlwiFS1Ky1pliR/PDgkX2YVM+S15bisIrPu5DmGF bbYgYCgKvkxL0p7hRTeu0+czw29He/T0pNfvr6/+4nAL47oCju8= -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 1024 seed: YQ== zfec: required: 1 segmentSize: 131072 total: 1 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:CHK:e5tpbcmalcq6nn2zhd3qvg4chy:emzsvv2xnkhhrj2oatds5hf2cney25awi56ybeq4ofiyehzyakua:1:1:4096 format: kind: chk params: null sample: length: 4096 seed: Yw== zfec: required: 1 segmentSize: 131072 total: 1 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:SSK:s2ort62uw74mbnvhqxgtclusli:xm4pmnye4mht7hoh5vcpt2zknqdvogujdo2ygmj2fb3chllp2ecq format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEAughxLigyfdYXPUaFIYRop4H33/VxoWOha93f29U3uFfjeij4 GPTARKNbLaqvANxHkscAPvKSTCJdECqzdg05AowyX0dbDPrBDirDVb2FF3UJ43oL cFIrDSpAL/+Yr14mVfvPtxomdw/P3rRocY9PtiLbSLGF8u7eliBiUsITbpQUy8bw 2s25/uBlkWXaflolma67NyVS83tugO13PN1W0pBqRRhmoJtWm6Eg6CRJzP/PeKmO OhZ+ybKTGWl3Z8Wu6w5TmEDrp4zHRZ3SYyGXrRppGhHhVQY8awBVCQzDf8hoUt3b y4ITWSIS3DqiI3n3O7JaPFKMTeFknv7hmgWLhwIDAQABAoIBAC3RtXqeWO1IZTno 3BJox9P1/WSyUbXj0Za9M400A0DKmNL2M0EGSzK8n1DsmKTYHGLI08UtPp402oII 
WmIpmJCJkkCIVSMpwZHpM2ozPwwLfg+CeD3GZycbcGrCA8uxnzSPTFY3QSETCmo6 Of06RHJCPIs3ehve/ICyYjUq6tlRgKtLSWT/YcV/rF0HcLz2JOa2gmGHPlazLuYR 4xKm2xWzLAHTKI1JJi2C6eNla4yIKEuXtTK4qfSFCnL/L9UZuzR/jL6Eq4iRNcKW 07nRRWppySr3G7nesoFgNxDvqDB0QpE5oTuLbSSiZxe+55tvWHfgECeqedpyN7Vt b/CxNi0CgYEA7t4Ks+xEX+q8KIhEVsK3lncV1o1Rb/GLAmSqCqir5Aux9j6+WING emwlv9rGQBgRXxCMDaYD3eBXXFgKCIKcu0OgA4racaEZ9zySSpqChyRJKNOick8c WXsnsTv6yQtN5r9iT/QuTpDXawc0v6O3fwAj7U3itjZdeYuakfb84nUCgYEAx2BI cWxKvcRYGhn0ClwjLw0ZzLj2vZpwKbu+kJiCFVyxHWgqAo/HtYBDGDAKLi9Xbu2C 78Gdv1Xmgq7q9kLr91qH6hbIXr4fzC06R6kTjPmXwhtfKJcgagt8fNztKnf15wwF fK0rZQ1SBtp4iYM5yr1L5LdkwAImry/jtOAdfosCgYBa7+F3Of2V3pGfhLEvrpWi DNgdhFN6oKRhVt19jVVTTjiEHMLug5uzQ7TjY3CSOhdy76PCFm/mamAX4dSABOmS SV6DursLA2AVRdQT3trOhDvt9RQlHIUYc9BaoxEItOsFa9sLwVRXyMCaGHY2dyeG jBEhaMNkxzGy8jj4VOVVeQKBgGLsReVpCsiPU/tXZzuArcBZqrRmDZ7TstUyHwJV eS2qOQLTPQzaVAedJS0qINL7kFEsrWvSUDewIlgy+8fGOpgXJhqixTYk9Vf0FNeb b2TiLkcUF6nnGiEjo9e8MjyBGtRRpaNPtJlF+64E1gu8vX73X2GTEP0n1BPWGhAT pu6zAoGBALYVsjIE+zj8c91ybrbNGLZbTg2KKr8fO0/07C2CvWH6HHN+lpyRTF9A QTGHviaFWoSLjgjmty9o0BMlvUtB1NNb2t2D1pNX15fWVYh3ALeFO9T7iR762Pdn KjfScNMgtEqigVRUEclyOlV+bH5S3LsMK4D7ripJSRvcQfsJ3LO1 -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 4096 seed: Yw== zfec: required: 1 segmentSize: 131072 total: 1 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:MDMF:ftamrjwrnnk62rlturhbjwd7uu:b6qgy35eipo6kkzqem45uzxfl7prx25w7nevtqv76akv52y6fg5a format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEAmQmtP2klQmq9icuQMvprLk5aXgk2Nv5lPWs+6Qf9R9Tft3YB CboH0scSgfb1+dYfuH8FHhuKD341yVBj2uGqhYPZDiGodMoAnGp4ZElvFc4qLDvP Fv0jixXMKYykT0cxwvWlQjCu7bQwUABdh32ZugvGcg6Kl2ubE2HUPRfPNSOEQGx9 DGHOogph8DNL0dm9xha1FggJLqFN8BnGk0nIvxA58oXG3xV8hsBJF5UV0BZcurFw 98tohmKmAUObYCtUl67nQixFQHl2qUuxQo05EMwRUcjRsnH/ywUjzu1sgDs0Ee0k OlMKPtT6go6Z+1iw7MEeQ0IS+rrRZmvwFhUCJQIDAQABAoIBAAnheBAUsn/RSHTc ccOjgMa37wRh7+ApGKbt2l1NU2sPMXU05z2WMenH9J1I2/ofew4nFVDWUlmhunNJ qh4jB9F1q86fxF1AqugZGmDPOkmGysOZsPXjricivHsfm37gMomgi+T0I7cxNRVV O19YxVN1GIDws2iyB+HTz9I0oVTli+BIQAaEdOOO7cM1AJhiUilqXcmCXBxoAfoo yEfchETAU25JgsirfqK+7QIbpcVB+00U9PKp1AM2ievPtXa6pmnuKZcSMjyEB9cj sNMuc/A+DnkmqpHHLOXhQCDl/ETLtcnGxFhp6rTeN0TYf4cEEEi+fFcvRV4dPMqs ttF2PsECgYEA0brJPSqQUUyyKWbdLJ7LpvWx0w3j8SRb2mj5Wjzsv2rNCIrCnwZ3 E5ABYrvDGG2w8Mvs2Rx0v5jvvms4vB/QmxC/bjqOXaM7DY7+6oezVLhGgsg9sMN4 1neTcCmBKdQTTjzvd3QzFm/j6MkRITL7E6gLIspqRHY4+kCmAQo1R6kCgYEAus0F cbqVoSYqM1M+GEI+8OAsTV4+xxVvx19FYgNjcRJ7aFfvTAPMtxOHxzLXcTWPS3rp vTBQz5XPXLw4iKC0mbVyLxqBjS7AtU22vGRWiXi1Y9THM/G7P4bHxiaGcoBvX+nu VzHCrDVVksT/2V8osreyaFp+tz3i1EoF5NTLRB0CgYACe7u2RbK/w7C4XMdxp8+x gmdAoIF6cXvE8klBkEcdXR5gY4Q6bdErIiFiEecVevcFYuTDDVs0iZMNJifd0mKd 82zQ2VCmOzCP0ImkLUcqUaREGCri1O2xXGkaguNMo343BvGu8GlKcri4IOlbA0eF zA+Vsd/gP6YdEHbmvEA1QQKBgQCkzJ+S+ENtylfMtBHCIR+aLounLhBAXx8gS2LQ 16BxbxbEtV9+NpPyqB5PlQEQ9WmX87YmnGuO1+H7NGrDztPGD5fPCplkzuxgh1FB 31uhKIcOrfeYUhkaMHQq0m4msjyP57fH4TSX7O2z8Gyvfw5OrVWOTtggHU7ybuPI QBub2QKBgDst8HtCy1kYdVFuDyG/CjReI6w3/NIeril7T3MixYGAC3zxfwFvq9aE BGMEWmoIg2Vq3NGLE1qHrwCrVvfiHQOJDC1XBiBzH2sijIHO9xnWwWLSsOspMXMj fX32WOapE8++CmasDj9ABJlSHbQkUn+iHT4ncBhce52sp3xE0kvf -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 4096 seed: Yw== zfec: required: 1 segmentSize: 131072 total: 1 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:CHK:ok3slnjd3e56za3iot74audhl4:2mjvrb455yldouoxwzbx4sbsysowipqje6ifa4pmbzqahj5j4mnq:1:1:131071 format: kind: chk params: null sample: length: 131071 seed: LCa0a2j/xo/5m0U8HTBBNBNCLXBkg7+g+YpeiGJm564= zfec: required: 1 segmentSize: 131072 total: 1 - convergence: 
YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:SSK:gayk76ptouc275r3kxcb73gpxu:lsdwqdqlno2gkdldk6s4wlvbz6d7qviuotfg77t6fquhkx67go7a format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEA2d5GQ9jkZXW0mqMtvm4qVD5nq5PR8Ni6DPhNk9+qpvtUTz6S 7QBFOkYz4Sbuzm7HYk40JWPDpTaBBUi2tB6sp/HUUQetqUeh4BSZz1VwB4pkcOrS vr4pLeZyos65U/lB2RMUwcDKEqtbDGMKZjBY2ElYUpQWsPsVB94KH0/nTVFRJFVF DrsuTdqwRSGoBXfKuL4MWWJQY7CUJ17ds6ixizAnRVAldHCWQvrqWIOaekLi2LAC OFP6w+g+fQKzNj64mH9Z4sOHLHevrU19hITv2HyjvhGVV+YACYWHXFEchiLr8p08 CAWktc8xobZrAzeqVBV405TJP6FooROpwidqCwIDAQABAoIBAGBGZXV45GDjdUY6 Yq36n7DvjL9YyJauJvUxSpLUbWRxvq1wANxDWNQqDqXpnvDRKkGKPmfhYKTi43vS yI3q2jAy4LhX9MlP0rwjTl69Kg7i/ISbeDp67NaQNcs1H9d4V71Vvb/q8kDywP7F y2Qh7DjTnHiNYUOfCImb+IwisptKyUeER4b4kqgREhwBkNm3O7EcdDn0OK0zBwTr Je8dnJ0tLRwszDUe+cSiAUY5fBM/UMq+Z5IJikk55FkLkvr+oam8Eq0s0oR4z4hm NHldRoFu20npfPM9SG7acViLWZIgD3YYXbF9LkBlk9X9ajNtSwE3QMtNHa8zko99 M7oKqEECgYEA+h94ts4x03c7cpetfvA+KM6O2NbWUmKo9ZQkO7q0orTo9FEB75nn NXZW//f6OZu3TZr+3yGA2F52rbtO97Rd8yIncR1ai6nd9THR9d9AGEmBZBM6tjvf dVypg702tUGLU8NvvmFkkyUh9PD1paES33dr7LgbTH2KXxcaDts3Y0ECgYEA3vzJ ORflPds+Px9Fb3vO6ZjAoLToGrhiR/gna8vm273Otl/WAXiDLHah+wCdlUgmGEm1 KkfckVuU2SGzfBY6YbQwEMAlZt1k0Z5DrYSydX6JWkJMqklJa7jZ+rGO/PsBatOo fLy+G+ysFfsqzPgbqEN6ZqvIQeNLNNgABU+11ksCgYBB+9VznFfGqpizNVJev3AW gc9rYtmtaDucdZVNcIbAuasO8OPq2pYFI4/1/Ow1EGA+B9qe8I62Bc2XLWe6rwlt 35+6Fn1RhOF6EseJ7nhRL+sDhJMjig38PxK1H1B0ZrMjyNYMylKnAs+/d2XGaQS6 kR2WmEcTWbcMOOL18lzdQQKBgQCD/sr2Wui+Juu/3bjydy1SJbPQ7YV/W6oBxClB rB9p7/9PAYfisv2i8k6MEB834M98DRWKg6NTAA0qQsLGLzo737ecEsGRFHi7hJ23 NxeUaWTQ4vIS0vL9Kx0NQtHLeqGqJMRVojw+t2heUqFRV4S3o8nuwLz4E53PGBVb D4Yp5QKBgQDxVnCHap5zYzzShVi00ygZcT+rBtldcrsj8Kjm4KE6qafB5M6R3gM3 bFbG9DSE4hnuP0aJMrLEfmuU7X/nxuzU1JRXIzgFXOS2hoT33LbZBkYZrk/m9AcP QYt8oUu1Ad3WLADavwwU2ZeQiHERomK8b5FyfhG0x4cu5zdWiel//g== -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 131071 seed: LCa0a2j/xo/5m0U8HTBBNBNCLXBkg7+g+YpeiGJm564= zfec: required: 1 segmentSize: 131072 total: 1 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:MDMF:ko4vxyhbyhfzkmucaou6leumvq:ysb7buuikg7nktiisrnm7je7nmdcqr5qxnfkzizhc565o4j2x5nq format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEAneiyaICnfRuLwEd0VV/xNw+mB0KBEIJvVhmfYK43vq6VLy2w qPOLPCx+bGv28IRU/0CKkFEN680Ww1Bdfv3DLUHisid3ISUMweEqQW3tKabOHgqZ ieOQIEFJ0dLFe6QnoMeiE+9bGdgtXi3bWzhV++1W+2uAdcjFGLpjHLozVstEGP7e +So1FQt02jTRp2GGgfOw6o4CKcwGuXmoud2dsl9j1dm2N9Ov7gJdz29I802wjmGI DZDSiWtgN90TU5480UENMqgMJ7jgLGQ3yJdQZDeDopQZ72RWpoXhuJWW7hy1lXwa vga1vaxpy2XLfVS3fVsQycWAKs87g50Z35HoMQIDAQABAoIBAEiAzmHTKJP18N3R MSX+DlgUtEvDClWVPh+PVjFi7K99o3vtA58fa7+uQkHv9IsMh6ZHcRfAT58EJL57 COLFCzedd8QLANTUGR5wDyHJokosj5kVjtfUB5n3wDg6CXiyr4tP/igfD3o3WuVS MtSYckpg+D4qZuoty+mFsvo+uFHR++gzkeS51+wvk+Psh5kVkTJ9eWD7WdNJbHP3 QKtrSWN5zqJYkSI6Jn7Ur1olKzFpT6QkIwHIAmyY9brwBkILdvPBtSTVRLX3sJaT XaX5zd3jlmGgvzBovGJc64jlSJWl5lhb0vA09wq6tVUTqlch0yBWsnIHxrZbT2bm /J3pRk0CgYEA0LlCapyHjXUD5GJ6wzXiY2EqzTCNhDaJHTpWrLCAa6jkZ2h4du9l /u6uiVZSj7lIxy1UD/Skuiz5xWe0j+Oo2P5XnbzAA3mr5lEicqbo0+oP59mxfWJO KQ7iiIikcf7s3V8+H07Tvgwc6XAQ9rSAWFxU/0zmSDskTRSFWcxT878CgYEAwaz4 0IwB7NELsJiqEyjotGkeVxw7bN7F6/XjIU+TuMouyH768KqnatYBPvAPXSUNZtX0 2Nqp/WvOGrL6wh7GCX0zj/Ro6OkoVwmDQlNPhsyl+h9yvTXO/SZOhUzlBoU/PkZE 1ATL2H/Eo43iE/9EkREZX6ydQwdlHuQaHGiBYA8CgYEAryY3EKWnrlG6YWUuZS+L eR+pviP3LTJiXw98ek9mhHFmsUvegtejvIjoF8FDaO3vn4xvFTCTJtPlCP1cbL57 CxRry6b/bisk0BHXmWRszp+El2d7ZJ8gvZ2LBU28yRhGBgINbFJGpx0dCdsLsSqI 5R0eClqqh4Rxkukced1XuZsCgYBGy4eYE9WQT0nKn8NrhYSqjdI9XWCLh1Mp0ZPY 
1VHWNnGrcF8iIf9YmimSbAXxsl2XvZXmvudsbz/DmrD2zHDvfwieEmVW3gOU7TFB yVpEmAID0AMNDuI+vwXszBLbs1FO3jjCl4478VhbwL1nOeRCctGnm11Q5x8bj53L zJeeGQKBgF82eley8ZEdCutxK1pAuP1FrLf2Xf3k43+sf4XviiONEZqn0GMUUAR5 1L6QOK6DolBs/8eQh0qWAM4Jzp1ab7k/ju0I4rXZtWPH1KChdGSh3GlF17KgWxTZ Fp7IgXxx2lqs9WCffy7BHqa26oASZlG83r9bbVgXVtgDyi1QhEQb -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 131071 seed: LCa0a2j/xo/5m0U8HTBBNBNCLXBkg7+g+YpeiGJm564= zfec: required: 1 segmentSize: 131072 total: 1 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:CHK:worle55uksa2uqqeebm4yxnihu:vta76jbmejt2pxx4prqa75xawpdtx42cmzzhappeetzjksym37wq:1:1:131073 format: kind: chk params: null sample: length: 131073 seed: /N4rLtula/QIYB+3If6bXDONEO5CnqBPrlURto+/j7k= zfec: required: 1 segmentSize: 131072 total: 1 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:SSK:p2jakp7rmlimr7kdcyv7pmqk24:scsdwkdc7jmwgvz5cpttafschwome2dv444rkq4ra72wwth65tra format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEA6MkCkLlqIfmB8wLFgZ7c4ValETX48Zf4zJWOAFisLzvSOUJO lPHTSZYLA6zqMqZWYrvrIncrm1VkqXkQ00lKJih1GPXGXfTHZub8ZU6dUHySe59I r1QBOdwZSLczBVAx6B2LegL14IC4XW6cgq7aJrm1NVCAgTVaE7TgeKBgZtCOfj1D 28IvBHxp0et6hJXMtNle5nbpg9VcHWWPcc/rbZeK+tTZkH8WAZXkyCJsDYjidr/A hSGdgZVyoxdCB3lRBAdZKaojxbDMCKbT852KzlkAbvQTk9XaANbxaVknsNCzSfRi 0hIyFgRGV8D6pYzvPFTFAzLvBHNO4M2oxXDzewIDAQABAoIBAER4o72IHFTD/FpL jWbUIKpTfxgx5PjDN9aNgwhNDNCT6wEWpOCgxQJXFQROv1CIps5B8ibgIL5+/q3u w2kynJHewprF6ERItMJq3QC7gABls/yS3KFdt0KaoAFIicRdU2CwGA+agVI46oHp ADZcUiSj6U42UKYw2D8FrCUvH0v+Kt6I8W4iIeuy6P2TFmp5/M/AdzVxz9tZPeHY s74aKp9yBnQJfoJCgLt7RF8gBeGLPyJVPesf5gt5GKHHPryujWOEnOZkesHl6uyW iHI+x9k7yrypYwQVIH1wxklUFMzqmKF2qyFcgLF9TIAsDZT+0qJhc80nWeeIQ4Yr Z3lL6AkCgYEA8oqywuSnDHkVJHxhH33XF4+VsjtHnPjh2K1yPTBBiEzOdD2WyVgL ZmXXQmh4/fbc+ioHUK3GKyoqW7XK7wSLwnBDLZ5qGybKquvDFQy+3+jOYfz5Ux/v m/efDP6gAyLRc08hfEnnPf9u3scgW1J1klN/jk1YYW48GzRqnYvpQ5cCgYEA9bO4 MAx6vMt7ZO8tNZGs/Pe5KfDaDNlyzMTKAzBpr0thHtbY3KhBtpE9eWZBvTeNIORE hmmZjiiGRcEQWDfbxiPeTLsUdbK+YmrkBi3PFrsMzcvDsmVsFag0MNQ95uiaDGmW xhMzprSQQIPpOlDQo17x19e+EQjVbuWSZF1T+70CgYAPS6nsWokiYzxvGZzyZHg9 FyQEonJottVfWcjbjQCE/PsEH6IzvmaxpXiGypnewkO7Tw05DEx0CmuzbuqGWk4K DBRgex4L6k1brBSYbj5XVpI1YcPDdz8gIeSY6DHlILv+vp3I/cwkf8hZKkujFrct bDCeI8iQFGib+plCqEEkrwKBgQCQYjwqgCrgJvMsLSD0CdcOEMTO0KpSQrYjfsD3 fsucJz+7T6XAhV+YWWE2pdCb0LkuSvW5xvRlhYriEsH4FVsg5JMNpCIxyAf47bdx qhm30dOEW+l0PAV39JA24YI+3xEnmiTv4PjJTfI2901m53azimez3yPh/r7tnBqa v/1KQQKBgQDUdKGfCOGGLV8Mzk41foCzT31r72bO6niNyqlGrx0gn9GHMN5/+4oF BJExrdUPi5w6FsbmYXqVUPfmKQER6QBitALbH1ASMCg+scoYZq5ic4J1gU7RWB1r F1J+puZLYmg2Yfcy6l8mHjEez3jNCzkZ72xFRmo++UB5VGkYXcYqJA== -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 131073 seed: /N4rLtula/QIYB+3If6bXDONEO5CnqBPrlURto+/j7k= zfec: required: 1 segmentSize: 131072 total: 1 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:MDMF:rn2dx65oniewqh2lzqfcosqr4y:op6fupeh5cchglxtz2qmxukfvpkltcciv4emt65j56irkloppijq format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEArrgsDZFLXR+LmZPnzNSaqgxy6pv8euDq8iTLTUmF2HWiBc97 MH2Tl1V41pSVY5a2bOoRN38LPs2Z0bP6dKDgaKNayV8ykWvhzlJpRMLP+K7XYATB qyVE7n34TytxWdHM+gHmlf4laTbQ56Ts8yZ1w4eINMaa/0OAFgbXBJZTElrtYlAA 3ljAU6HDDPVoOsXtOsNXTsZbMssadi/5ESEn/EzQw3muAvDxLV6adEXk4/1HiY2j 9EFLXUbKFWpfEdw13Goj385ok7/iLNR3ExCNbc6RefNeZYIY4uJGkWAKV68HyJUW HCag1gn0uoiEfc/1ichkvnJUytj0iUmK8q186wIDAQABAoIBAERJOv56gOf+gkMd 6YTGu0Qm5WsIWB92dJz7AHJOf/9fyllXBCCpk9ubzkSfgduQdfAPc3crMivzk2DA ZOzcS5jZ74uwp0Tq0zAeSYJUWZAqVYbzlPXc9Rn5JelbY0vlkaVMxOBumhSLFg7T 
CMDnkTNAb12hGccWjKMPPU5PqlwLgrUkwqEYrnNAXUXg6YTNaulW3BYi/gKcqyJ8 zinOFLL3gcQ5wR3qX41YLe1fwGPplC9TFuB9xyoJSMN0PJkhEGm4GAFdL6mjJGwW 1HHmEfSeHkvkAlzrDRVR/7e0xSL/nekJPY8wORLQT+fRfF+bhSCVeB5BKOFBIkIC PVdGNTECgYEAt8vEqMi6f+rm+atxOEScOpPyFoU6RDYIgmM/hGjiXp/UCX743fIj pujXFMbx9jv4LnnEjsd2wcx4SsncA/x9Uezk70+Y+RzpqzqiG7YNDDIQU6QJHh4p Bs4v6XOIdhzeWHaNav5ydb361Sm6Ppk0wxfT49gnxC2PBPd97/10++cCgYEA81uU 9gocgdBignouVx99rL9OaQ4orsSvG3NY30C4LNYabDM3qMpYdZY6NjR6J1lDZyFD X7LKlK5yKOUfQYs1qFIFelm/IT0ey1pEa/JtRLg23c7bGMqAh27WLHxyC86BwkiV Z6srKR/rwWVV6rtersvMzFdREetn7J6ksW4y9l0CgYEArlEfwukmIR5cVJ2qszA3 cENTDtLq8TjCF1AkNOP5Fr/Frf/z7ySxdaNOIpGRePVlrEanCrfZRXM0/9G1zz4Q abwhYWt/7Xzjjhf9GgUvGMr+uyVP1HXMeXzi6io+Wa+FnidKdxi+3DcECFocHzTV Wtdliqo/BQwkohRNKGmeIy8CgYAoNCoQ16zL+Ww82AiN1iMCBfzjODaaYN347/5v q5aBucFVvMRmOz+P9YiaaeMAWyvaftFNnxD+rS8o+GlIf8IWk65Z/zenOxy6Rahm GP/aSYCu9jyWBOZk+Xeik9CXiL9BJJKiNNIFkkN1iNM/20KSKBMmcwpupnBd0/ur YGRE9QKBgQC2bvTY+NYnwB5fVkyswl/Luy2B20Z2RCBWXazFfVQCFGQ+s5cMEUsU rYVRM93RDxod6fMgXRPKYs6phgByLVi1bMRqJvFTx9/9/dtkjRlX0S8WETLHEyhY VHf/xJdGHph+8wBa8qt7vVoyX44ylAVWgXvmf2PTL6qvLKu6OLbUmg== -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 131073 seed: /N4rLtula/QIYB+3If6bXDONEO5CnqBPrlURto+/j7k= zfec: required: 1 segmentSize: 131072 total: 1 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:CHK:6kccrgbtmmprqe4jcfi7vf6v74:wpivl2evi25yfl4tzbbj6vp6nzk4vxl6lbongkac7vl3escvopsa:1:1:2097151 format: kind: chk params: null sample: length: 2097151 seed: uqWglk0zIPvAxqkiFARTyFE+okq4/QV3A0gEqWckgJY= zfec: required: 1 segmentSize: 131072 total: 1 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:SSK:k2opn2p3sjpp66iipfj5x7ylka:dcrrkpbqjs4vv4r6hrfnftza47zkhsrfbmkavwx5gadbphm4gyoq format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEAwoi+YH41KOcNqUaRWqZGXqPyG8Zs6a8fjTu0NzfiiPPXiBhe fCd5fHKHYXTr6W/YCqE4c1E9i2JDNtFULRrnLyjRwhnvaeWizeNN6A86aupuNN1c Kt0XxDPDEgR8kxCh//kIgOALQs/QxRK8WMr+NfsV5xWLDXzPrfftKWvTmRaGRuPd iuvVzFfpfFyQXHRZoSr5uxTIEiEjJWTEvWv7Qxam0mQp0FeGGhcPu8g2zDbrG5uq 3VEn4X6AtFwQOFkvQV9E8oFtFm9mtI3G8Zx0dVQw3OMM4/2vIrloQ2PwzymEPvx8 Vnec7pY5AtWTssnihLLn9ZJCpcVr78gtlTAvJQIDAQABAoIBAEUSlLZiWbHKSJmA SwAq5fWYtNCT5fwbiFJ3jofEuhTyr+bM8E+ZHJPJfrRl9ZYPdmBf2lFn2ThyjXcb YI4bbVbbYY9P8ihhtyrvuKvbLFzmHHd82csGcffigTMWkL0PPNZMsG5CHv18GJ7B Bkto2FaGbsJ8bcE+PeeBjp4UgS9rp1QLnFu5CFVEv9gju+MUDNWxWptBfLxlKAaZ Wu1yGuDa3nWY/JhP7vd/Gtum5aDqsPmI+8JCma5EjXgdJkG94bBBFPgKdkOClh+L 5PSnN53RwuQwbKdFq1Py3CNz3WcrB2jq+W8EaM4NQX2fSp1SRoVECuk7NqxuM5FD q7oH9MkCgYEA078uAEnM2YovbquX602S4vnoZTtfLhqFrisrJwZfaEj9HK6mRJAi xsWMK9a3ZmI9v4PnWzYiNjWr9SJ3RP1AmaCuIjncjI1iN+sJfGsGXBLYtVzRuuvy hPkSN+kIcM1bJ6vALz1XMPiNnJEbtQOyFLNYuITqFE1oKG8UTZK/+6sCgYEA6zCo ao/w462g+cK2kBSrB7J6tJJpGTs+jrN+W3p8le/OKpPI9e+8nwlnJ3TVThhAkeFN rUhIRQeG0LHVrIyodzJXLPZNMYbzr5EaGeTdjoFQl3GvfMoN/xX7D87F5SQ8QhWp oMvKEEVwtdj5vaEcjurefCCFrpokc/r1FGxGMG8CgYBTxnrjwE8c7nAvwBIeFZX8 2VUZ4DCbJAEp1IiBKyNKNj52P58m10P7EqOPoKb1Cf9WK1C4pKVKf1emZ9l6YRxS 6+CZR/07WqC2cPZS1GEywn4c2zlbVAiilYygtEETqBvdiTVDO2ioxl34yOyGZIzr Zb2/W07lc263OKN2wY3VewKBgQCNq2i3j+8l5m/iIvT/g0Omxk79uHfQeAxtvxdt GTI0yxfgM9dItdlj3zEg5lKa0ScL+LBmofTOiAMgcQ7p+mx8KHm0nsTPAaCGcBxN 3rvK3IBkSVnRDJEzx9TMp9wy9AnMMOpV7ovQE1QaZhHBtWvTdwz/rkN4cmdk6ZV5 cOMyxwKBgQC/EScZHM1YPWvUatfF9oQrzsYBYTXk14RQNvuW6MAAK8C3E2REnypm iZDeMOyYzxmJ6r3sT0YIOmRdsaSWof7Rt7x9aeMmG69KDJL0jUsUhE7aK7W2H2yB Ax8MWdGte4MuzMRXpjZh6R3P1ZZ7xI9/O8SeKVnZB5u8G8zxvY1XUQ== -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 2097151 seed: uqWglk0zIPvAxqkiFARTyFE+okq4/QV3A0gEqWckgJY= zfec: required: 1 segmentSize: 
131072 total: 1 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:MDMF:2wd77rwew4gfhioljcisf6jr3y:4nzqf6vq7w3f5pw5yv5reh4isfsxmjpkd5dvc3swrcwm7zz7m24a format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEArEKiDSb3JVVLiePghhefdt0bfKlhe0LHgxxnpBjkRK9SNScI iwNB+PdJZHJcUSCrGv1l4dpEEhXpykuhb9ckeySFYh93rtS2c9Z3pKYJ74SzkTIh T3x2B99H8uIeKEtbs+3wt4N5gwedYIfhxfhIC+AZcMD+BF7JyAermj9p81kqfRgv n00kRQpozitb8Vs0upKX+T7n2F+1JDrisJjTT3yDxnzwQ9fCGXa7rBOb80tNbrqV M+LpObdZ9Z6TKZbTKoVK5KFz5zSCiIx5usnTJ7MeFKzZQbPArsH+YkcAaSA0ctjH yIIeTXuCA+oyO1ep1YLRZ8YDqW3/GBSQ1sbqLwIDAQABAoIBAAEOKXL+LZ5vbtaJ 7Fq181allSEG4p+AMbu2ZVYmhuN61XyY4B8F5pYRYcQ5RSKEGjhAnCqtjSQ1OSjx nKwUO9d7SkvnGHMcP3/nLPV3xPDQNc9qaLTVdN2oewbcZsXlbErmZsahChII7mhk fLgbRMm51j4QGJ5LPF462czInawNebeJxMfuX1ycZsCd5b/hCxhhn82CnA1LeGWQ dl+87lyCJvTb9gTfKxzKI6X94N46zYz86f6KgaMDPazZz1cbuRhMJsRpqFWltYEM UneIEKEIm/56RtLnlAKZOIAI5AJGpDoMWYzxzlD3PwXyFfP6PigB0SDl/nAGA2Gt wdoSzC0CgYEA2ajpn6JHoRnYfgi1gXvkWnU6jZMfYNeGO8kjSvTRok240eVMiMR2 l9dOe7qRglNuksW6gCv5XlF4RtFJnugf98PQ7Hq0yGvbdkA9P4+jnwuJZ13G6lyy 75cFDXpx34SMSfWMnZvHO7cQuPe6bTSj16LGFiWWVIWjSpJo6++zwOUCgYEAypp8 yO1NYBxcuVhVa6hJR6s4ri3AKkrMR+tIFyClBBgSdkGFSK4f0fnyaU3xxWQjAZFu ZlrNDj4VMQ0RkIWl6RjCWt2yzv5ZRS88Ft/kG9FpI18N6QpdpKqPSgILTCThYj0r Q89pbHTnIEcYj77Yt5P9XMFPq02WKbhFm3JMEYMCgYAgaHQQnrsHk6+WZHmSA/5m MZo8RjGf27dS44nruTQVdHkWV4vjuUznItm+tnK/8ug91k8EkoeYsYy1Dqhljq5j 17Yd389XICgXHU2BT0PPhIo6582cS50g612HOjoGS9gPkw4S0YUCsSk+QTRy2imj C8tutL4Da7p6ef5BUvlyuQKBgQChJoBa0WXaLLUUN8658wFWoDpVUM6o1RXnnp2Y x755oywMI9GAHf/xZH4MhJLqDtxJQwQtJcw0p+zzNxHhgmyVG7x7yhuPyX/4J7oD 99HYzphyKglGc5hIgeG3XCjgR/V9zmm7Zh7UxaeRZPuEWqjGXFAKlzhnaS8nwAqd uiwHQQKBgQDCqKXQa8ivL9IDscqW1UNK3HPf3JU9GNy1OkcIhz9HotoooXB4ZASS iV6B4tKev2scNA4KKU5KvY0a893ghsH9yEDeN5MJP/cGCceh9+IhYMbDqmRmIv3W 1uWtjpmyRGk/rFBDf6jxWcXKEVXAvXq8IQS8AJ3JaatJzwuasiIDJg== -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 2097151 seed: uqWglk0zIPvAxqkiFARTyFE+okq4/QV3A0gEqWckgJY= zfec: required: 1 segmentSize: 131072 total: 1 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:CHK:ofgig3loex6pev2eymmvohv7wq:yasbfedqnueaajdcavuba7kxbdqzn7e6zx3y6cbvucgx433vlzcq:1:1:2097153 format: kind: chk params: null sample: length: 2097153 seed: BTBX/ampNfLU+ox7xipBGiaSbgC0kcB8Gy7BkJB4oKI= zfec: required: 1 segmentSize: 131072 total: 1 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:SSK:eojgidosziluemkir3zkxtkwjq:yvdxyopvbqryaeslumwlc7x2xuw2y2wcqenys4rws5xslaox5nra format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEAo/r4ebWzsWZmdDKCYXv+1Fh1/0xyPWR8do4BgQjDG5vsJFX7 gLwo+/v5JuvE3k+aTMDp/VNFtgvSd4t8WT1QTYJvVQakqcIL6mZVVg7ZBc8JCvHm D3wtc2my4nqN1VLw2iUduLmFHY9SPMxRVHafVWNaD8MAhDpEhA2Daz442uG73/70 6x2LeOqscoTK7/NREfUbeTK9Xz5mFMnyGSWppM22+mDf9/3nCTTbfq8WLafMcLW/ 4X74uJMRE4uYzUzwiC0HyDZnsNAuU4ggjzljTryCK6emynSNHGAX1dkBccvd2Q0w g2nNL5rbsonjbaYHZ6BLWRUGbh0PMm9vZcL0CQIDAQABAoIBAAJMkyYKtgalWcek iycwgL/LrzjZgqsJcQTNBCih3bFyB9cxFO4GxVjWHTXOXbPjwBU2Kbm1bI9rPkPS kvdh2JHDo8m4hn/CUp4yWd0zZ1fRYa4zes8qBa2d9GYAQ6OTboDSfyaPRFZoI2Yq k7PYV27QLiu1J1lTQ1FqKpbq+Atak+OEn6n/y+1aEztaF7G8o37W+RDJUUhgv4+P SrOVXVbP72XCW39JPUBUPu5Zb4kqmWPZwh32nFZhWgv4dsjC4y97G7fluLhBu1F9 rDaX9OC+jQkgOq0znYAYWpaQQXqTJcluCOuk0RZHbT0ZOLdCxUl2Ar0CCZmDLXJH +3++gJECgYEAyMrvmQRbCZBJ2934mksJ+i9sYWBoh0E8yPFp7f7MFZM1x+PLGINW Q3qKLjzpFThlAHVCu5cHcWnnqCbceE4aaDUjJSTEn8MTDsvTleF138ARl5iZUCGw EA3qk95XyLz8QzOAY29i3SGdoCF5w+RciNDMq6eDfvOgtpUakHG63XkCgYEA0RDy eRXjpZAr6whs1Z+zDUU8zLDeh7Fpp1xSDElqR/79del8LkoRQNf5e75qRXcw4YSn 
cAxGewQ68d80KGi43j1vT3cyyM4qdA74AslMR8avtr14DZ2B4ZucgOPxEXh+Sn4J y6uXA88FezhW56WRo5QhwDcUwWD8cdbq2sffdxECgYEAuDxkTc2OTp8i9eec5M23 cOS0mECiiIAWavX/0Uz6K02wiiF8R5iyvSWhxj1DTKI7Iol1WrSAMtMOqN5HaueX GUiwX7N16nVOdkQVdy+xPUV5ntR4z+qpJoHuXBEmEN0+xdZqllIsr2PZwoUUueY9 9ZdIRLRmVkTqkL0M674+zFkCgYBInxdrAjSCdVMcIGAV0JSUvoFBJQsXnYPy07nY dMQLYELQ65rsRWQ63wrVdi5aFO4Q2FCFNm5hGvYSwJLQmpfk8vgwZVppsFvE1tJF vYDWyeiNlMEYEadlI/W4O/WoOO+9lox2iBM8CnR/+fEOJFAzvyf+KeP2zGbEcIBC IP250QKBgQCwFmCfg4/EBvU9lQZJkGZOdEig5Y5+pqOSZLkOYxTb8b1pbDF3hfZP CjfKpC+u6dUVLC/glR+W0csPsAPVSftm0cHNIHVLaHExeeS6cFPJ37Tz4TsOWoxp z6hEblbGV9C0pyhAgu1dAJAPRBpzzIZax5K6EPIGqFhEkLUUL9VocA== -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 2097153 seed: BTBX/ampNfLU+ox7xipBGiaSbgC0kcB8Gy7BkJB4oKI= zfec: required: 1 segmentSize: 131072 total: 1 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:MDMF:axafhf5i3oi43qvlbi5crtcn5y:6y23ckrgvhzeudwqh7tl4lx3th2wogx64c7oxzu6q7mbai3wfuwq format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEAqaKZBqNMZdBBYFHtrwyneRRmT+bj/ZMXJXupA5LgTIkFAgeu lOCUknGhdmteXd5KJFpCY+/YUYdo2I61BL1EzfnCGTMyQUy76jsNGVUTewV9yFb7 ji34ulQ+va8O32F5b9e9zBFJ/9YEvzlEXUh5Ui54LYcJBSK81Pi4YBuwfMhjcAGF YRSGOV6yFNu+yVZ4X3M5BG9dEdSS9D+RWvSQvvf4it50MUUZhB42ZuxYceEQQ3VD /yzbeAA7VYkA4x9Ogi1vjaoM81edSpzeEL53y9uq31sJAa2TWJ7+UuvUuOHjHZa8 wBo0k/ggvMU5gKYf7Uk1omcLu5bjawUuqLQwmwIDAQABAoIBABJt59Igy5wEFBYy 0G/EIC23WaY0n3BdGpan6KTKwDOQb0rZKs5h18JRNgl3gLkR/VwVskDJPecdnvZo CqKJLYBzMfMq7LrLa173K2UlKQikgBXT3WCE/hDANlDFRAhgvqC3/5a8Ch4RUlK0 FglMZmHXSpcnpIM6UumQW/eVCoeD0GzG4RcUm0VAPbNXDqekyVZdc/AYO7FUVW7r ATcye36CT6AqiAP7OCOyk9LqIxJluHHqvci12Ab05gBsfR6ihwNkGH0zYDVuIRV8 6OBxpPsHO6fdp5TzxKEhh5Mu61dMjhjOYE7gp8BiQSTaPtvHxcAZiNuAb6L3TtYS dIiqDAECgYEA3WkNFq4ADXjJ+3ClmYwiv+AX/8apD/YOwcozcZlUUmlzywJ4dVrM kFpo7BA6iJWeFuow99T/IgyPT935D2miFPW4uTG7JxNdFdL+QAykSggiSs3CH+UO TGIPJf4qFZYZjlhfzpyjfiJkL5qdT1wDabPMhVN1eC5RWHplIuzv3WECgYEAxCLh hlwB+mlMsnxYlzD/Vbk3L4y5y+bYjuPY61OdRTOnVknnI7WQQwSBPqK0eR4aUIRM XLlLCX+vvBpG+I2i/YGzkOtTqk3qwyqapipEjfTCVuWKjGXZnID7r1ESVslUgXMT gqjk7aId9FRfAhaXJrJSXwM/agJhF2cWLB/qs3sCgYAFtMRilj9oGXnTIhcSevsW SNc1f1AyGhxNQEHNJq92pEMYs0qZc6qb+ciEdPKdPIXjf0udx35/ySUUYNsfW5CS y7ZkB3UUT7pxaouk6O5+/fCsTts26TdSHqDXUNKS1dh4w6xMbdsE0CwW0fxF5FAu NDUMJpd7bm4oQpdCrCqOYQKBgQC6mfD0km6zXiFBInpqhYw3c3pke798lYjtESsc YLWc1BLdTnxghenVSODpxYRsQs6IUgYQpZ0VUWzRSjLBYId0JkS/mJRFz9GNkugF NOt6HyYR5FrXTkMl2bUaS8hl1y/V5LToN2VuDDHxdZ/abcGqaDdj+8QGSHM0/7eh jVAPJwKBgGbuMV/ccKzDXzlYhj9ek20gBfq/kdeSRY/BK1oIWhF0wb4G/S0BQSGQ AJs6tBozgrN1zxI1PkJ+J0u9KNtUhBUV9KCVSXv9IxzOtEOacFXmIvJL7hDYKMwe 52PBFl9+WDE20km3OYtlFmezu7EvBNxEnEAAXZ5VO/0pxiRCLoq2 -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 2097153 seed: BTBX/ampNfLU+ox7xipBGiaSbgC0kcB8Gy7BkJB4oKI= zfec: required: 1 segmentSize: 131072 total: 1 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:CHK:gsxbfakyyvqrv4sqvzftmoq5sy:4hinuje6rhm4myffxq6xwaocimd57c6hfutcbqql7pgd3rdhdnta:1:1:4194304 format: kind: chk params: null sample: length: 4194304 seed: 2NVWOXvtwcAFf0J/g0fDC+ypQbnCU7awZoCxcnhpkVM= zfec: required: 1 segmentSize: 131072 total: 1 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:SSK:pnpjd2iuvxpslyhy7xzlbnl76q:5wjat7x422zdadbomr45uwbsurpj6ezasplybg6b2zkl3avs2sfq format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpQIBAAKCAQEAmx/L+VzqYjeGT9QCeDb6DlBzSwGM5dZ8hkR+1JeURf5GCdfo URGstMbQQWl9RvHNywFYBoxXWD2Xzk/OVAl9cTKtagDGT8MfR0X0AfXQ7CfzZeV5 3ThNsF0X2qd1DamOiSIGm+RP8NaqginLYVqNozMk47iN2CkSnl8r3XpatrIvBDiW 
iY2o2wWkfYCcDAbSb0jVY23y94t6JApqPSKNQ4UA7BI+0oWt3TI7ucA379drHOO3 7d4G3kVwWxO9zCi/4kV4Q90QkBosqTdC2rBR/Y83FMaM2hT1a0iypnz+0wGGHkD8 PQ5gmHS2fwmN68vOWk6j6nv0GGB9WukH0Q9ZJwIDAQABAoIBAATz1phHKM3VRfrQ eE+25EmGtKzwB2aNxXtSk9YHt4Ul4WiOYG1jf2cPczX+FIYqUXlUuVG5iKRZkGTW 1BiHa7OB0bAxBs55xUrfkd2/fQRny3PfsqynD9vnQkwJzUk6tkFB49d1n5O/LBcX FyuBhO9xQjvpotBZkgLvuOp2Qzp9QxIP8RMecu/NRtJD8XzWpkeEI1cKGnPl8oNV H8Tw7X6YhicYI15REqQvw/KQMmgK9egOGGtAgSZu6Le/PCfDMsWWTXhP9gnjqY2N duRcUvoGvu/kaIM2bmp++4DnZgp9mumcAwwbVwYNzA73NrmCsswDk+ZBULRd3yCW R9Hrr+kCgYEA2DBW333pWiEzv5lzjmMQD20zZrbrF9trBTxByf80OCLWaSEOPvXr zbP8zFAAWOgJFckFsgygHfA5yIcLPLaa0TiWtelXNDNVVbgEVo/MAWKOmuujKh6O 3/GKEbln935UnrECDg5G8GY36fyyoNWjezfug6olZ/goiIJQOsNSCOMCgYEAt7C5 fETBtLILfHVu7IeLkUW72Q6foTzJVTkRx5cfGPLB1TgohqMqDTICURIL783ZGz/i Cxk042QRD7S3j3C+moLjXQMceSdWgY4sdy7OX2hscmbNSET42qyYFoUN+tLc8FUH jBVIVwZ1JfohDD0DdZdMMXuHp/BElGr1+6Uble0CgYEAiJPJXjoSgRE2uxW7rjmh PM21Sm/HB/RjoRQXUAC9QbWolRQABwCf7v2FeKIWBhTZIH017u0Q/rj0GF5QWBPY rNK+S8BVijHf+F5fxzvjGwDjrLWvB/30L0BOBLKIHxAdb3/OF4kngdph+p3dT8SI GmEUevOz3AInwU3qV6VrnxcCgYEAsNfM2xx+uG2orTuJfOHJtiRCgueXOu2AjzGQ Mm0FHUmo3pNgQK6Y73czz8TmBQpSd+96uWCdEEXoPwymo8vRVIOqTIOQR/tdRwEP Qfan7CZmMYVTIL52LmB3U0bpfI7A8geKaoyaxl2LLvKuGlArImx0iDb7FO01uQV4 p7n+4skCgYEAh9u+E2U4YtqeaSaL0K1TUlNPCrS+iKDbikC3WXv/anEDAik8R9Yq PO5kQ2AeNwcuIeCwY6F9h2TAIS0HzZTITW1AcVmoVKekqkGPsxzNV5zofZR/BTZY OpNYpBwJmxFwHPNnN2hcco1QVRN663FvaptxS/kgtUwFB2+hLp3Tbn4= -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 4194304 seed: 2NVWOXvtwcAFf0J/g0fDC+ypQbnCU7awZoCxcnhpkVM= zfec: required: 1 segmentSize: 131072 total: 1 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:MDMF:ihnqgpid4zygk7utpua7n7beqi:2qppzsf7y4hdbo5fmo3ns66mvshiti3govobsriwysvy4w6dovvq format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEAyGaEmDD0/hTK7WAWTEfXO3Vkgq3vi3UYydqC1610aQ32QOnL GVtwlXPM1KyC7nm+MmBUx/f6nhOCLmK3Q0+4uVf6D/58u9I1f7K1hsuvZtuzoP/m 5OV2jEBm1R30hZUFZ468b4KuS65WNH+LaW7ylNziALdIuLNmz7WPta/UPpjOQfWj vq/XXLenVsHngzL6azBUr8U11vS5ombDLWZec+5Z5WEXPJTZ0ywW4o0VcVAK9D6W a+K32XanLNVpCpBhVTNQ4Lk7q1O7OobYoLYJBhdiFIW8jMTknoYQicHjTxmfLTz3 zWlCfbBC58KHogd3qVMV3JDRbYOG7I1/vD65+wIDAQABAoIBAATvcWiGHCJ9xJmf +iyawFQ4iecl/XZDxf6CoSJKpUlJDL2AhH31YIpttaevL/JLkUGQWcYq90MZW+Vk jPrdZcE6x2/JZq0BekvQzOOq9IDl/ECEzNzqQccmduHcwP7hMqbgPwfIAh7fBkR4 t6g7EUJVRkOaP/I8iNWotQdWczWvZoMrEgSjIi9OR2yeosrZB9EihCUml18NH2JC 8MDBcKmkSfFSfwYoLPwH+EoVwhotBzNl/dofMGCeG1DzObKXZSC0Fi3nXILVX/Lp 2wWaWehN7HggHr2SggQm/4jA6RoFAlP/qqeD8CBpJn1eryeXRd+A6qsIvK1C/miT aJPVr3ECgYEA0IGp73f4g/sb7kIYQmylo8cTy9LqA5tK/SwIHyc3nYtqRMkcG9Zj gLpGSvZABbPiwprawwQcxXz9TwMCY6PjUEfhevvo5K/ObQmB4p2cSWflNJayhqh9 TYzUgpIypjVlMmhQDRIX9X1hwOxmEt8ueIlDGNlaLb6sFKHhNWeqIIcCgYEA9gwt smtU2xVmoXas6Q5VFZV3X/5dfhs6Uko5XVTS2cwhzxDHn54mstbDfEqCU1G1ZDJ/ 3EUH2aX+RZjEU3eiAiB6TNq8SfIFqDzJpnmGUR6y2RofhjLA1cZZu6/L2fqZZ9nc 875lV7tENrebLu2a59g+l9NoLBbxgtsKAX0ku+0CgYEAhxCHWTU4yZ3fUO6FsnmY rsflnfHpXx64a9mbBTstPqOx2g8AY1P0Ls37fNGZVVhaer8/GHbQgGlf2U/Uu2DN fhKiED2gdosfx+gRuA9qzu47Pl6kFLCOQq8IdfBoWNxbylRiDqV62a43pXY9BNqH ytL3oOAjF2DdLZxTO3oEbX8CgYB3fD6M0Jaqtd/bNViO7QjgrG8GTO52GR7fa3Ak JNcoMXuRpOJsX08HtkfEiiJz99AQ0n1JKLTBO10ZyzA8IHKqeb8qp2acuk2I/8wl bgqORkwwJgF9GBSRO/vDq4FhX9MznZcxPxrT2fssX0mbJoP9ZwQuktmZ36J1G43m XzGBmQKBgEeliDU1sEpkXMYUaBs2mzCEe3u7DqDNOaXjvT9mJQioUNoMFMdd1JPG oVJGuGKJAOAWbfeniw7wraIXfSp73jlxWULKKs7kbvMWFXcJ3OclOyKtFs0RycUd bTkmeb5G6hZBu3y3HjYYW2MX5E+2TRPHAWm9CvnYaTbcGW8BsyBT -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 4194304 seed: 2NVWOXvtwcAFf0J/g0fDC+ypQbnCU7awZoCxcnhpkVM= zfec: 
required: 1 segmentSize: 131072 total: 1 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:CHK:scwepfpkvhuc3fc5jkpmrodsfi:wui2ixaxyac76mbxtr6diw2ajky75wehk57f2kqgytddnpo4rozq:1:1:8388607 format: kind: chk params: null sample: length: 8388607 seed: w6uP8Tcg6K2QR905Rms8iXTlksL6OD1KOWBxTK7wxPI= zfec: required: 1 segmentSize: 131072 total: 1 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:SSK:n7a2ymjpmh6yzczjz5dzttejpm:zx5xl74ellbif73polmylwfapy4ptr26vu67x7ecwqrhf34ldqda format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEA7Qad4gnk7mpesxcpiKQfXW7+ybDNH7rdWoHlMsXSaE6r85/4 5cZkBSptDFJ9+H65qlyiuUVOo2gnX30Hd2jRv9Uq/vVwgcGTlG9yTeqtLwNK6fRt Z5wIYWdver+rACxUrmg1uwYFcD9fqzkTtRoFTNwUmjBZKDAkJA0OU+HN8ehZ/9FV UJdscqxIbC6MDDwTckVMOvpNq2Cc9maInHw8On5VwEOsOaDGYK+TaxsYjmwhxWl1 TF17Io6wdauHSLP6ZYjv2KibGZhiiyb1NBKD9GnSuHji7oQyAWKaXGGKTU4F04jx JdtwhYBysZWHUf7FQKsVIjGiyE/8/f04FBlX5wIDAQABAoIBAGK1QTXbkfuZz6M8 b58IXkl+Slv7JYljvAAPnUAGMwgeTyyvf6tM8eVW1D/v8Kb2O2LPnjKSwtt5KgBx pJTdUZBWeUfhNb/LuiZ0PQFmzEWKVP3WPWOLDtBlj37qaA+z5nYVTt76dHRY6AH5 zJO8aN2nv2qw3MhIOBzNVRyoqiflorQQ7su4pXe6IrvHzmzS43zgahJby4SSmt7W OebkYXMsQ2Gv6jc0aJj11CcpCvutO3B85OE/GEZlywWyNKo4sMD8O9x40ByVnT2m IydpZOSF/z7P0RZOCKyZdDoEZ+hipc9taog8Drdu1eq+L9tAF3W7McIlLFv+Jqrz PrdVuaECgYEA+zG8iWbfT3DdyE/59t4OX3MJMj3SYDTQ4h1ePLc84zbuCy0Z5ajh mCnFSPdel+d38bUFHwh5BkFV12qMwXTqMeRIn1s6EDuccujxINETPN77Dl7FRF7k EE2xNfJ5Io2L42Izzy3YJVJiQqBnXjMdD1X+Z8hKbQcdjmxOZTu1accCgYEA8Y98 9ODKRCocZ8BfFyHs7D7eY0Q7r/vp8iqE4xHXZGLyDDozphwmMl77Y6a1mjEEzsrx 6ckl0LVvq1ESt1wKoGBMxIMlrv1tkfHvgrhu6MeBe2QSDCUWfd823VujytEESyj1 xwBelqRcTXoZC62FlGA+uMXOGs84RPWDfeQdoOECgYADxn6X7hTjI8YhkZonLLU4 mAkGWUmFKqYND/Xvoa1nmNbBEj92ZTBm0hHmA9nHHLJ/zoGyMrVm86pvn2lYKwKu F0lEI+HehpbWX0voe1v3qT5Ku//pBCgXWqOUNP2/GDOHCl3O+lhqTy+s4q5LCyef qGI3exorQ1UdY+FVwiz61wKBgQDHJ5XODsa0DEP/Bgtf9whufia7kLXlEbx/e66z xzHeAfWtPw72FJ8pSEXakseGqINeOtPX+47B09SNWfokUi4wqzSfj8Cx1R9RBDaD f6txH4sRQB/hA3LXtAB33+XagRkZHlwEBbn2WOwAtHmRty46dl8/11VlpRKvR/tw /3GuAQKBgBz8ER+tjIMqlVSwLRtgJ+Hi4B/7I94HdOhpEuXynkPxYkOUSKGxSysg i572W0X5fIFK6m/ubzdtfYx828y7p8LM/BofvQpy5q8mNL7tOzbnsR6A2O0k3Amb t0H49VVOSMlHAYNFWL0CQE98M3bWZctIHIFo7xyGzHpypCQUfI23 -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 8388607 seed: w6uP8Tcg6K2QR905Rms8iXTlksL6OD1KOWBxTK7wxPI= zfec: required: 1 segmentSize: 131072 total: 1 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:MDMF:jqzeglflgihje5znycp4axh43e:cju77h6dvd5w3oarj5m543ff5ou4vlrsipfmd7ntfkleq37tltma format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEA9+tyPYSnh4G/0SdL+ZxbCU+UCQn3ZduTfDstJbGfNGdlZLgd F5mSDgO0Ius38nmOmMAx07abuUkvdVkDNN5RZKfOolLZlDl+eDh/XBb55AaSV8I/ 6asiblwxGA+xOS31pvnhupLSXYKI1xM2Pc+wLbQEgS3MJIazJAakNySCrNJ8Yp8A MTAHLDdMYJa4kdvQJeJN0GBFRNLdbdQ6VCqnn1rK+ky0khEskH5QkNZV8pjGFAIJ 0RMcbysG18eZwvLsyCzhtsST2f3v8/3B9N1Dm49Sf2OV9DvwMxGRamO36f5etPt2 xrrg+lTnWKzEoqlINSrQ1tYNOZMGHVoA15BTnQIDAQABAoIBAE+LfDKP0v7P2x88 +AwFJlJ09X728yl7y7Ty+bfb50R1nls0FaWCURHtD0ma5e8HIIETPYl70DgharhA kJ5QbJYan1qGsaf00Ia7PeXqu0/16dN9kGslTR1SuC/LrSW3ANgL2ei2fgehv80e LWukrRbk6QMXkiXwEB9RgDPvI3xWRQQT79Yf35VwUoj2ldG5bDRiwciA2NeNbNNc KUdKtN1acgqZMvv94uN7HeBMYKyttlfUM9+EMoTDhGJzGNN1vLcB7R7nUwiCJume 3/4nm9+pOJt2ETl1pFY/G+CSAUeIrJU47y664UTfHhydt0YI5Xaco+3burj/9xxE qieUK9MCgYEA+Kon6r3VpPd+vkhylgqk3vNleuxB/lZmmXMH/oR8WdU3bQh65q3W YcZlRh2Jq4iwChz16aDSelWB/mjDXotPLK2cSvJZL+W2c7tzCF/RT4XiphkELmAB l2Pj9W9FEZVbxHBZHRuaeQQsJHGhM6MtnX9TCdSpX0Apd2YbKx71mX8CgYEA/zuq I4R8dSVNkliH4it6ABhX3z0t8pTQqhDVd2G+ubQ1QjweJ+VA/xxyQF9XoqCd5Bqd 
XY7BpXRtVVQTEyZ3ecAQTeed4pVlDuJXuwMqAxSVb2EAZ4rLVf4DpfSuSvREqZjb fhrW3zbRzhBQF5QL/WferHruii83JDKuAF8iyOMCgYEAkBhCI1Q4Lm1A49ElnW6z lYKjxrSLlW/J6pfvBP0O9huJD8S/O1d3CJen7haFxYHiySl5ExYfgcZ1GtDojava iIBeNkvzhL7vmGcCRNMJfrSN30RV2O3HXkwDOCFve736PH4CFcz+GaxiTAgQqtSf RUoX+3VhZJHQtaDUk4tQNM8CgYB7GtITU4GcFyP1JFJWGlY72YH4oM+ao4CJppjv feu6MltF2S1KXN8erR/GQLZKMGI3dUbVq1dncGKTt3uDzxftV2AF02NpuFkH9tAN 2ZbX6YOyNv0089LjZSNpVj0C1hKQIrQrfNKK0ywa0e9vj+7AiOr0Ek8fw2o7QV5/ u2NRtQKBgQCdjdTxZw1n5jDO62w92iJVi/Q0g+KoUEpeeEXAXmctiDNaCNspsETb qqIo2ZmoLA/yptOPa0vfEYg1yTpQJIf8ZY0AjeBHRTw9ECUFeqdakQdV6IX1dEFd r3OSCPSREX24x6CQyUhZMAtMn8D9mAVMlUt1Iq6HtXA3EzViTS0PSw== -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 8388607 seed: w6uP8Tcg6K2QR905Rms8iXTlksL6OD1KOWBxTK7wxPI= zfec: required: 1 segmentSize: 131072 total: 1 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:CHK:5eeprb6lfxclwt7fieskd2euly:ffjgjbrxfl6d2ug2a63iaesxtrqa5pk3yaagfcldqvo7x4ok7ykq:1:1:8388609 format: kind: chk params: null sample: length: 8388609 seed: yPi3JHKKbWaEEG5eZOlM6BHJll0Z3UTdBzz4bPQ7wjg= zfec: required: 1 segmentSize: 131072 total: 1 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:SSK:n6eoyebsflmwvubrh2crtnxxxy:layy5tfeichjjxoeg6jehn6lkxtcxpke4udpakwpmotutsdxajja format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEAm5KsR6Jp8mLWjw6HtwEMd5rnzsyCivFl0kRnZoC31r38+JeR iYgM22iJ2rzYVOjZlSBtq/3ts0wZxx5HkFpqNaermhD1XcpPWBX094SvaiCxiRFT I6o76DZ8xaDgpbg6qOle7+zIGyR83ARl7s8gNCdHNWS1/eAJ7VYtWukyvAMdOpUu i6Mjqy2xgaEeEHBdHQq136NwZDaQiz52EodJE/Fi+hatP9XluKwkv/UXwE3lIcBi sEr1/FeG8CwRAk39NKk0qf97vAPvfrzPOQ6WFFV/MBS0To7KsCXBY4Ve2zNVBsOf YL4u/52a26X1yH5SO2oblE/cyFCGWiatd3IjWQIDAQABAoIBADmRRdTgIapCrriT HN84MR/VH3Ajty6o8w+ipkyE1wJMnV5z37Pvtyo9fb2GYdrRqyoGrO6W8S2GvIc1 CjA9dM3T9Kj3G4SQR1oGDfbFj4+K94cL9SLebHqaJwOOa3KHQJWefbX0fXulvdpA emOrG3SREEWOtdVy4NmFKRVZ858kSCXGbOZOl8wuaTfzBQCBSgWBM+P90ERMKRgL Lw25mqpXBVBuIsS2/EHucMaH8F5V/2k2jC1w0bJQ0rIjM8vITCQuLCl1zb3sHjYh alCQyKR/DjhE3NnY5PRedDu+OikqsEMvFxEXIlUWbAgPybe0BgWF25z4VWMROanW uEZ8Ev0CgYEAxBmqfy7Q/wRbOdlsFcSBNkDtXHIjJYtS9OvYwJcOj3Yusnw2qMSC LZ2teRvbRlbrI+zIpXWCVgiB7iaw2AK/WGohtkmf0oMgl5QR+lxqqwzwze8rKWiT gtnEtGNBPLxuPmEt43thL64nK9beN/vdlVEwEsZcpjRgf50bMEXY91cCgYEAyxfq sD3X9WTE9jOnJWpzA94z0ccVsUYD5+vSXRBhwtUcPet3KPc9M/PS3XTShaV9QOzI z/z8wsFLyN8JzoyOZteIAd6Cx5MKQJ3KO0+F6jv0xyIeaDaqcVzsycw6vT2ITQtq 33VGVB1GaXYCNwMBhUx8VjyMcbW4xGGPa4fsfM8CgYEAqZhk6v+rQpIa74oJPz4m XayDW8teeC7pfOaoG8/IiOw18KkagJUK3Lace7xKxKeRTw2ObgKVySAsdrHBid++ apHHPCaqcV50hoNJlRPuMKbNb9zjoDlQMf9ybmvU1NlGIu7ax/1BjQH54KFAqHxM I8IGaIZjRF7SAiv2gqY1wZsCgYAw6cm0OLDSgTqOsVIISOL6g4GnfHNVBq/aI4m+ sDtbWUg8AYHpc+JhqM+YVpJ9baYFBQI4VY3qufMupckO3ftN+YrgKF8HAfruJRKX xkdSaq5BZ447Oy9Brke5Ml7TRQaWx7EtsGkHySU0MR/HcAnluM4ZVuvcVw/w/C42 j739MQKBgQCFbLq7w8IGar1IXBuG9hCvVFUE0zx9ij7u5u40I8F6yW5Rt9QYNiWC ctNwMCkvbhiDTvYvd6titc0U4hplRmTpCeQDf5B9gHKQlA9wlk5sfJ5cN7AZsc+0 Zl9LnyoMqX/Mug+HcTEMnwQDHQugAkto7jQLYDZezXIX0ppxLp/zlQ== -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 8388609 seed: yPi3JHKKbWaEEG5eZOlM6BHJll0Z3UTdBzz4bPQ7wjg= zfec: required: 1 segmentSize: 131072 total: 1 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:MDMF:7sj3wkjglezcfjwsbnjanxlouu:3p7qxclmuc5vz7vbnivohlfwcvwd6kvnnikbdx2afkf3kfxripea format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEAoDSGK++imql5vyeH632kPLk9l4Tzb4tj71be7xXT475INrFo tUFmxce6eT9TKQQx06OAdiUfOeaOMPuRL6+qxijFnQ9dkLmpBhOdqW98lOQtr+hf t8zMEFkC7PYThOpmc25zeRI6U78PLlpOXY1A8dKgy2JvGqqXlKu1ffULBhyzZay7 
VxWsArjl3b84vfGyAUeAZkaqHff6rlPrsR54muud18tWBMTfwpBFxUvnD4PI4bLn FHR23SlqCdWZ9A7s/M6z6zVgfmqOaRmsHqXjdq5yXKBYhlrF0MrBIi6V3s9kKUZV OJGC4GC3j36l4mxplcE1onK6o8sLcjAwbPoHVQIDAQABAoIBAAxohQvaH6oC/l8D 3M8siA/7+P1HWuOE1FSxUcsK0cKN9mHmE8oWKrOe6J6DfRlsLb/KpiSAc460gMbi dThQTtXSSpwDmKeg+apy0n9RF0Eg+zjosqE1x4hsnIFl/dUJoq3GHEOAWewqnC5m 6DLuwdz5B2M5WImkNOFa+0+qLxRl/UPLytzQ2+z/zdHAPaS2spC0MFO2J5z1H3rm N7gaVtGPDqDnFyqpN7zuDyOD9+7sXKgV4CL50g7+b9EwKwsjWNaQeePKWlbyNL90 6AkWT59cahVhqzprl2k5GWURkxi3QryfBjGJcBPv7ZkJceuhgrhNg88Hga7fxLCb GiksSPECgYEAv5rT+9s9B96Xd0h29pJ99sNgLaus5yhGlwcIeDFCUk8jvMbzGum0 n2BCZ+4aT8BBYYx/qMdosZPMISmznMJkOEPPlCP4wHlpG6C9QGZR+uGiw8ZyzBf2 /s8CFXtRvlRY3e0TQDMHgKcFGxWn7MGa6K8CR4EGxdbXFmjRzEyBIXMCgYEA1gwn sYiRy5ZTUdvI/r4YYedsNrFlv5M1tFiIccS/wnFpDg3vBfYgeI8zO/iGWNP8OLWz 8C/lyuTgRQ++HoPahJ0XTywXZg51qZIC4GxbieKCbiSdRztpIb77+waJUN53j4VX DOyK75hfxS1uE3NbK0ZbEQDGf6zN2fxEOlqmYhcCgYAEIA4+VusKd1VlgQ7moiLK JEy2zwJq+6gBampZRB48bW3Ei7gCNVPpNoZXfH3eh7Igqoi5Fon/gMIdWKuATYMg 3vziIKAjbLnBmYVZlJphP2hktKoWENIFjGlsEvqgkWpUZN1MPY0EzRPEEIRMCaMP LW1sIrAFpGl/FwSlVGRXVwKBgQCtiDU2DU6GC12JY/JT9LG3zfNBdBjVc/d6OryD 38rHTUKqjklWP/CbTR1wZVAl+9bj8wvqkipuj5fy5YxxGNyz3tfi7BAcQWTLEQEc CT09UFIGEdEgyt206i1HmkkBMxsjVCr641rQXGxoYyh2xHMJZoS2CDblk6dgLtDx rkRuCQKBgQCHK+Dq/DQmCi0yYiWz4gEYmgTPgCDOOEstKD0YbfFjvMesitgQYXd4 uWHcTMZcycOwlWXZR709Snrhnvy4xuKMlYK5/lHFtnK9n+Tzs4KtsDRXuIkIHJNh JYyzXjQUwhLbMHY+JSa3kNVOjMkP8Rq+po8xC/6thsDaGJ0CoIQzYw== -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 8388609 seed: yPi3JHKKbWaEEG5eZOlM6BHJll0Z3UTdBzz4bPQ7wjg= zfec: required: 1 segmentSize: 131072 total: 1 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:CHK:tkjwggbz6p4wvuipe3gtmgfmsu:cnbcggp4scaxcde6vtfzga7bsuja4qjfbtv23xhaofwhbw5exjrq:1:1:56 format: kind: chk params: null sample: length: 56 seed: YQ== zfec: required: 1 segmentSize: 131072 total: 1 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:SSK:jlpnxcvtg3ohti5c224dtqsfi4:g3exm5bx4ctu3iuew3ir633j4dieg4p4fygpvpb33cvll6jqj5ka format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEAznrHSzm1uPCplwOOjhgL+PvikNBWyjaPdZEGGBqWLIpr7gsD ek/I01cdgJzAd3p2oJpVj+Dew6Wr2LCTlGFWaPLFiCaSlibE7BJHqhQwEDazq24f eAPbkxFT9Y6KC7bB5Vg4uROUdKqadhdN8aXOC2QqrosdT4IMzMn0Vxe/GrwanCY8 fzCrcp6DqAH2SEjlavcSlCelKgt9C9cHVN3XAAFEsVKAlDnSKE6CVE3UTMTKmqOH l84nGMspHD4BUV8juSsseKYz+Y7LElcdslZiPsfiEDcE9Tv28zWFlFG0iV36NIg/ XzFutNOioIOhDhWpzUSOENUuMXZG3cDINbAEcwIDAQABAoIBAGVOklfTYdjyo5LH mPsYy08Hbxt0TRD8AhlB5YaQDNyfseLinns1iChBVuVSg5Bbkrar4o0sXMALmixA PriPpZDqhIaPvl5TeU0GjwjgzNA3tqHG70O4SNR4rQQPQqYKrkmzpmkQNUekqRKF zqVgn56xL8vhz6jB+zvDXtIYgZhAjtB5gyI8NEZzzWfD1VNLwGP6Hpi2Y7LvPPxM tjCkGN/D1ejbyiAHA23EjAAIeLF3xjzSViZv6i98lGE+hFe//EPDs/h1C+Sso2YW u6R9IIPW6hwGw7+wcmb4djEB6IkmrXwMDzRdPqbCLaN2DEeWk6iDBkgbggcqxxVQ uque8OECgYEA8AEj9fQCkzJ4R3g2Agj3eD37J+44cs8XEeslh8lZTFHezvQE+5BY I0GAX6qsetKJYV8d/SooIyIaDy+LULaxBiipw5WQIAclHhFSTiZ9zskALzm3qUaF vJPzpljTCPskVDtXEdkRpfZ/uOXKQWdMHUQm4u38i84zLUxHBdMIt4MCgYEA3D2m hesxOjuUMQ9gNhRVauq1gEbZ3V9RcMaq90g9xsAXB6ax2YkFAy7fUWpfdn1LIyKh Lt2aCVmvqUupsFLNOonDLmasOEj/RUbOxUg/TWpmVq1AExAF3ZQG3EyB8ig4uEFL ZJDgYQ/IWIofPgZ6GY5LGR1GX6pgDbqXCJxD/FECgYEAsYSGbrMu/GUGJga6G8M6 F4vwqtY+llyqeaxttAOvsw2TOYuv68oWBu254AjDTo1O4+CQs+JskZ/1mmnWJ7sZ MK7+8hU75xSh1Z2GPRunTj3JjySnveLVpGfifZWRckEf29WQTzk5HoI2cjI06S9Y UwVHpe3VMCsyGz0iAyLWfbUCgYAh0vINJSbFS7shobvj8lF//xXq4na5MddfG5PM MHMUYBHpYed0gj+b0ooHhe+tUebFOZ9JhE0Q3I5G0ND5vG26bMfmC2ytpEBYElzV HZhjOlEHRMpPYymTcxVupe1bFGRJn/WFN17OaU8akfPkYbhEzn6oF7/kF1VzJlHl x6IFUQKBgDVkZseTqRYmO1dZxwEqAEgOZ+4c7RJ7vaYp6x/JZcmDD7UvmH/QAJUG 
J6S3uHv23lcR8rwtAlbHT1jIW+kK2vst8M7W4kRuXafj8nW/mk7mDMXzXU1rD56g xhNZqMb2ruLqkEUMRpbklMHlKxlZtk5NS9vslPxFPSDqPfwctu3C -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 56 seed: YQ== zfec: required: 1 segmentSize: 131072 total: 1 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:MDMF:l43byqsidupuz4k4kzskysj4hy:fnacy4ixivqrmptbpd5tvybb3uhkha6zxucyj57ovqbiof7dvcqa format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEAnYen24v52vWuFDhJps20/WRqmksmi74optyIDS9EE7fL4YRr w1EnmATjh/09h//Yu0LMjEwSwfOgfKK9VK5FmsnWExfSDYV82Gcca87JRNdmvEvo fsBMZOFVbJDLvgTNha8Y4cHiAmtom1fTSklkEwml/pVPvKnTPVT2rFTp6S5iOun3 W/akECkzPKXuUJ6erpvzF9ksQloyL7d+UQyhUjyL6+sQ8L2RTkoWWhDiTagXiZJT tnXvDXugY917TpTYfwaLuscpbzmSc9eaXTBk8cC8ZGSWluUDaC5PVbheSXfYFLoQ mizLMZMCz/Pih6kNWiFb60T7l6TCNr1R1YjhsQIDAQABAoIBAB8p6A4pydsEQVTk scVa6pQ6WlB9z3lTvC0OcafSEvCnqqDJlpwEIQYU6YJMmfCer5yUIW0b25YdAUHG 3Be1hjWR+lS6oKZmIwWYmGnHdc+1oTBc//ibSEGoxkJ8/qFvx8zLj+uRdImv//jD ThxjGnYdsYYEucqD+jMm7Mm43rFvWScA1wAFlgKCbylLXRZK6Z3R0sTE47SZrNX4 FGCRssorYNybFgw7r/7ewbYyKJpLtp7r5QB/YcQc3/ZZKVOphSqSwpRCTDLTMH8O MwhbefL7jz3VJPzu3Solm9W8udhu5A2iXhXUj3IL86N2ZncEBPyh/dRWwKFPxvvr Xyy91rECgYEAzyqf53UCCRrKfBaPgLX2bl/lN1cdJWeRTE5CQvnBnE4Qhwfj3nN8 5yM597eA6ROLOlZmFyAyKGK1jtkS6ojZDnlAr7nVq52+kVqCR86REybbbaXjzT2f Jb7a9+1pQZ/bb5n7xs7GYH/LX9I3MCPhSRS5apf2G10GPoFiRe6Iv/0CgYEAwqm7 Q0hZKxk4D2+mG78HNKgh4WVR2JTRrRR7XaVTXWZNuVhKW8BBEyniFNyn9+AWQEAY jUx35hyiFz6DF+TlGZ1cPOTyWespY9JLi/dIdACc9K1lWAcniO7z2ksYKycPI7IU 4K98qhrbfLJPAx7rF13uKcp68snYQ57g1wWQ9MUCgYA7gcnmyVRpWxm4pR5ZYWtE 7yS/TbWgjexNl9kuteEoTcAvmVOaDWBeYF8BSeOsj6GZg0HV+LiPozL1smLdnauD nc6361B1+FzKEc6EY9CGSM4U4+bYiI/TXsw1FSv73rhAiWGqDLEs/OhlQNP7bwMC ZAKSnM3jtEfb4nxhDBCZ3QKBgQCWZyX02lVq41VZN96T2YjrumxTBkGyoWlP3V9j /3Tl2UF8TydEtMqSz+2KSOLOtij7A4r0wXxyIvVqGDaZo5UPsXGu6wYFS5jzM2yD fFBSsJaUxdRjq0N0nYtzwkmuLcOYxOM0puIfXBjxw6MguibSKxT03SkZpbKerIb0 G6zgMQKBgCU5nTQTh70j3b5mcynwsvOwmOD0YEd0Vgps2VbNObsBB0u8UkcUiZGz 4jloIGaqTsf6dsc7hdHdkL2zCiBtbSsur+HXpVUl0HbtAgXhsrCu0uzLHInTvtg6 K4E7oqX5emi/cyUG6Emx78GDO9dfYm1dz+gTZnAek9+aQyo66F5i -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 56 seed: YQ== zfec: required: 1 segmentSize: 131072 total: 1 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:CHK:nnv4vrtlxmzkurfzvonj22leua:ywcyijrfnwykraku56dq7v3o4ts3xsxqfgmk3kgzwhq2cpest4pa:1:1:1024 format: kind: chk params: null sample: length: 1024 seed: YQ== zfec: required: 1 segmentSize: 131072 total: 1 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:SSK:rq7lxvy7oorzoamt2runjfgvky:ebenedfynevee34zqofeu4vtv2bgu4dfqhqy34yry5lb6x4fdy4a format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEAp3GpCG+2qiLvwO5aCQrOrjr8Uzt1502l6qc+LwbJ4MDgH3pw re9knaikOrghUl/qOo0iVCImZoBItDShEU/HZSXDMMXyqZVHNnpnLNn2EQOgHXdw OK6uDNONDJg7rb0zIpr5sF4HoPukO6odIqogsA0S3VUhYPL9YE7MHEj4IL91AhH7 u2WdKvxR9ZOZ90Kqsha7iBhMzy3/JtdrXRXP4NRJPTGDSGo88bb33P/i4CN/GQ1W Xu9WspqE0Jn5yOO5YOrxvwNJK8l4dBCqMmvGNLucul7UYDsZZNHbixOlrrRexbP9 I2Ho9Gsfx1hN05vKNlLZ2Tnuef18XeLgrceTpQIDAQABAoIBACA8Nqsv3IXdA3SM PmuSt87dfrGkVxKwRWKLD2LcxvUclJkiyHoHxgI/EtzWEV4rJmveu07goy1lAXol zqNHTU746d6kIQ8KNMM1ZdMB5AgK/2Jk2ccjw9Cm0nbsAMM2ExfUp4CPXZ8ditTM r166y1+xKKJRDwO7y9EkYlGIr7IX8XY0QIr1FU26QItZXL/Dtz+rInVx+UReL6OV q6bA/28gWCOkCeoMqlk+LGU0WWAFryoJr+Ob/TjqkjFX4nURFXmVhJwaqoVMZHU1 J2O96V89Y7boz7VAuoZc9fxy1IH/q/p1S/DsD69EN1elFs+g4TchWg8AdaYK2xg+ nyCS/fkCgYEA6GAbr3Mf4++xg+Iq0kV8lFRrxH0fSMhdLeYJuQjsgCjzkLpuMUX/ JbkP4LoUmv5W1HQ+Gq7ZAOmS2VseNjaeWWrZwdoR7s478OTFbQwIEVxOpyzi3MR8 
WKU6KiTCzs4ijjf3f2oyf7Jl5/aEnJVhiZrTw2iNhk3jnqZA3h0AvE0CgYEAuHef JTQi9e3rGr3KWEl0vhbjB7W6isYdh8+xZtwdYHSVdogQuO0TgPooVTkCrJI0KiEl xghSqYF4OAh8iLHGtexrTahw6/xkxAx5So3PHev0eackG2ZFVYbLcRxxHL2n+jew XAQ1j+T8RdWOVdXIkWTOoKq+lYqwrB7JCUr9gLkCgYEAox7cMGRbTZF0BkVck/Ct TB6a6/p9XIUyS8cAqkBmbGzS1ZTZR4OAYUWwrKtTTZ4e69KRyf9VW7ubFzNMWPgs Xk6Qf/EJx55EG40sPalFfJJUsCvlMN5I/5004GKf0baIMVd+SJYOzu83dAbr/lMq fgMOhky9lDrW/wZr4L9xRb0CgYEAj2XeP0uSSd+Tvgv/ujYQHJ0qC5pH0w1Dc4oO /EjsRUkbzzLi3P7fBIpyxB03aPOWvZFbDeD2cXKGA/kE5jZcpJuOpqXkcm6X3pdb yosGkNoWCGPX+7y69Ut95wYXICKG7EpSPJXBFYUKXzcuGKfB7NSSk+9njFRuFr8v xJuZCXECgYAyyV2Ql8HWz+iF56c7Tar6vjU1iaHHp0iTS+OiZbe3iGbBaHbCpNQ+ SiAOcFOhAJjw/0Ez2Gffvmk09TnjuiXBUmYHgZKIWIa8EhAQCklLDUl4YmqcRA2s HvvNdvNPEnpyoXKOe3BmANrjkNz5Yk0gtOXDH5ar/JgweQaTxXoflQ== -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 1024 seed: YQ== zfec: required: 1 segmentSize: 131072 total: 1 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:MDMF:syhziku625dhr3x3c67oa7d754:cbp5qtz6vphepl2tmyo4b55o5x5afrdjjjyt5i5t35k7fp42qw6a format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEA3NxdDuRuM4xF0bmoRI46FoksTeyQwtzCYuL0eEhfbn4PTB0F jjcTcDp3Lc+E7FOT1zUU0wp4rhvOZDTpFjrW8n0+iBZzZCgka1L1sEtncPDgYb+/ VDtSFIoSKqQLmq0L1hdA5oLgXWEE918kLlNjN5l++W6OGfzB0ZaiWbJfQpOyYWvv PN5P9CFxh/aNO4924oivqB1iCRJbP1szGcEuZ1L8QWISZK4oauQA3wO05UGPNrQU XIEJDUoEtTSlsuOV+LZlFWkEHD7cwtlG3ZF9u2r+iHf0xcOPqsmqSIpOjPqg7Uic Qh9r/g1WwKmlmzgYgg2dWt3QUTv20WYZ6GOmnwIDAQABAoIBAANwH2IMy+sGgHcQ IPPsPjA9SLtvzB2/FPw4FIkjde+A4S5p9z0BqM5NmARWX3cpopADi1mbp0mX34xv cb9prBCQmDQYcV8Plug1cuNLJobmC6rJ3X8WAKRBiqCE9t9HWVCnAljJlGzaW/BD 2ArWkSFQ74k6H4EnuOv7yyGCpXlM+YfJJmAXSVM593BuERTM3//Ki5ctMwR9t/CN rxzDVrGhbIOKHWWAKfiNUseZVuy5F5QR7772QU+mycAvW2qVLeKs9lYpLekwqlLR TWYjE3HVeuhSbpQymHc6TsLiADlSVgGVC4mDeg80Q7vLEyM+BS4vkouJOkZl10ZA KdJT2A0CgYEA4suaXxcc8IrNTyE7lS1NCAXMYaI/Qlmszy3KfR2aIFzbJWBRTlli U0WWVbvVkcs616IKEb6jMgK3gJK9Sg2+IuT8W/DBr+q68/iTSJcjOhWD/OzqCHpB Y5tAkLA9NZf5QUcS0eSaZzWJdn3s30r4kTV/ZtfSbsXR97mlb8vRWaUCgYEA+U0g XARhs8QDChYNRjcBnZQz8usHL/LMXbO5h9NNtdfqhjYaBljHjYgu7cgEeaj5TMPr rVKgVUmIgMun7mBIAOTsQym10tM6ARf9wIR1+yYxbtDgoeaWSje+InSaG+3hYVe8 o/T1QQFQPW2ngQQ3OZ+YRrZLCqVw2qRLVQuiI/MCgYEAo7WKsjdR6YSYHRWFF/LC VxcwaA2hEjj/F/Ia52OV3OSKQBmdtyuoYSmrEinrSTllOUA7eoGc9b2mTkYeIzV2 WWPnkkpg1aZf2zpEvrJyeDwNsWYmrYXqa4cm/QpqtKQGBYvTVvVoSzYHCyRs9uX8 NX1jgI4r1VAwd1xnwiJi9Q0CgYBS43/k2FgbywovqlFTjSpuWD5FgDtth87HQOBo 9qqZ0WZapVZV0eLXffYMfTpvsOzixylvAU/py38lQ5FcQoruMS8UzaN0q2JXxsBJ 6EDJ9lLtQ2nMqrxBhPMkxZwPuTH8iY1g/islJ+ij4/eTf/FUqWmZ6TZeHc++Am5B opKQXQKBgF+86OH5QEMW5MMMKBhL09uzyMnM/sweKLo5fNYL+7hiO2Znb4sz55u2 I3xsH9+OMWq+o9Uy4q4QIqgGApbfrujOECe1z+j5mZBRTIpWTtW4W13sjT0Srqbw rGIvfueyhQlQqxcKobvVGIdeU8co2tG03Xg9jlFhrq8vMdnxg457 -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 1024 seed: YQ== zfec: required: 1 segmentSize: 131072 total: 1 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:CHK:wlszole5m2emf6wbhp3lnlyfxq:irdttvny74gxdrcotzfjslfq7p24kyiigyrm5shhw7zyj3hsiuba:1:1:4096 format: kind: chk params: null sample: length: 4096 seed: Yw== zfec: required: 1 segmentSize: 131072 total: 1 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:SSK:ad5wtml26s7jjv4gfwlrs2653e:hx7zlqd7fp44i6ky6lfrrwcqxslkg6gd6x7moxt4kxkqvm5d6r5a format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEAtrhCulmdP/BXGRpy31rlHCP6E1vlAqnbaxWG3o3o0VlTxS2a Bv4x3ac/KhnIfkznrXj5G2o+lYNlWNt5DhKIInA9qHsPlIHeXHh8dtttnP17to8Y YtMjGikMyfSF5tTCCYYHxXxFknHZ8Rf8eo+JCuOzFqhO7RYSN3Rs7zYTwMeG7rQX 
PZgrt9PxOYxlGcluBsphr+PAm+ImFg56vk9Xy0fbDMXI9q1Po3W4eo2RTyDdmBOl y+v3MVc5L0ai7Tw5wiagQD5nnJDPwLcEEsE8kj6GVa1wGEUG7eERzS+QsoBLlmkW GLThyO9/vCp5cxoZ5LBIKUfq9W8D4bH5mauzxwIDAQABAoIBABSh5o8KXX0B06P3 CO+ltjyg51/WPvGNDUTLRSkjrHLZ+7ab4/T+29H5TQ2g+bMhVgE6/s4Usi/J4DYn b5pM3L3JdNJDNcrW21gPwZDeTZiuRZD4xDqB4vMqSCe/NSY6nL/cjaDEMUr3EN1u OvP6pJvr2FDU/Tc9CHOQnFhqqn0DGfrevRyBBD12AnHdfA/hlvquMdgp/u+MDeKg 3McZw0oVDqHHb3t/GwYNvEesMKC9xeA08YIMQOMYXyUxZ7VILiqVhUc5F5Kw3l9D /SGn+DxYggL2+C/JwwQzbgHRCWulZFe3l8wDI7cOI6ajmjJBQDMx10YYeZsUHmEq EkEGOuECgYEA1d+v3L2mRJJrjblh1juJilnSMnsIiuw6W6fGBfR1OCsmaw8MQ3jL 7JH90L9kHtchyz71lCAf7V56txXxkHvn7K3zuEn9qBTodH5GSKYJFYW35zWwARsz J2LifWzQLYyKg3PTcIm5z9WMCmBovZhNQivhhqRdGeRrZEyVWg9aXLcCgYEA2rWs JM0ywx5F4S51vifdM0PP6VbVCYPfXeZ9tDLkdA+Ztz80/IpNUoQGmfmTFXk3YRsK bheqltMJcqC7xUm5YMJLGlmLkYRj/5s9Xux01pYNt9TDJowu2It/d09OIPK7+ioU +jiparuYULKNsEJp9cSRXyZ8qOKB8PhX+GsEcXECgYAsDynOgq9G/xbzGlaiaJ98 Beb8iUYIQIQBL73mqiafzJvcgDwZhkAUWzr7jwIULGOE2FKFEl0hbE5Be17JUg1E P82ukGeWAcClhwH5o2LJsUNieTfp8m2GVqOsDQeR6pr6W5kaXPUPcMGpvZS2QjLg R+Ps9d1MITdScUhvRixqXwKBgBlQB3Fm8mYUvd+3AdeVQ4uoYIrQCu4D/jke8ROH BFvOZmsH/LjxxMs1DpKJiRVmJxutBoMBaDP2jtRed/z4cGUbd5fAH2AjI3O04uB2 m3sueL36+O8gMFfNpV4IprE3hrwIXM8s+aapuZI1aCKrPRo9utl5WdouBP3/sCbH NAdxAoGBAMTddDwt4yuRavBVlZSn+b+DNd3vV4skOVa+zV7Pdkqf2MLsoksIwQhz DT2RwZ40+Sh6JIBaxKERIRTUE0o9zfWwWQ+csXAGUxUo4Pd7FV8svwfKVl9vqAIX x1XcKc+/jgT5lvmkpv2TFwke7S0jNvUtpb/j81L2QnYg+9nFUxDw -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 4096 seed: Yw== zfec: required: 1 segmentSize: 131072 total: 1 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:MDMF:jfsmvwzuduc5flywq6or2sybg4:uguwgnvjuxfn3ivws4zpt2dai4kzoucp2zr3olid52xeln7u4jqa format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEA2K/v01VINZgu6XRXhKx1Q9Hl8zjkve0R5lQmY54p83t6eF7K iYqi9X9pFxVmG+3pGwMwmA2ZO1lg6+FWhiSazDk9O1bEdK/fM6N6Pyrk/s1imIf0 4lUkZH1tW3cKuzsaCSXRvPKqMRXqr45ONhhNXpJtxJcsrJXJrQ+37PtqJnbQHVo4 UzqCBOm5ytUJp65EtQDMBCdf8up4E5HvpmOzjfEw9SoJ5bREvuF32t4QNiBvkkjZ Xv0G2EtXBsO1ujpCQaRftxVCFermNDt+qL6s6pnYsfgpNMBmMgI8YSr9aNQrMDdD r50e0MwnUo7uyqQuZkuhC65pdaL0Dc5Fqi4dVQIDAQABAoIBABZ/5kImZ9IQ1EYv a2r+UUrSf7MKpE3IUQR+lmHfqXF7z9Kx3Qv9FkCxkyLveOPLh1njsecH+nI8LKEx i79wC5bLFr2Tm+CV5nJBNk9az95ZSzSVYWsi9h1tHK7TpIyebWynvaiF9gAUy4Kh HyPk0BvSzo0MOXpOL1vF9w4naPVHTfC+7GyaDld2OeZqIkrLJtXBxMnq/C6WTRRc yurMaAaQCtp8ZgjKaw8vdud+KMfXDJZqNIxqjDe9PN9Lzh/sR53y0BifEYKeyJ28 PavJWKAIUH74aku45FFPjZEIJ/a/JrHEyTeOdcn2a5Gx/dZUN/h9zVtZRgE23wKA D+Sy2QECgYEA/0V9P6NRlP/qz6+FfCKAx7xfmlABM6qvtVgZ5kFOACd2diY2Io+y nVAJa1no9JGu4ufJsRYaaa2ilE+rekXfpqXBngmlazrRgLkfyy0TkKDxMgP1wxkN owHh9Hgr2/l09Hb8La/1ITzqIQJJgtJVf7ObYRxW2fQD8lTUKvz2nEECgYEA2U5B rBME13Gxx/Y3fMhGJJGuJv50t/ECdPxUXOzg7WkcNtHXB654YvFmsr7WxsXkwviQ k5bnewj5BHQuj/U5B/G3kVubxhbdiPldQaVP+xfaXTLFhUodEshzsA5mlYg76QFA /iKUQ9W1gDvSn/xK/ARLIm+GdWrw+JEsRg+QTBUCgYAobE6bJzeiCqyaWscekzAl cPUKsKSgE+VjKCJhzfGWIKmnqAFmk67LLoNvVnuHTxKMp/vOaRuhpHdcWQlkgXAb KaBxcEGbq2LFqYsZV3gDrRjEvM/MJ0l7iK7JUcZQPT6B/92LNpPwwX6p33zYlIop gL2YMS6nsPZ3B2vZqtk6gQKBgQCY/+dvP0jWZB+XOb1hpyTz1Hp4zAnkBNYFBjBj 6QiJP8t0sZQjvWzXxT3YtlNESstBl3872zEKSIwD3cV26GKKPF9SAd0QwMKkEWbe tIU2tlmx6vB1Y3RK6EXD/K+vsubzrEVVaYVYqZyMOBKZQCqPfHpmOX3DKFOXv6cb gRPI7QKBgQCLtj/0Z4oOc/o/HaMKnP6Nsu7vCtbB5iYxtmA0yhBTB3Sf44/1mENE LZ4dVfA/ZbZH6M/+6xWnnZJUSXSS07dVO/kjX8obYRGEMXlSFtF2hsw6P1fkJBLU 3KPOvN+NxQaCi7zW2xoifSGPvLwzHdMwGWfioVoUVhWXSK671OHFuA== -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 4096 seed: Yw== zfec: required: 1 segmentSize: 131072 total: 1 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: 
URI:CHK:4ewm23jvdtm2i5xf4gck26wg5y:7cujxxc34mkfkmwhbemqtuixuektknmhhxlmujcekibf5amfwwga:1:1:131071 format: kind: chk params: null sample: length: 131071 seed: LCa0a2j/xo/5m0U8HTBBNBNCLXBkg7+g+YpeiGJm564= zfec: required: 1 segmentSize: 131072 total: 1 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:SSK:edbq3ekmt5si2lpahrwyh246gi:34snfcth4337nbu3cdgznrsrzmcudje5ot42rft2kqxyad4uyanq format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEogIBAAKCAQEAkFwLHxfDZEKh+ekOTF6YXW3pl2TxEoCMoMegV7GLQJrmj+hD fE7WFpWGE3DPsZWfVU+79VlU/o56/Ymx2REZQCFB19UgkGV8FPlGDjsYcXPU08Wz peR2heNrx2NKF2Y3v6CV9Tb5mJ58ibfnByNyjswVstEiL5FCRGswKGos8eMewmJ3 0AnC0s8txTEGki/mI52a0QkIlzgJTa0qu0Cgm0XnnK7xi1w6Yli4VIR6VbNrGhwc xChrb5cVD440H60LGJvqNrhAoAy+6Lg+0N5tWPuD3ZnyGqNjkCzqKVznvhTyD62B 4Ap0Fd5tbcNgqHAqmyMIvOfsZYmz/Jq6ql8ppQIDAQABAoIBACK4IGyf+Hxakj5a 0PeJILgHwVCKFHjQtgHNQUEWEFm/Z4hg4io5g7/2wkJWtX0OcT3BaYE+tPRsLCRi Q4XjWOFVnlJcjfJslgUtVq4BhIV0yFEOkYBqjB7zbW6M8Lrj+LB73NUXHbyZEXbF 5iiPW/QAHY/eQIyUMQ3ngbOWpayfCLJ8M/LZBorPb+JYLLgAygfOP2XUH2+kRbeH 2X5gOhywDkGSeoOsBCI8W3f9Yhn9HzP0E/htrZE5hhKim1ZcFcvSW4Zu6jLSMOGp oRnpYcyFg2Nx6JIiWd0w+f+JP0Nx9ukEJvHmJc6zYZptxFCdAPW1ATMB2vIao479 1HwchgECgYEAx9mVKo9CA2kqoDO4nEOjV06F6Qc3me1C5KUglXHX0JQzW0Vxt5Yz ygqV21jokzksiat1D6cGnOMPqaaz1bQu+bxaBO1LPku19dvDfi5VdCy+BwzUVkmE uj6EKqX7MsKkz8DgOPXRJErBIkMFxK5q2qmLaJ6I+QHE6TAxzuvKTeUCgYEAuOtB jVNHpNgWAWgmmva49j/LmWy6pBpBt2uuxXSmKlk4PvB7oAzDvn5HPKw0zLZ6MVsb /e+YqbLGxfAJeEsVZ4ovppTYOdClVaPlqvnf3tztav/JWA5mi/YsQNIJJRIGwLZf n0lyY7Eby2GVH4cCRAsXmDFKnfWoT1X8b39JsMECgYA/e5xooo0jrDqAHS3dZZbz Wtwqw8IjwTxoiROqpTka5pjRu2N+H9ZfrbEgtkNa0OSW7sIGsNXm7DHDgFLL5aqu ZehqfD5UkZRBfwfAg1NdzgCnGKoyprPkvYsaSRNccnwMCoavUVaYIq7rBUNF0Onc f9Lq7sEv6CH2uPp5cmkXCQKBgFbVqm+p9s+y5Qp+FPrZ9tsz8/C0/SQIbGmseGKi t1DVmrL7jKIIvHacp+kW2Kh03AaHSSrCs0ak+/CBGoFRiNiZLG0mIi9sCeegUj4q nnTx+88uFCd0g7UfwYIi30Z4I5GlUlvjSoMD7RBhX3xxkp/PMaI3K1nnvMSclTDq bflBAoGABvqvBpYQj+Lv7WWdC218juhICDEnkcEJoZK1ElS8AIFiQ2m3oaKR/+Qh ONjJGROx7v5JdaM357QPllCRuQjS3UfU4XOFQ814GJ8TiOzp86AfhT+9h766nQ0Q TQo+tOB15TiaBC0QMChvFNibHytFmGtsMGvAfseEEWYnRDDADlc= -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 131071 seed: LCa0a2j/xo/5m0U8HTBBNBNCLXBkg7+g+YpeiGJm564= zfec: required: 1 segmentSize: 131072 total: 1 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:MDMF:h6zjlq7mo46y6zqu5w2whkpmcq:ykakuwcliizsy7n4gwqsntjvsxnqznzt62iepifwuy3b3kwyffva format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEogIBAAKCAQEAu0lVdI3gH/PySDIEdhByXGIH4UxlSn1ZZs23TgiEZhxVIeeu HA7K5uHfXlstKt7phM+6x8j3hSHPMpq3svwdVqioAr4133F6W/ITVvZEeBKGrmEl kd4SI2gU6yvG/iDJmf6Pq9qAq5embshOY8gC737fufoXMkaE9fiOMXpzf8sqF2RL w9D0WXBiI1g3QvR3Q65SQTY+s4N96odAAD8p5HgO/LQlNRA5sFFJVLfCSlmx6FY8 wIYQnv3VsK+xrdo1KKNOpBBlldYth64nhCpzX31OWDn2nDsoPvZgU23ndXDPcKBK ztgZW5rJ9vt5HGbbShKFKBxGQCK0ud/IKOILpQIDAQABAoIBAAm4RlpYc6GM5k3X ZLJg66J+RvTrI1WgmEd01TbUS9TF0yhBjyBvJxog7lgGCNvQ+lMVedbdB/WNmeSB MZf1LCufcKrFxuN8DvLfJyBMAyUtJva9XXcKzKuwPueum7L8LiJTGw869ZMSOYXF 2QWmL3rQ/Zj4EQSfss5WMkEAnyZqZMY1wIZxdnpvQ79iGXy8dJDMACBfJ8pLTeuE lyPERg+RfDlvCCP3rmcNyNLcUdA8caaEzKS7bOhXM27ZI9WZ2D3wR2WyohCl9eac EKAquDoDwe4EVOv82cphUuFhcTF3xU3ddRADtP7Cz3UtuITc6w8C8TeVB/pCpUp6 9LDQlbkCgYEAxzf0oesOILxQecpnIbylDIk8khnBVeCAmXpYWFhjLu96pf+Lf26F IBiWA9jwETPCeztPVZy2KDnt3lh62DmPUW4HQN2SVINVTo3Xe13Ve2DSh4crYjuk c+B4Xs8TyXCA74FL19H6EbgSDzf7p7SICG8IHMuxcoiVQHptdOzJiWkCgYEA8Kq/ aM1D1tbRIKeICmqZATkwJJG2MOB6FFcBfRICpIGtn90+ztVdjgMfZ2FQJ9Eeg3gj ImjrnTig0I9Qlxa5DcgUqW2ZueR+r0Bw6dAp2VzOy7nWHZUyv6RGQGMA5vModGNC 
FvBj8mqD/XOpt5VCw7t8MX1r+GgBZfAPPiiZjN0CgYBMQAKGJu2VYf57XxjyNL4H ek+QrALv16nhFI7T4aC0yjxrZNADyk1x53cjqdjY/LKncCABaKXf56w/uiXqtL1C MZbdIPFtH4d7NZcQRO389yYdcYMNaj6bi4MG5sNwCnuPMDHTPS81sPpYkNjla5fV goncW6pjaBuYPkO+yRKqYQKBgCl1oMfTJK6sDxbLBZqVxon5ahvCplpBMYazfmQn aCEi3eA+YwWKqDVAwHY0w3Q4iEMpvRO+c2iASuPi7IU6uuJu53BQmzz06gYS2eDN pYf2fwGFoCc0fquZByksZQlkNkHmn4oIG4+1XcuZ01D2+6twbvKvopwGfscq1dVl dR5ZAoGAMsHeOltLyITJ8Ti9mFmd8HVhN5Z10sCKtoEBagvlVs1e8egy1wh6l0Hd wPj+r3UHNfoxiWQWQffKMw6sAhfAuuRGP8gdcglzq7hYeVpbmw8eDn44OQYYC3ql lz8eD0KbSm8MlWvyGvASgtquvuDK36uMGEBC2n+nN8Gt1+FPGHA= -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 131071 seed: LCa0a2j/xo/5m0U8HTBBNBNCLXBkg7+g+YpeiGJm564= zfec: required: 1 segmentSize: 131072 total: 1 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:CHK:heczpdxphw5frp3ri5sh2hpqei:6wflx6lphy5mhtpfb2abznoa3yk27ynbqbzcecs362miu72vkowq:1:1:131073 format: kind: chk params: null sample: length: 131073 seed: /N4rLtula/QIYB+3If6bXDONEO5CnqBPrlURto+/j7k= zfec: required: 1 segmentSize: 131072 total: 1 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:SSK:rwikq43zza2vvcy557moizudru:clcrnbchmh4ucl745t3f2cwo5hwhchbvb4pp5vbapqo57nbh3iba format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEAyLPhUE4G+3cl/Qb1SSmciZrbAJ8qFKZutcWqQQM12THlNtzg gfl3wTvQsPdjBJQ1DMUTyWb254Mgdy/KJPGI56NY5WcIKMvNUe5QUt6hzH/Cn/uF N7z0hZ/590b7dsr83kir52eIjcyy2Bv8dYwUEybWv7NvRW2pboTuqLpg0ww80+vT oCVvgvJhet2V5eXmElmzb8HbVEZklGVoUg0fGHEM+hzXiv+Nvepwm4nC2+Yq1cay gB9h1sDAgZSERoJrczlfi4pbrj3qgHMZTqGkzKCy3d1zmKYIOub5TmCnzMAa0eHT G7i+h5yWRm/k87H+tRdEkenZoPlgk0eSVAsNbwIDAQABAoIBAF5jIjJnD6eRaD8v 14k51ZFtT1NihyLBBs3bkO8UOG3Vpkt/4uGdVfF9VO702Q9dN/mycVTFZJaKN2l2 AyYOpWjyjCsOomq1NfEzF3lxlDwdVYVxfzwwU/rHuoHNUxOR8QwEtzuTmEe/ndg+ iSMq5oH/QP1UwJ6xLP5569dUF5cIlBfGVuyMBfynjHrtQXjUCKM5ZJ4onCnzm9f3 amS5B0bMGpWNSnOogrUIk5vlgVJoydRkEWook0yNYxx1/EFanx3rpzE2V2IxHjXQ VvKze/lCNCddGWNvgqevsBMeGUsrl9VtYQ0fvWbYdzNrKb7WtYmXE+WkBPm4PM6x Hxe7k2ECgYEA4E1gmYaHberFF4ye3tZ7Xdeke7C+g7Rc7JfoSM6MnUC1cnOklipr yhibWkqGi+mJl/mWB07uPz9c/81LZWVGLVJMI3RikGRfIb70BPXddNIaAXceMd4b cLx6rQ4aWLTijQXqu7t3kz3utgX+9wGbVtB44W9ii7TGs7xVERQs8X8CgYEA5RC8 ZI+4lMFFSsYe6pLQMauLg7d+hJjcr/sQ96TDnJCMCkcDQ9YVNepfBWbF/TZWU1er F8RCLN+HtDrXqaVREf6ESYOccF/rKCavI97QTwD2Oy1PD59Oh7gUo1djjRIPezuL 7EnYvcr9DpNMjDeKKiMocAlWSRDXpSHaAx+q/BECgYA/DaFlJws1G/URvKcAb3y4 kaEcYD/+GBqzK7TRmraukf0v0lBnIj+wzSAGzsJp3FmgjjndjhOtVeuXwSc7tq92 mBbtNI9slbqkauB/8HmzmEhVNx4W2KAQHfvCYB+J5jd1ez9UTMu9aYCMTL0yxJHd YrdIcB5ctZHR/tRO+8PykwKBgHIvVooWbp+QfFcazbyG9MtdxQ0iwimc/Z2n3Lxl 4LDCCVzyKzl8lVQsAbPymE1x8bRX5kzRo181CjOYhXrmkrQSmKUAu1H1Lob0Safq 4RIQ262CF4AlHINhCsClxlVDJH58n3JpGWb6sgy69pSK9w+sOPMoZF/Fyolhh4i5 F4XRAoGBAL9lDVciG+f7flylMPFkvKNcdgKMxeTAYdupkmYhBqg1Z1sKpLWzSLHM HgHel+wm8sDw2atJ0QCgwgDWzkSerzz4MujYEuntM7EReMbx5ArhqsGo1abAo5iT P6pdPFMUBWaw0rVgpEHdcJylb7mIQsorLmN3KaGrFs64/rGVM/4E -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 131073 seed: /N4rLtula/QIYB+3If6bXDONEO5CnqBPrlURto+/j7k= zfec: required: 1 segmentSize: 131072 total: 1 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:MDMF:sj2qlco6lwavwmcjtuxohhsn5a:nn2kte7aa2q42gq3fyy5m5ojvm5qvpyeqpvwzswwrti6ushqg2ia format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEA8pEDu3IwxvOnY/TFR/sK/m0X+v6OW4nDv/P0n7KmikC1elek NPE+KZZYciD3RvYqcmT/y6av37t6sV8kYDDRBNO6sYcAlByy0qqaBJEdp/q5sGRr tmF5zfBtZX1nvbmsy8x7pGrl8p5uhfbUy6RfJZOv5s6uIHJQMkae32LfPjgyGkTb Ah5mQgIHBun7NISW43ESNu0XPLatcDezO1gAAjQQCfjsLBcyU/bnNq4zZTj202VL 
Jl9G+JMdPT2qkEduNUyeY3B0Ot7XXT6SXFd/IBkoeO4r/LKnxsn+QaAUPedlTBcx lk91EPLLCvH4H4ZHxgOLAp9QZFfOdcx1NWc6iwIDAQABAoIBAArGNDM8RDxiEDpZ YfXribZ5ZApLCkm4mdBJ5sC9L7aOX0E66VlMqeUw/2a6XiFxx7rjD5WdJsy6SB3e yv/Wy0H6oZ1HENiDWdIPr92qEHYopdzW6Q3l0II8Pq+2XUhJGgrHX2qTMPmQ3fnn V9Zfy7YglDydS5C3YyaIioADpUTfJf6hUqmbHEg9qmmHOOYY5rDjXxd8L2gaFMci tO52tHBjHGS42DZVY4NYN8cYIAqqgyDuXIUDmI/RCxQst/s5BjUnLvK9FAXsh9pL XQvCDRU7ENMt/FXUzR34lb9k3OL15Y7+tgDU/7njberBBZmgvejcC5fLN3P5bMMI pIgbhfECgYEA/UtYVco6NbKdYq0mI8HQEieLOeO1ecdjO/n6amxHBvtTYPTYmkus YvpEQ0+VRcqPQppMgLxZOkXFsM9MsD24wb3CNb3NtgXQyMGFwo4wP8NgR4csoLlM cVu//PgupPlAeMlicLPxcvIRnXKQmQrOYit8iWy5VZ2G6bOiqJ65JTMCgYEA9ShV UmTZjvqv83tNnA8ViZbeXyc4xJE/qrF4Saxcb5Cihyvh4vc6uBGyXZwA0euybaeC Xqbemc8+FjoSu+N8WAbaKdzy96IkgIJJn20k+ZljqGA6a1s4OSoTcIklx5PoOCaE 0zdoP5opOE7dsM9jxtvfWTCxi1ZCts/vRJBX5UkCgYAGAoqnBnRZH9LSK4+TG58n Px7zka6VpCB7pNPHQKhyxvXUgBq/lnoRoySJgFLnZAYAK48TIuTvGAa3ykNkjyJa HnmEMuu1nO+2Q7k7w4nriWQ4bkGl0p+4tNeaVf0tVuirtQOL7wkUlB/M35IEv5fk BmofDKBdIq63ztZWL+XutwKBgQCCiJI9h8MrVSGAhCPDt2hhVTpb8ddRGoGK0mnY 2HRzVtCjJmNk5PyX65xMKXdqTpQ3vJw256TYwrctQIifEDYx7JwW9DVOU0AaSMUI pSWt3NVqXqpcZTqffV7SacP66y8XTrMkf3j7fIr8F0oFDbfztzjKFZpDNY/aJQci O7UBOQKBgEFTXq2BFYBWQ4ot0TuvnVF9/8/3nAYFZEWGGu+XRcoq0DsgIwZcb+rk LB79AN27BJw6gaskoGKH3muylECFWju3CQ4yDXSLFhZhHH/NN9p8Geu7eluWDNIJ Z2wWnZzIgOK6W1QY1ho+x7+BjQDJ+T2NBK3ApbviaKA5dv05dcf9 -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 131073 seed: /N4rLtula/QIYB+3If6bXDONEO5CnqBPrlURto+/j7k= zfec: required: 1 segmentSize: 131072 total: 1 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:CHK:mz5siv27pqttsl2f4vcqmxju64:rvxhulxtufho6pdbwj7rifneb6pei5fl4fqptcce7jdkbyrjfaya:1:1:2097151 format: kind: chk params: null sample: length: 2097151 seed: uqWglk0zIPvAxqkiFARTyFE+okq4/QV3A0gEqWckgJY= zfec: required: 1 segmentSize: 131072 total: 1 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:SSK:74k3o33ahqgozn7iq5vgnnxuau:t6wznkzfsjrbudipdvh3c5xcnpyndge4amgfhajynisohhfhyp3q format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEA7Pj4z8JZKl9I36j9UWEKCdhdrgsFDo6B9Ul2KcZjFTw1shEM Li187RthvkMtN/ih6MpBWXx3YabO7wyI6TWWyYvfcTkXnkzs/bGo2ludJHyPucLE +YPNTLLYvvya+iwnDBk9y9gSPVTQKwVKN8FnXi3uUHN/s61pMEF4zIA5GhqtVAPn cKvpa6fWrNay3+qiyOz7HqL5loTUI5UBlyuQedQH0vcqYRDWb0eLZAYMFM7Q6kCK wWSwZEcu8Fvl07wCqgYdmhdWIZSU6PEzZjeHR2gsUirZc0/nyvOXXMEOQxGgkbdL ifYBFVNJxbZjPBZIIk3RuCPKOsgz4jOuE9gXuQIDAQABAoIBABX/5l5BuIEF7lnQ ckJo1BBZhUqDSWMYxcxD/dofY2yoQoMXSnf+NX484yEeH9RcNIoypK7he2/JMNlI FgaYhZ9ZnFMCYUS+XYFCShIjgssAj8Z65EoWlPhzrhcxxyCskqw9d6g1LtROZTKc cuk9y98WPCA9G/Qa7qYFN5xWPXbeKQRoBaDwnNIWkNTav1wXY5aK/w5IR2iqRXjb YemajUleU4Vbn8ge9CLf6iEj9ILtGeG2N0NzmYvWUNvbxN4HxIxd3LJHfLMZyqvu yqDuhSNTh7nKP3sYxWGkgkLcfoYYnpz3syhhJeAA4L9WXlvKtv92234bBzS6TbJB +DXUAyUCgYEA95419jXoLy0BJ1i0rgr31UwAGs7w78yUw1mUyZeUbj3xxAu/Fv7R pcxmxpzMa6CSeDxHY3DWuXqOu5oXV8S8MeFI6EgBlBiLWYoGVP88Ad0iTYo2q3FI Thwx6jyMCLntcIHJi7JtYMMJSLD7ctSgqh29iDyD1QuUmz/YZbVkclsCgYEA9P6C ru+W6NxlNZN702Ha+c41cN1vo16KAhlc2Li9YI9/Q51hezJMrV2ged8yUfYxFiPG Kqtk9y5CkNmt0wNZ4oA/8d3WatZL90cf+p0G/yGWb4BVNFao2Y/pMJtVFZV/T9A+ 7zDuyPPHjaG0swiQ7NX5eravsq47v+SXFBocUnsCgYA1tlPuPHNJCHIfntZSin6H /hxntEv/OFlsppnnwMGpyDYRWJry2gOP+26v1oNhNUuQWUMDBw8M3NDpUNuPZlWM XFn8SOJOxaQ0oAQPm+3gWZ9/QmPpfIE6sFMDhG671djzdrPJYcLoImZ5JirlFcpk HF9olffi1sg9hPPj3B0V0QKBgQCGveOJ6uOIto5DZRXZMByK/0qNBHx90WT9uo1B 9HjTPpizyz7tzsA1KSU1Yff+8/QTRSGcHh+tgpfBqrbbMyCgXgDNOUDQCYRGP6vq 3aoXb5WZRW+XFYJQBcIupX+qG0qlztaOHs91Xf4Ge0Uyoidy2kwXnZoMH59k7ofY 2nNxOQKBgB38kUiRxm2LGo4pbfuGhgiVf4aDRUDT4nwlaRV8waIu2qeaWuETaf1k 
bayNxEOYFNraatkRVF3AGnOJUHLNwj90HugILF6uXDc6FhlgQj4R3peui4iaDYvH OPLuU0F+oJIKhUnRB0InZ2Vbaa4hCKgvEftqNPR2EkT2YkHgqAao -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 2097151 seed: uqWglk0zIPvAxqkiFARTyFE+okq4/QV3A0gEqWckgJY= zfec: required: 1 segmentSize: 131072 total: 1 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:MDMF:5jvanzz7djyl3ritv6cey66izq:ognbst5tvfuow72k2lx4zlxbjiwpz2sjbtien4kvfl3ksiybhnha format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEAtJv82wDGagqVX1ivYTR7Il5iF3N5NnlBp6aMbCRRk3wcebLT VpTYwlMfo8LHn/b6trMcyP2l5vczc0EfkHYE2zy2f31SRRZ/LClZqtb1mu7lV5Nt h1qLc8jGt396bvAQtQnCM8FavIQW5MZUxpMwizWuvNoMEcjOva8PXr0umvq1wROd b9ZdTSJMxD+uROl7ksX4F7zOS4e8Q/+s6lyumiZcSF7ZSF3BifO0d1tzRR/4/jv+ EtB4zMf60b4m7DwClMKzht2dYmh3Km1rWdVgxZEKncWY9iNJ9Ohul8TpdxGrTWQp bdYdjRS7mNiryzU6hHF4gwzaAEjQ3tXytehvcQIDAQABAoIBAAlttUMRMZdNyyN8 0HCjwHCrlBv2TS2KiGKb34VlVfuvK7HJijfhMfmr3jrhC1ih/s+n3q8gpfCk46lq JmwwhRHIg+lFURSs10gmaaxxNJKS7L0bXXxTitrC1s2zvxM1WO68JJVc5dA+mXVc lznKOFGkJH87NAyy1EjzVe5ggPANMETFU847/4mNE2BYINy2eGX14AK+2ghkQYVX YFfO1OGZiOoxbEK4O4Jqd7KezCUlNGMygxveajO/mUdrRfD/DxTdo1FM69USWSTG ZV+VS3cKc+zxaL+lJVoYyVNJmPi+n9RR/+OtS8HZXqcwszGHQQbPRXWdah6mNiWK Vpf+ODsCgYEAzA0MDFWu8QVeI3yn1o4lKiZTM55gRIBxpFwVe7WCk13mQCp3XuMy Cm1WP47fMmTTGCOsNO0A5U6ZNf4J1yNbRfITCLaKkFw4mOmq9NqPz3DY1dfHKwUp bjVB32rGrS1Qs7Gtbq6P37zXUGLdrfL0qs80wqztkqJKOvv+il64x1sCgYEA4pcj yQO4mAEsz1h3RUrnV83OOooaTAd6i6Vfcdlo6VEnyl42uqbrmkzmzpRrYBrmWaTp ctDMy3j1tmRO1Dhpm+uFDjGlmIOeQcLwFI1kx/7kvqWdRPSVP5mRskF+WNiFV0ER dFptBVjLT1lRBP70rzCQgIY9loM9fdGBam0E6iMCgYB0xc6sTGimM90wz8i5J3Wr Tm107+DFsv/WAICm4DQOo8D93Y+ctMZRY0rlapzemQaZHOkTDMLjd3yEgpIdFXXJ bIRqCxT3El+tWqPkJiQAoeLlVev7+aNBF6dP9SontvQlMbw/yBQ8BTTvIvUb9BsC mTvnYNFAhjGW2dlMVHLIWwKBgBKypiFQTUs9zZTOmAj/xVdZhEsQWlsrwtEDNH0Q k7etGrt4SsvcOlThQ6qIVNP5ZEjBcwImeL/Rm3URke+xOAXFyZUCQ8fyFH0YuPb5 M/fM8NNKl0+5XxeAdKVhAiwSse4hUG9phtWKHjzOAgGHiGlseIAik7J34fsf7q35 kQ5BAoGBAKqKtCu0OndLo1ZI3voVGiQrbn+OZ/7Hhjs/Um9UuOlVopv8rrzqRpAx LnC9rU/zEVG/TOjkno0Kj7/Af49z9mxjtRzxP2J/yMujOPv6Eer30z5NjtLNyJ2u QYIiihHEUULkdZ88FIHfx5YImHX4E9lEeFG13JaJnMYbmvViXtyk -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 2097151 seed: uqWglk0zIPvAxqkiFARTyFE+okq4/QV3A0gEqWckgJY= zfec: required: 1 segmentSize: 131072 total: 1 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:CHK:shbt5viqjzuewblgt6qeijry6a:je3omw53tmmluz6fvqupx4uh4jaejc3fcvfjt56rfzokzhgdczvq:1:1:2097153 format: kind: chk params: null sample: length: 2097153 seed: BTBX/ampNfLU+ox7xipBGiaSbgC0kcB8Gy7BkJB4oKI= zfec: required: 1 segmentSize: 131072 total: 1 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:SSK:kibmgpkvunbi3wmq65ohjuxfoa:y7gfq6oydjcaeui74rjoupf5xst7hpyjuqyvcop6yhk7xxmpnaza format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEogIBAAKCAQEAsbpi7SI2V3v47iSl/b84TmUZKH6fJIaznI6bDCRiJce+rOCA LATvooZutPlbkI1QI1RxCmiZ0CPjemUpXwUIuLclBUIfZDBXnjZeNM+W5a2C5Yt4 CzAHQl/Hhr7Xtra5csf69dJkSOwVV63TpoN7r5TrCoLrReKlgqHp5iYndaHIsP2S lfUaX92Z74xtXEKxi/ZaOZHsMh5DpxtL6YSZjHkXXctmrwmwcAvs5Rc/qppOydqh H85l5H+PlSAJjP75roQXD8T1xlIFNFb5TUy9+fP9/NMqUmoES9eV+cCjj9Sk3S/S WhSSFXoG1DOBFpR6iQtuJD9qRjL2JkDD3PjiDQIDAQABAoIBABMkz7+Zwg851hY2 wd97f7HoD2Xuf69kSAgKz1YnPCA0LAx8kSnMrVBNGTMqset34UQw9g0oN7s1Bm16 ZJKs3OPirFzs4qs8zs9GrW6UVr1uK22U0I0p8vo6DWis+VjfxUmBE33zl+RH88OS QHxM6N+Ak7G56ORJ9ciErshg3zq7EKFjUrBMcpq7L6m+yzy1PBqZ1BcnaUI/o3WA W5pMTZFODnpo8MTWtmFQQ3ZUUnFfuBx2iwhVEP7Mu7BQJ0NG7C7GLsIBL0bNosk/ +7YbhKWyvvAiRvu46JKRIgv/Dn/iOjZqDnoM2UOBNWLbq6g4gBR/MRdgH1pM0m7C 
4g8oNzkCgYEA5+bJ6Yf9HBBSROGoZxMppo92voPSZ0ykJSVRzOIu/8wFqQHlNXhw /udWfHCIqwmqcj7yhl7wYdSLsperEy/ZMnhRb2+KVNyAZBlRlWmZ0kX1lahYJ2RX 5s9+fEr+bU54ibHZy43d3+W7//NW09soMHyaLuhnGuhEy4/lRVo7xHkCgYEAxDJv yLooFWeasa9usQTEeyGQLuh5Fm3TfEXUluA+pBegTqNJ7BCegYl7tI5+btojZyXb 8m6LFdPj7OiTA4fHX8c+FgRpZkh/4HtbX+pKbKkp5IycMG7N9Fg2TOkdDfoJl4Yr NhLap4xV6A0X807fiTG4saL905AT6MOojbsznTUCgYBv+3tfIQLxrVP83Tcz5wYC 315I62EL7u+I3Heex05IyZ2mGjsz0eBGxzF1T+Y/KaC8IHd+uZO8uiVnbWP4FO/+ Nimk9SjIh94b+Dn0O5VC+/N2fF9tTkBAPcxnetNXtz/vxglVCUGuH8Lj+v7fuQG0 QEc4BZPcY3LtFaRyE/uuiQKBgG9ptNDn5ZtCGjaMyO79JhZGGPqKSTjTZSVNAkwr S2cjg4UkdPX4+gnVaMo/oMySU0hf12b0H0dl7Ci8ab+3eyCIpFkcaD4NLZDsfBcb lOffqEqBDrDyO0JmVW+XcUhelNPW/PLYAhLjPmVoChHA2G+wLJGzXTCmwKeNdEoH 5GeBAoGAUEAdLLTRoEUXSUnQ5Hk9OGgHdc44QDZjVfbN4sc08mc8HEXHcMDIwpMx LNWapDRO2uixnzkX5Z93spC23CerGRcH6co1fRqO4N8iRgJCvCZiluKamKlpzkBp HAMlqkVaySOcnAB1hkM6T/MzUu8PV55f0LkkbBbqF/hOk7003W4= -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 2097153 seed: BTBX/ampNfLU+ox7xipBGiaSbgC0kcB8Gy7BkJB4oKI= zfec: required: 1 segmentSize: 131072 total: 1 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:MDMF:7itcabaosxi4au4pdhtyjepqnu:emhaqs7urtgtsastqh3kzforde3y4kqm4j4yjhmbnoox4iqqrrqa format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEAv8HiUgJ/Iyw7fYq2drT3Ct0M5XCZPK+5tprVC9wP3jKcwo+3 gPQ/lOFvyJah8tP3Qdkth978KRU1Y+L7cG5wWFcH3r3KoNB0tUMsI0HaLRjbpTyi mU3MuLWlXJOtOw53BOpwFcFWLdXZhODNm8IxEvPKTZYeJstzhUmew5AujZkpnEEg S2gF9JFqwB2LGae5NmS1EbKXPHVSAUvGhuMBvWnXD1+LNKhLO78VFE2C8rj+zCNu khbF8XCsf0TN7A1rxuQ0xXlUDuAiiS7Hzx1doncQuGSCApKxBzAEKXvprl0UgCcg sRmVzYbPLWoFVkbn8dJgwwyzDXK/I15/aIioNQIDAQABAoIBABDpdKBu/++GMyj7 VuRZSYB3xm9l4t3rUaG9PhTxr6SVKiYurqx83i6vQ0CZqGbWMvRnxxA4plypNjA9 EJf15YqlAliuvHQ6blCeQAJMCIX5r0V/d4e1yNxxiMgFbj3LJMwWMRR6HLOmLKz/ dqLKGbHmNm1pU/dv8hxLRelRigmK1VJtvyplgYvJ64vaEz4Z9Gh1zFIHpdeiXSKH 3RRCCXVwUhwr8VikpsSjPeSfRj4BjyqlZr8ibEkvhr89hsNUxjrrZZ/stZX5x3Ea 8QqRUrfegEn0a+hq93JXN3Gn7psmrL0Xy+0/IrW619MjgCP/s9U/VIAEhVcc29B1 MQj1OQECgYEA86wnP5rqlw2z8rEoXwHKPkrk4gVQpQ0zVwBJQUE+myTHMaaMuArU 8zpcoG1TROJ5FJVERPQLvVttAOxa2iK04pAwtaCiCmN3CPqwv9j5DaLVcZlNqpe4 msFKDCahqgPq41/+yakb9RG7l8TZn1ZkfgRI1wIncphtlxCTPrUXCSUCgYEAyXVe XnfyAj5oN34k3nz41Ee0DYUIIVd26wUKVUFt75DwQ2qucHU1Vz1ajGXoON55y45d avd1W1aZMs24Qg3to0UoC6n2wX1gWyf8m3ibIbTQkArcePPZNQqbqJqICJvcq+jb 8A2zk2h1Zp+DrLWpBUHnW12jpDdFKOaiyyWdHdECgYEAsJCN4Ajg85N6UOEN38ns QjcCosQ3K2HlUaVjb2VXeBOuQsvsK2+t3pDrjVOqgr+X/NIsJcqwtwUIdyLMskNz zresk+9RezWXi2obqOgPj1HuV+I95N8LZReqECPuAMPV7+wfMwDWwT2YMODy0AJJ zwZLwYBOFTteLZhVGZselgECgYEArmRnmKeEW+TiGoecKu1MCZc4iiuK6jHow3HN jBfjrups0i9bagZMcoSuCbN93xzXmhpXS+2DLdo9K/lhc+zSte97xv0OmliKPN7U kVFKGVeI4+hDCoEsmfng3YdIEwu5bydYnOl/di+K0ZdsSOnIssBmInVg3xrpR4q/ idO5usECgYAeKvPyn3i8jPHyAZGcpw6MRpNEr7NjdrsrXE8lC/uKEHdHBONh6kLW +B3Tq4wBeUjQkRjeBCOIH0fivDjDGQqOKDfV6ROqJfqqsMCEc5Z2hMQWl+wNHf6c Xbqb9Vbs+foYJImZUixnhsBUFTFItBrgrjLPIHyhXW4htQ6Y/6W3Dw== -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 2097153 seed: BTBX/ampNfLU+ox7xipBGiaSbgC0kcB8Gy7BkJB4oKI= zfec: required: 1 segmentSize: 131072 total: 1 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:CHK:acgfc4hxhztews6tdk5i5dmzxu:qehbpfrrj6nw32tdhkgncqaqenvlm6sdiskvjq4zasc7g77pzqta:1:1:4194304 format: kind: chk params: null sample: length: 4194304 seed: 2NVWOXvtwcAFf0J/g0fDC+ypQbnCU7awZoCxcnhpkVM= zfec: required: 1 segmentSize: 131072 total: 1 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:SSK:fcxs6wbchl76nfcmlc4wufptya:gtizcwbv67zb5g4ezygowytwf53zlrf544vgsglfmpre24fxdzva format: kind: ssk params: format: sdmf key: 
'-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEA4PxKNABaQ7xlzJS4kc12HHplV34jhtKN5QBoo/RSVzB90lnj GXAa1wMFDwTaruBznTbM/QEZvMtdgCG5SaQqBZJoXjkBcm7RKeD0OWDDjOIWfD9e xnbqOjko6XBj0ACOwSwJ+sc9d39Tnd8v0OOSKaI6wPrmXMzIxwqEBUBi8iJsWwjn Kn94dZBHzRZMuNr/FSsXN6t3KEhClEQAG2JbYrmbg4efC2pvQArMi+xw/0MH0CGd qDBALfB/kU93PN2ZEb+D7iGic/3pphLwmlDVGI8ZcS8bTcDnNj73TPabshEpRJTQ kQmy+ErVHyJFndHXQ2XdfNNbHptiEx5WmFRJ4wIDAQABAoIBACQTix8r3/gqRBVE 0xRyIpqDbS+qkYN4zM79PJ+ZuasIeAyHDwQ7toS8E7oU+FoAB29HY8xoD5qh7jQc dEEg5VTFEB5CZtR/fOO0Z4UHL/mDIWw6nyBqM2SIWOKXJod/0g7wrbL8SC4as9ZF /RKyWHQmSDnnTDwc4aRlBRwbIc1F4bWnbmdoM8M4jbw4ORs9Vj+CzGqkNDKaFe+M CYmyZFrc6d3N1bpKR6HG4zS4nwwIwHJxdwV3A8Nd8oABZ4ZI21FlL49uicdQR2WH iZhYfCjqayZWDIKQNxphaXMRuZX5lQPxlJY/3g/e6kEIPUc5R0QLw+KeEvU6gCQW gV3q0rECgYEA4mxMFUbDghRr5rOCjTt+kC0WDsJtLWHMLb1o7Gk1eCz8puhRBhAq 5vxAAPtx722HAoJ7/VkNpawsZ8+O+njjwt13UqPfM/04cz9LAsX7Nl7Iz13HX0z/ eI1mjqvZ+I+TPUbhv/aU103BXpGfUzDFAJOgfF3Tr9OFOgD9y9k41nMCgYEA/l/r w9/D06OmkAo4CP17cs3LzJTZG1x4HSF7Cri52BEhRj69ePkKEB8oWhhCg+RUVb6y qPBBHhnVyB+n2qmRacL2C8zvScbSjRb9adbVYNk1t3CfsKb9bIinEyYhyBnV9Osi bIPz3KKArI6g0UjTJCDz+G5J93geLJ64Tf9actECgYEAl9FzpmR/XO4id1rv85Dr yPJiMt1M5TwI8rZo7vOQZZcMhUGKal1W1vBWXhI7EAZJm3YweuxGSUrLr4OtY+bB GPz0MBYu6CYmvqe2vRJQ4eDmFpzTvOPc/FEbbhhum8pxOIoZfmRw4niBas6LnPU7 cqqJ1jn3YZKbZwwZIKDzCl8CgYANJXHu1pKtTmjeStjohgkqPr6InSy3QEko4EEW pcNdCk2Y7scmCGDfwmx4c4aEgHlnUQq0tm33G0i0Jgarw1WzjJLguz+mpXLePdDs 9mBit/cRuu1V2NuCD3HrCG54g/VNyhUmXI9u2Ksjv99J0aSv2lAhh7mk431TuwQ1 a7wF8QKBgEKYyNDgiDmOdUaKLnwZYi0UfAHZ9m4MzgaereIScyHNxl1FiRAkkte2 X/Crl2MX4eyLxjFiafvb34l7wAsw3I1G39WvNYDS9tI98iryizB/4c3YDWf7NeJZ C5iTznHcOf8+8bGuKjjEVUp/RZ1/v4L1huCqLYCKak1jFNoJyGxr -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 4194304 seed: 2NVWOXvtwcAFf0J/g0fDC+ypQbnCU7awZoCxcnhpkVM= zfec: required: 1 segmentSize: 131072 total: 1 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:MDMF:srwpg74natztaqqh2rm4p3skji:dvxhwilixw57pkcto5aviohrba7gbwyscnlsdvjaqhgvmcy2jezq format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEAr0Vb2oFiCV5paQeKQD9USqx9ZKHux2CWCjzUTOCGpgr/uXLk 67/XL0H+69Z3CNBk9mNRpOR4bDWlps9QL6axSz9Of8E6PSO3YYEKktyzSsMhQn4M AaJvN4RBgmOGcN1idUvslv7wbNtlvMfvqEUCSaylrgErcNsudn326/kkE2ARWfJ3 ulvZ6h8QA0iDKgnQsuOq0k4jQT+4XLe6RzXIubNZJM5AwBCToCZFUKQ9wuqKtbGk hj55NgvsKUj/EbBEaxDzjVoP+hdSAR6XoCXkCKHzAzdJNyLZgUXy5CajeOHFDsm+ gQ2ZjRhKVtAz75QWNy7eas/Na+I3zsJMzfVbIQIDAQABAoIBAAFdkQ03KqryhJqN dRGyPJyhUo/FEDMW86Ghntt4zgEUqbCJmTLPVDnuxxw8wpbREtGgQqD+KQRaIvp/ 99ALOdXhfiHSK6Xmyuq0TT9e1KRtjUCInVzU5bjBFnE8Mm5bgdpylzsHl5rC1ycn nUcfvy27mIXYxfxzhLLmdn+Y+bjkkyXJBqfG87C31Lzyc+08kYZQh7/Rx8wuXiqJ 31CbD/fzAiKWKWeKopRs9nmvls6C2xMdKqM/r1XI3Dyy4mDn9xzRg5/uSAZYOmeo vw1wvTYkPQHYdPZJ3pxQ7GMhY04Xc4sNtxsBlIA1I7MANw0jNVEZd2yMrt4luxG6 gFwUkL0CgYEA52BooMM5xybo0uKSNa85RYWGOepZQxPV3hz4dczIpuiPeekQiPD3 etWw7+nL9aygm7HQyc/QxpcfUGCPZoiwRbaL+94wQ7PMleOtC0ixxeLWQOS/UQ8X QklSaBEiUayDiZJGknQdjWlbhLyGGfzIU6pDh5TiS3fJuV0lA8E36PUCgYEAwexq UugfChyoM3ZFIs8CIjH5rLrmfFZG9RQQu5xKKjWYpyfe+y94TLIepKv2U6b7oeZ3 0BnAF5UB6Bc2/MrX9UmI9yB7a8cloeP1eVzH5g1WISJfVKon+Do7eRTlGUb0Pykn C8Rk6LB5LFEWjsMTZc6AMpo1f4gUtV27fdJS/f0CgYEAlBmMvyJXMFeCfcHS7pP7 J7nhAd80RZBDu8l1bAmpgdSoSdNZ5x2+exyfBeHz0IwvvZji2Nqxevwuagd0op/p nKXNEmnVIPDMikDSeb+NMuoQVDdXEm6DZ8WA/uXAvuCazYsYqxOx+tsuXldByw6X t53rXbR56O6C66hoUe/ydqUCgYAFbUJEc656r/adChBBOx3KKy/bf5d3n0p5DUiy l1sT91AATYNV8CwjqVBmN1G7YY7lJvfvYOkZP9g/0HZ/eIW2nYoxsD0D9Ry+fQyf itMlQvZIExgr3F8l+Ss05jrLDEtFgTdQgvx37ohVjydcc2UVkkPQJrScjwhVUvwu NzaPWQKBgQCr1nEFMNY1EV1cucmhZlp5QOUhybUH0fqPfRjRp3sFuyu3BUOeTmum 
lkKoA+Rs6Yrs2U3OsEQxOl7qV5Ab350wiUhzqOZsCVEK4UkAWweeY2IlsyBybbg7 lSCo+6eJWyStRracQofwUwsXklCZlBcLAL3HVyH2R/9UuplVU/Wmvg== -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 4194304 seed: 2NVWOXvtwcAFf0J/g0fDC+ypQbnCU7awZoCxcnhpkVM= zfec: required: 1 segmentSize: 131072 total: 1 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:CHK:n2642ofjfr5qljptdwjbwjz6fq:uwtjrewrev42yvez7br2574xu5vleallwnyddjaxqquvouv3dyra:1:1:8388607 format: kind: chk params: null sample: length: 8388607 seed: w6uP8Tcg6K2QR905Rms8iXTlksL6OD1KOWBxTK7wxPI= zfec: required: 1 segmentSize: 131072 total: 1 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:SSK:m36gxdp2beqzfnymsb7q4lgmcu:2gqyjo4z7f6z3kqbbtfasnmfxdledtkxr5vs6hrksaodejunrdra format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEArJF3xtkkEXvoJbJy0WIuatA3Ou1SLV815+BAtobFPBhFO4IM f+wF+3TLsLgo1kvWz3OBYhiElx8W+ZBifi0iGQfAXQJNOerRhGFgdBONN5cXCR28 QUvvmmM8gB1BWfJRcoMgRCsGrfEJE7r4OQ1E1iPwjOr36cNvssiIT8LDeb2yQ7hq gzN6wDpFxt8aXR/weo0vjcw86A1MKby1zn36ufgiQAj42f/8F4bXHh8wGs7IBKBz 8ZkgwCaI8UbksX8YCQD/i8MCJCrlQMaIefxlhR/r1CVB0hWII9jwirbT7vviaI3g 7p6/s5Xgf3eB08ZTvfD8Kizenoh58DoZ6eQ8ewIDAQABAoIBAAE7ImsZNnAnfZ7z OG108VbSuGojsj/fm39VcrC6omKM28WSZmttMBe8nA1dKvFoZhZhwQ0FlsCLOLs8 A+/Ze5JJI16mew2MNsVmem0pjIrWeZQXkbW3iHSF+7MAQmyVVW5nMA48blZET2fE ICwsA48xf3BJ5s3UlpHka1CCV6mhJUoDhbQjnIOy80sOVExDlPuyh1OBXp0hsNvz 9HNykDeT1uioE/E3HEQ2Nsmo5YNXoQD1oBnk2aOZv9loFFGGVXLJXrYo9fJ/OmcW 3IVz+xIIaVomeEkM6CH/3yp3J8n3Zlv9lYVthpoR3PGdU58wFPD/77BUthLakPMl cNRMg8ECgYEA1zz4Pc9ldZepCZy3ezemUv48p589BX5ehbbpqd+csraacBLq+RGu IlnVfmqrXD2XYnFzWHxExUXuwWYJEDXQzrOxsgsHXuQNv1nEipSZhvpBvH+etoFC tROEmaa+9MA5DJe58OHW8U48oXQac72bNvB7jF0bF9Z3xGphaaEARLsCgYEAzT/N vtJMKAjTf4PknxhmCk3DPSqwAb988v+oYw3Pqwzq+45Sg5eOGImum9mu34Gwxecp PawdlHRBY8vli1KHJfKRkATh6Rd+8s1maiW6Mpcy2d8qCUBKX5Kne6sdyk6xIkPa gJutO1CPLUKFuKdxaFNpj2pIlo7D4TbHc+sQS0ECgYB7x3NXUIMfmiU7AuY9tSYw ikblet0D4MWJDkTYTWF3IS41j5uTuwgydwkhF0UO2djKY0YbN/PwoyQIEp7ZtKkt hgeFxXPqrSn+xigSLh0Qk7DkL1xdxn5PVjcmic89P6JPTJ5BGg+bXAvgKb3gm8S8 VpYmhZDEJ7FewnLc5RsbawKBgHaMXKzT0HLrLiWfq4QM1psq8RK6PjC0RlogOkUE LCdS7cJgIN6qwcMAex6/a5bi1JRqANMDP46IW2Bl225OO4s6gMLbXxR/oq5g3r9+ jP49gHyAvknbnVl4Xk46tpksPHlbEbBounThAeGVY6EU7ZbhXr4cGFMFoLPLLQaS BbZBAoGBAKKMG+MRnM9d8AlvTWeJF6nzY7H3PXQHFgs1ISINqE5v1MUJ7o+i/OWl rW6q9BKhgdxoQKEmaOSvw/ecS3bxoDyAieXBkTjbcfvETGnUv6JFCmu/tolHCX6M PHUyyYABqJHohWyLm2AvQTKcSt5zK2GCFXtZ9fMZQlHZUAmiNRpm -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 8388607 seed: w6uP8Tcg6K2QR905Rms8iXTlksL6OD1KOWBxTK7wxPI= zfec: required: 1 segmentSize: 131072 total: 1 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:MDMF:mo34r2meiiy46vq7uatrgadt5u:5zscxdgegy6z73ij5zqwq43mlqmko576fxeyhuqc5rvauq5md43q format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEAzD877AvjU9kwf7WSCxcTCtMR7gc2Q2F6GL4y0/YnnGOiq72u Zf9mVTfdfn8TcQT5h2dOYWyypEAxnIK3To/myFJ+1/34c0ddPBTHmexUpr2czzx8 wioJdG9upkW4tabxQU2xfQAYcs8sEVmZMcTILfGdN0vcSSKKpLx953rX09jxuaSc sbDOwqKvt3wBGDcRZRuJuoCjEBsKKQMhWDSzzuZBwF1xn/oqr2NagJ8s7oTpj+1c srfTXwx4oXdZmeip5xMeI/nZzuIYNlSd7q6NSAK6W0CRpiK0glK0ixOR849YmCGc 8Cnn2AAw4ABmLOXeBHy/mUHO2Kdfn+QrLBbymQIDAQABAoIBAASoa4Qq/ZHyXyKx IBsXJAodkqgwQL/2zOCcE8+AKq/sbhpcZB9eMfX/D718Wz6QVmV3XmhRNjNnOwpp SRIRAjpuy9w+pegbyEf1ZwY/B2uEYaYF6IggCnZTOpLZqQhMKJkskv/kB5VfXU9U UQP7dh+QG0hWd01cAjfsDDNXepe8Qs391Tzxf4/qlHoZrpYt+B1xItk254YrrPjZ kjcR5JTonutaPi26x6ULLS+VLeSGdFAFf22xW7eDPIX22aSsYJ7nzAWodRPGqBcN l1mgcHgpSFnP/TbWm6+FaM0eIiTad1I9ESpQdB1Gvw2uCNqa8sN2hLgDztkWH8HT 
+PkOSaECgYEA8PxHZENANGqVmH10WltFAbzmDMagGeLC+omaMOekdBNI0cSTlSTR HEeELWPCfRRo6En2VCYuiJGdi4vss6r5ppI6vu66Q68eMjB6+sozIA8CsarH8N9/ aDBG+Fy56/2vrK41ywS5L0qTMbw1lmRuGgta9eCtsYZvjGUyXtzgeZECgYEA2Pj5 8yurZUZBhbQLGiL6qR2Cs8Z1HQpeGlopFN7o5EH7arTy1f3CF7McYrkyefnwAzCs JcNl+ufgVtx33Bkm4yNxirU3WlzYbtC014ERvdLbUdrKspcxmn1zSwrkHbg4gpWu qLtNl1OZGmpVFxIj27YcdhXWuaMRyKV3PQZFpIkCgYAtzycbDhWkYSZyyFZX3sWt YOUyRIempA6AZavj5ATE8+2BwqZzUX5Wq9mabz5HXJvcnEKxGFj8KQITxtOGC9hN K7rzFJpfx2gsDj8ycUFqtK/Eajx7s2Caw6KaD7Zf/+dnIe6j2xAAx2JXr/lXz1uG o+X0m3MpLe8CdzIuCjq5oQKBgQC13AIMyxaO/VMgESeZEaaNpzmNG5O/8pereNSk NK5528Ay0VYU/Ov5V1w8d0QSruZ4lgxEXsIUitQjmgkwxzgr++JIQ9oQeG/EelSJ qRpIw/qmYj+xbz7ZYbsINCm9q1JaScGqlcvUQfK7DFMj0kWR+9NhOq7OzBq01dPa p24qGQKBgA0qtgklc9qaDCEhNT66KHDgRTrrBRNf36IAWD5mEfb+mpVMBYBiq1ej 1UIae8DbgJTzLNHxlBtFEpCB8TM8xtVfoW7XGWlQaIRK8Nr1VvmhmsCQVnhK4ZKd p3IMoWvoug6cZpr8Hs6c7j/u5zdwbbYA7/mDRer6LoqmA2pYYnpw -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 8388607 seed: w6uP8Tcg6K2QR905Rms8iXTlksL6OD1KOWBxTK7wxPI= zfec: required: 1 segmentSize: 131072 total: 1 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:CHK:nktoeeuydph4acj2fux4axyaj4:g3fvjwanenwsgdcn3oxnau5gtbdmzbnbevlrzrb5qe4yukuwhejq:1:1:8388609 format: kind: chk params: null sample: length: 8388609 seed: yPi3JHKKbWaEEG5eZOlM6BHJll0Z3UTdBzz4bPQ7wjg= zfec: required: 1 segmentSize: 131072 total: 1 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:SSK:bmqbu7cppakl53iwet7rwxdkc4:hc53hqy2gli66tvjvnl2imgg25zjj4lylooxmwjghaxrq46ch3ya format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEogIBAAKCAQEAq12cz/r3Jv8sUPbOvXlldAhifcEj72eE81sugnCZnGS67pvm d8PoyNgMlFXbTKXTsxeDVcoOdUcq3q5dxhhbKwQVt9XscZJvV56zgDTG789tcmmC NqlvCnoXVXThrj7RU82qS8V5vez1ovaVo6tEnZnXHTJ/JdrLl4yfP21W6nz0YkC8 cMqSZPCYczfq78S46njEulLYaEPxV82Rl4ffm3B24l1Pl5SmGsSsVe4SOBR7WFse JpZB13p7iBzuMT2SIQdQAkIE4ILDCo+s86TSVFDRfJpbFX1nD1dntGSUmtvtIjEf 5uiw56hwZ6Xb9tiSIOLUrPHMWZLCZUYNfqRevwIDAQABAoIBABeK003BWS361XBE D+/42wa/ViSXoaO0sY+rQa764CebsRCxy0491F+vSr8gMnILwBM1Ej55dVYIUmvo QYrC8tdshr1MPuD1cKV1cIyW85OjiBI1S4XN7irezhDX7188Uw6zzQb+2LROdwqN 3M3w7ArIxURGGTCup9SopYIVt+Cb0yRM2xU0Nnuhk2CPemNwcfzulxzLOoaNT4eV wxbhJH9xiutZY7+gk5CFesouMgy5KvBCLGLw8osMgdBhX93yNQULknidIA4Ko9jQ 4+4sesH79TmQWg/z1LSV1XOKepOoZzQv9RY060LRQAAzdUV9C4iCAnKAWiw3mzPz PZ9rj/kCgYEAu8NbvINqFTuQ3iy+xLmDygNYZ5scetnZsxMX2JOGb7DNof0rH1AK YjGku2xxJHta6S7uXqVhItMf4n17eUpb01+Id+oeYgUb4BjTfBW1sThv+Wl4zKvu iQhrTl3AO0K6UrPYoTMx2pcXAfEsy49YL6zYajnPwwXDK3AQijOmBNcCgYEA6aS1 j9r42626zK6ZtPpy4OcjbuytMZceyvz6dJnUKRYm9ykdWBmVHxKJZ+a3VqihaYS0 /kECd6/HAQaPMgR280eDfhVHZouKi442Jh4GiC5TT5gGzAEuoUgP4aB1AQpd1D9t DwZ0iAuLtRQYNN8j4+FsNrcCV6xxrS1LpWwy0FkCgYB5bspYrCEipEh3+DZUoqpi LzGwp/eOWHBcSV/luNt8RrtnJYYLFUfx46tnb6Xo80KDhs+xNIIS9LotT/xYIEgs 9x8adra5rBYwI747BQtiF18LzjPLIvL9ew1zPFzDts8sB5Z2AtceSRMfNWxEJmvh QYchhEwjFAn6gNqhlu+rNwKBgAwoJ9JOYHh9t6SCyTijd2rAXBWfdvuHk3CYbSe4 AVQJ9QkTOJWm7x1ox4GCfbOinpNw9kHsfAZiPQaOotDFbrMF24+p58csJ49PXP3q vghD8M3JaUEgJp75sunYgX4GXg93JWOMwG97uk83tnK50ZI/3nOSXirVrCyImNEI qjQxAoGAM8Or1b0Ko1SQft619DRfKH6pHmGcP19ymidhcAkjfvwhJbrEkoGtkh/2 TBowy9QLaHMnKIPnkVXGNWij5RidLxUqrg8GzdYvrKC3cDOZidr8GNwIKQIaynh2 /TM4NRU628hVBPL+8v81u4EvNvNBTgxuRpn2CcI+nVO0arCHg6I= -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 8388609 seed: yPi3JHKKbWaEEG5eZOlM6BHJll0Z3UTdBzz4bPQ7wjg= zfec: required: 1 segmentSize: 131072 total: 1 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:MDMF:jg47nspwliweckiaov4ajbts6u:yegxrkzkdb4umh4rayql5rwybftrtnghh4g324cehqc333ryscyq format: kind: ssk params: format: mdmf key: 
'-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEAo6KN758FqGA+L30E9U8AVEDWqCu0k0dzFt4Y6Njr6GfYLN3a PeeJcuFQe4NaohPfderNWlDSZ2rRDM0Ftq/MQjHlBCkjY/PolqQDARbXrP7rmDsX zyhG0sawySrgld/sy93UZVCprF3Lc9/CVNN0iphImfpOPNtH2FsN8ZCqi2fyuL4D dxzVn7VfPOXKWx3OE6p8ME/0UpwEwAB/W8GT1CvAV43spMxcQIAmANpXKb0agIpG 18WoEI+kgyIS+evPQBC5pXRAmoeRgyPVnNZAjhFNntDvP7ZotLJ2ZLAsu+Z8xy/R TWe1xGydtj4+4m4lMT79VJH3KiGtggEzr1ETMQIDAQABAoIBAETFMW+uXntYD2p3 2VibXiEKquw8igSHt7e5mbBqUiL3WaPpjSoNH/f295MhLjsFrRlql+lIJFUwUDFY DPmtQmPjgkNQYr5EKND+lwCjL/tVm3/7/dKjM7irpmq1KXPzixpW3UfDMbvuI25M cOijgcwpmgGUb8MlyTkFc4O9b99sIRrR7c+Zf/02tPPo1gCXIyEBKfVoUBDajt5l ZceA7hIcFjlUUkdR9zQbEw3QaJQ7Ge0AZBQtqz7t1UwvhLn/+cMD7MzQi7TQpDQT wO778XL5Wm1TgZPivT8t8bvvo/hwtZdPAXOt/gvXJNqXdD2kDjJhWxXI//YSrDRi 5VPVgu0CgYEA1siNix3V46t8B/nfBcs5kiX2/pMTWqQePcWjVQpOGNz4PT6e76UY 23+urd96KuUrn3d6LdJBrsfEr4AKJGDBpETKZnUgZMsgCQ0mnZIExxXYBZ/OpeYw qG8bvB1/Yldtq0Y+14XxgC8/XJY1C1Plyl8Yh17NeCA7htuir1Vep3MCgYEAwwlI ycE+P87Q87+hmO3xHnRD2EScT6+jVtIr+hSQrbHoqPDjNNgHnSNbRzbSvkeG5IS0 l5IpwcDLSiGn2+u0AiaP96rtyk4fiYhKX3jis9vanMi4VKORuwMJuIECMET1Aw/l NgUlRlxnYqmQjOBn2ArcSS/UcrRAyfbI57jTycsCgYEAgZjRjzeZb55xYH6sy1os irrNph4od3C/rpYqT43AQdBTGOFIFWGQ9iC8zb0ige91uurklfFgII35Z8viUsDv FqdLWTcjLK5DzjJZMoqAx3+usPYUQpX6lic0nPVPf48xZT8le/YeGjJoEP2xU/xz kwB+VHAnmmwYfu7X4uOoEXECgYBz4N5IUPJFQwHO8Lp4fFbYO0fcBNfCWJ55hSHv 0awsJxoO1iCIUxoi+NDQvPf1adXxjA8oRwVcQsoF3302IxKufG7pPbtOiaAfPMTD eLVpG2UF5hPu5cg+Do4F+1BrkWzpRtZuhBwjc99RNWHW8bWBHOLI8QwOop4j4OZ5 Fs1uhQKBgCKDVJojSw3IOoDC66KAN+XQfZ+Q/n5pEElbaUzoOCTg1q+/4mX0HZ30 9AcIaiXmWAjMLmOcihMMm37yapIIplhF81Mkb6cYS3i7boFCa8Ioi/D/ryxEt4HU kOZqm/l1lQixd99wjf9i8ozjxFMop4WQdNCNzW9ujyrdBrPtKtYx -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 8388609 seed: yPi3JHKKbWaEEG5eZOlM6BHJll0Z3UTdBzz4bPQ7wjg= zfec: required: 1 segmentSize: 131072 total: 1 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:CHK:rl4bzmselnuezmapjlzssnqg2e:p7kvin2fnemochuxsmh6ot75qpbfhrscbxi5i74bhqdhzcy6i5eq:1:3:56 format: kind: chk params: null sample: length: 56 seed: YQ== zfec: required: 1 segmentSize: 131072 total: 3 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:SSK:uuwofnazzi2ehgwkbzwflclpt4:75jxdbmcilhxb75i6jakaqfne34gu3csvfhem2iqkkkzvcqnnwsa format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpQIBAAKCAQEAvZZfYCB6QoKGf+YkZVpLZrQw23Hhl1UboV4nXgKLzfCljmGu tBTspcr6eWSm2kmZQuYVG6jZkHePpnOhTGAtbPI1EvH855zUVccBhkgtLok9WSoZ Nq9Z0TOdgDfvtB1MKeUq02EuiwgAoAU9j0cs/uL9upZn2wlrahwXcrZcmoCstppV P/Zi1E0kQ1WvwJlQAuK7i8UsqwOSw45jNOkTlN0nnnA5UnLbPuvg0czEEu7EaqYu CAYMl/ZyX9V1U8hKw6lmtYWKpGBoR5kJowBGfYhZM9Yr9czuYxbWi7g4ZcrgDusJ uwq8TeVpAne1CYhZZOh4XNrSzehmDnkcqhYHrQIDAQABAoIBAAFj11osksjnDRZl +RFXKqNbodoSCS3jXVr/BjnduemuIICdPbsrRhrnFJQMRV7nWDzR1AjKYaH6Bm8Z fO7C68JXOkVjyc9m6nWgimXSJKapMe5z7RBmE7oBb1+vyU2gQ10xRXGcTkuNqPeD YlKpGm7Z+jNChAtqk8OI5jEcniwxzxRVo2ujm34LLahbQbw412yuohwhkK0m3kJ9 /nIaDuNhKaSqrAS+n8gflIvpFY9yf+k9xWnPjWhW9nLie86Y45Cn46Mw0OU6AHSo qoSorW0BUZZhRPIDsW1B1YINHXOREtxvqpMEeOat68Q1+UQCYJrgMQYgrpMUbKzd 5JoRZKECgYEA7/nFVS1F0a1U2ITvmF7p66WT5unNUFppm8jk8xPQXcC8SX+Tl3hc Bch502NgKIchDO0C65/EgQwY7Cou742iweagQhEDdojbBxmQdQXLzkJlyzxCh6YG b8hWzfc/TlkhgJToqvMvF9mPNkJjo3vdbqFQfee9vPUsuJcH4Y/H370CgYEAyj8/ J1nNACfxy5eNM6FPriRFFcHXdrR5PvqG1yvAAgyTZyKwlE99pWF5a7zwfFye6GDF RlwFOY5U+rP0ePG65Ho6zTlxyG4oC9mF9JuAnKEIIflAWudG7OqHJr462BrEURYZ RizLb6DrI37zlORvL1uLPr/b+okVUJuih5PIDrECgYEAhrpZ+ZozSqbfrbfktE0F U5FgWhIFfQllpVrCf14ua5RboYAIos+mCnElRHLUd6x198XRD+xg7HqYO27rbv67 09ThQHZA1Xm8Tl4h5jFc3O4WLGYmi/XAQ13crkITvq73yjLP9boWRHOWncXkHtLZ 
3NSgVi+XLNERTIkumYqZkpkCgYEAk89ff4nw+jE3VR1A1EALtPDLENineRjzF+UP EUjnPlgkjpbayLnD0U+I5wWiGLG0bY7z/rUYGHV+g+9rN80rUvpF6WEXWG2xlN94 OEpB17cU84dv0j//JP1OozEaXoBJhB2LgS8Ry1anIz0QFnxRCiJ0hPrBcbwoOM5W HZIS/zECgYEAnyX2uJ5Awnef2hAQH0gXNfnHjcIy03UJOLjOFCH/z2wJEPXZKzYD HXimoiodVWyjAKu7HzAwCxfp6PcC5l1llevJFjIOLyG+MrJ1JOKi+EtV1ETZt0BS PKkxMCV+ib0PtLyC457p9q3XrtuaAuEH9uJNkv6+e9eg6eS/cvPWaFE= -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 56 seed: YQ== zfec: required: 1 segmentSize: 131072 total: 3 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:MDMF:tznn56mtthkyxbrmiv3q2tjlja:7upsseycyxoyeoals5xnhczxb3iopgugggwievjibkc5duezlb2a format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEAtbFsdwrCBiI+8Hh+s2HB6/6N2wVgjm7Nivejr7FzjmK8+UgN DVycRdnygdzoEybhzXImonLw3bJh55D3BRK9xlIsXFLDfWDPAyr9NZW9316GhB5s EGnvvM4Vr2+XW0X0fymcOKunwaNTQ00ps3fPqyepW/FLUuSrX8wJNuEOR4U4/vEo 52bmxq48oYeWwd7P0gMR2q0eHydqA1sL93S27NwMR2RaRV4s2gxTZyrrG493gJhJ g4xAsXWiieUboDccYrZRO99zOAB2vyYb32gUh0vs6xlHvSFQpX9IEftdL/Vlrbtq 4y+pSPsZiJwCwJPJQqMLU7OksDCEZzJDFV04MwIDAQABAoIBAFrFjJg+dieFVWdi 832f0beaoXkyCwatoZ+TT6IXZ9FTT/DER80MnwAgvhCV8hWbX8T5igavoNlJZLNB T7+nmMrrQ8FOEd9iDZoaEI2ERWtCOLbp0fgzTLPJS7ktaXMOlHMxMRx42aMaex8M /k/shAIQmwJVntmHZ3zBHTtfHXip5EbdX/L36TAfCBjbd0ArsFP6YP9F6vP+VWok HES6wtixAX+ff6MGmU69PYlthJYWXrhS2/7xGdY6Ezdrm0GEvn9GviQqnS+6guSA Rkq79eFG61mq+Oqd0xEe1wO5ul10xngH70NJ5xQS37mio53Ijcmgv4SH7GsIy+86 2VspYW0CgYEAxizeJnFH7IjugaXoPOUxz3nXm/Cq+ijmwz8pZu9KGQDm8Jf/WYYP 49uNeTlfEPf/qVHLkzNVKrHSqaxubwrOONf8/I6wqe+QGHO4k5elll2bI3wFFdTc tPT6tJaq5MTdySfuUrbAx4WC59m+2Z/M4N2HxTXBPvO+AAqZiRLsgx0CgYEA6rVh xCOIKLRN1fduW4C+FuKAr5umFnOiCRya5gDwD6WY1jPAaSbfQVc8INAVV1NExQiE oMAmEOrt3HuYxEoRSl/5AAXWoVN7Qk6b20jtOshEBKkqk19Ew1rXBkfzWfVq9+4c 4hrK5hRpC/MIi5tR8AlTM2eDvu3/WJR6uqCL948CgYAQaqUYgCfaI1nGqay8Zqwi qNBAncY8JOlA4VmXqlj0C0wWQDEqBF4KnSRyF1uVt0WZjCoWDpmOiN6PqbYYQsfk k5fkgBmIak0AiY2PxG82LpjsbpipP1HtN6IRFa4gd2J8CG/IsFT18kxu0m3p7z/0 nMVjg9l3Uo+5xycC4OtwzQKBgQCktyBijvEaZ9cMJzZanxJIazMWiqxXq1T3Ag0v B09yG6wT/4O0B+S8LWV0PbQMcdKcWGsDiXXtf4HorxC6CKTzxkCwJGjJFRY2pYY5 sYdTLoKVpsbLYBuY4eJvdQUyh8pHLuM0RstIBuDl0uyXVSx+wXyTYb0SvGHsH1+2 I7+2vwKBgQDBAbeBgU7reZHXa+LOK1McFVazYXCyi/CSjDyQOtaIRJpfoU2dHQfy 0SpcXb6mWC7gAf92sT4kDXJyts8jNWkvgKhifhtmd7TGSqsdNdhdc8yB+Kl8SmjP AOLOXs2hS6NMCDlmQDBlEyh5/NHhUVyN9FCEt+6l0qjE9gS3FyQAjg== -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 56 seed: YQ== zfec: required: 1 segmentSize: 131072 total: 3 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:CHK:ycvogzi6wllnq2bkx3t6zdtwju:um42l4yen7jiwdfgirvedtty3tt3xuhjiyxzqoourvughtxjar3q:1:3:1024 format: kind: chk params: null sample: length: 1024 seed: YQ== zfec: required: 1 segmentSize: 131072 total: 3 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:SSK:i5hhjthb4gsivkzvch3megrely:hkqc6eypopec6otxa63muh5olpihrsnj3ssnq7whrwvc4fssoojq format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEAryhP4uLqtk4R6BJnXYLo+rwmWCcdec5O7Cg1ath3Gvt9RmMb vVVKwTdC0ZDyqx0+Dsr74oUNXA1oZDr38/3Xk1OLJmCcb78V3patv3yJxOiz+jvb wjKtvyg14rYIBoEu3/bgUPasphmEbjSmUEbBjfihNr8e0bi3OQjNNlVQgLUyI3FO 3PX8awmTkyRSubAFfRxOjecxfqj214DQB9j8F6LrsgGSna6IaNTRBI/UwtOiNOPm sczhdoctVRZOVzKlwpjickL15gwfdldPsuSLm1nkqXfoW9n0zV9V/QwyXJCdhasT mIoDN6ZPHunL2XDRN2IFjholYyW4cWj4VYVnfwIDAQABAoIBAA73K0gv6iz0Y6xH 8kP3nO9bZw1OHkMbgPvFfbbo0thf13bNnf+hy3bRwWhFca50G6rI5heXFaqhTKOP tELJFAO29iMrywHzOiugBS1gtya3WTVOqvqfOOAlz+DUe8AOhpJFNipEwUCZ2opN /k3KldwK+79BOiFiHmmFmn8DcBLnA/iI08hKJRRUOtn51kVe+MPAZkETXQthMq+X 
NgJ0a6kfRDV+7+aQZfhdV5raCB7nK8LZsm0NzGLvtasX944XxFH15kKXaLkKsvvQ zV7QNeiQMfshxlLfCbcgA9NVsByYdpJOfchx2WD+L/ZEpIM72wVfCIzpqkXJTPGi +Rqil70CgYEA7fgz0pOe6mGElk2TSQBjcYjGLrCGttyKtl9OCmsFgOlYadufN1eq Ahn8wQtuEkQRbf8t+0bMCZ8/XU5jlIUjO0bIDgZ4LDPOckV45e19fnqkCeEnlPnp NAaM0SewY98gsvjHuVl1r9m96pVzlGiyC4XFG3T0XLSzg7EpIIKey6UCgYEAvG3F BqXU6o+BgamohqPHDR7qY41Lt6f7XZ/o0WCkHJZq/2/JFycwmHgRU3esj91zbHcc 14TeoCsIPIZcSdqA3rwtWolLrSuUvvmDfmKCkEgAJRsHi0oayYBcHE4lW8UHWcVT FYjc4JkcK7viZXBRIMEhwLPk+DsSjX5CO3ZKDVMCgYBpt/l11IUmBRq9F0uWg+ip 2KSKu3utoz6wlJh8Al2YjpHrvVj3Yiex9U+Xh3dn//tqTZJk7mfY4nlo/1k38wna 3LAlovQiVwWhOIHkS+STmvJjPTazdW8H4N0QUjyHsem5+NHp4vdonyhDHhAR340x l0Ug1I123gRePgdSXRUkzQKBgH2DCMiC0a5kZLl/zzfQBBjjTPF+/r6Y8EDO8X/2 RZqdPyxiw6neeuo0oCXfA1zY/7dyKA4O/VPnFhdq0DKJj2nOIs+5wGTbMLt87G/V Im8E5sPQm1fWxr0N+U0JaK0WMu1DGTKw9Z/NnQwsnINBK2kL/HWl3pDSmGsTfP6q rmztAoGBAI39m3K8RjybR/46YG4+4eOOFeo81amjSR1Pf8o/SsqpRpqqCzcmGWBO hRbA04KNjgludOcMIV0LYAWqy9LKfaHKcbhdh9Q0PL+VO/fJsh/NljkusRxHxTXj /bIeG6yEqFtg/MYe+zeYUZvgGsZP7+7Swbp1ScexuyZes9H37v2D -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 1024 seed: YQ== zfec: required: 1 segmentSize: 131072 total: 3 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:MDMF:n3gsik4lgjkekhb2hnvkhiieya:35q6b6riggvqzsmyso2hldyb7hklnrmu6farokcj6d4nvmufqzja format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEAhaXFN/J9blEqNROOLhzzOY/yy5wAQgNmBtULXPLgYffFop+K bfZcedR3VTGArQAz2eOm4KfP7BNSd1udZA4OEIvub98O90osuj3B9cFGE9+P4jv4 1GfynZbUnBNq3s71P1TqXSTEJ8DD1L5HFY9coRXSmh1+TiEwhaGDKsBWcMMRjizm AjeH5hzQLhY17NLjP1215Mmw4T+gXUYWKfSR40hfXOEx42uz0W2nlMHR8vcDoz8l UbV2xCBmMa8m+vl+UwK2rLhjWvkZZP2o5hpk14kHMYoLSF0U2aQPgP+7rLCYYlhN w+ClF4B9Jixb+Zl0FhLZeJ/bzXZHdY6AKPYhDwIDAQABAoIBAAOZgpWDrqov0yBe gvgDUppsd0sm9qt7K559KChcPaaV2wO1eAUx21gdXLYgNl7dKwha2oNMMcxM0Zii ZeZXKed7MAm2VdEGSpcc6Qz6pC25L0cWJhkT0FPl8fBe5i7THzA8qW/8eLC4v0Lg ecn/Ca0oXnrv1sI5oEobyWHM5WcgZ/EFWaixuz2g2O12pb23XSXeoREg3Hq6UoqH 6w6CfeRI3rXyN/E6LOywHv+gDll6rBKilBBOrkwSwoTca3QbZEf7iW7FC42mI+Qt g2T+T6MYgK81hDcu6zpMY67D6ZS5qZfIaDduZKeCabt53BTzJDxSKqeMVjCb4iqA RDzGEmECgYEAu+5FThTnzfP8K4Nwuc7lbUQuU/1Abpw+bc7vQ9QWE43sl++YIpp/ CJLBiGuhW7RQ1fjDk16M8phBIyeViLkHCkbxsUkmurlQIHq2KOGAvcr6Jxnhb5mM crsk/QBW3GS96x+z0q7ZKNM9jVSqVhtmZ0nMzZBPv1VhNKxTEMRuAS8CgYEAtg4h 8QQ96+FW+PrcUwuQ88KC2xAVBt4J7sa8sQ/QLJHSXWnQoSSy1CmpJVAVUpAB1Dw9 J5ypVcYNuu/DFkoK9Ill06wP+vSc1RhjW/ozaZgzWIIUpC+54G0DMj3Pz4BUL8tO 12RqpgZfbI8rBXwURuldtsTCC46TR/SzxkJVJiECgYBnf6DhkLfdABsH59qkKiLG W39cOCRNBnWHSikRZPNHj6kWQBi8LfP6R8CYHhZ+h77hKKClP7RGQr3U248J/kS3 Tzz6kzvmJ/rN+Gbr+s1JOUktUZ6LNLhZ02FaiN5NgJnrrMj/JdZpGnVSqacpxutN xSIqr+iLijz/okwY9uVSdwKBgEzR7dSLm61a6p4pDKsmKEYTf6/8O0MokjxlM93q 9Ea6SXANZHF60NLhuXP7NOQfzAXIXW3Hl1SQO97zqPhQygqhp4wIAL4+Vac9oT+A dg1Koe/pA9i8IszmcwDSQEwotF1uhpgw0Se5bK6cQuUPlGbPtjGXGOJTiSZFxU4V U2TBAoGBAIYIgp8UZtxpIfj+u0EJqtQGJFGgw5UgqOQLozQ+OmWxE0Z4jaCgxhj5 TW2b2rNesGN3FlaFrXPW7QXw6caZ3/C5/KWcmpwS9gESCJWP3iEbJ4q5Avh6mxAo 6KnU5ietcfrDZtjb0TqLo786IHndR3aljbqjZzWvZS7yncfIkpVg -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 1024 seed: YQ== zfec: required: 1 segmentSize: 131072 total: 3 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:CHK:3yjtuv5h2g45g2cncg6hka3euu:bqqhp6u73ldawial7rtrfomx2qkmboyvqve6ywo2jpvfbu7zptga:1:3:4096 format: kind: chk params: null sample: length: 4096 seed: Yw== zfec: required: 1 segmentSize: 131072 total: 3 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:SSK:qucnax3vhjild5ifkxf6uxl66i:26feb7whvq6cm4lxyajiwk4hv3cry5ongo7d4h4vjpgskvgmy3ma format: kind: ssk params: format: sdmf key: 
'-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEAwa1PAiv93FgzQ/4f7aI2pEWAyJkqtC4cP0aE0ifKCZxZ/M8F xobPnNw1A3aWrAdOC/4iew0y56SRdejGPrrno7d8eDwwc4qV0LqWs/dGs0yb3/KN MW5tXzi/VVoo06eQX17YdyQllKqf2ebpkopDYZ23sOXOY1krUFWclGGSlmrpP8J0 arLxeDZiWnzE0jZuZqPliQmCozUzPSBbi3cSaiw+hjYsEdt1ooGQcfBfKOwCx+HW jjyjck5rss2jVogEDgfo+/HgUwkoZ/h15YvMSt0PBh0Suh7R79P+WXl7m0SvM5qr w/xSJKpi+RIcsLOkU89BW+2DiGM3ZeOghnlm+wIDAQABAoIBAFEiBt0EDL6HfEJZ bIqhz34VV5OxBkCgqFihc/aNkIdiJhhPqT23L7WoUdT3krrR/JHtjgg6ST7co8rf Dl0s8uiUbuH3ZNyiC4x6/bK6PbXSu+GevCMe/VZMcWqR8FRp94LcOpX+YHfc2kXw A5zNqthzt4W1XzYjHo/yrTtDfKLhvwEp7NciJL2cQ9DZeQAOTbASjVd80Gps1ZLT XGgUpbwm0AzfpjK/6I4qtw2XlhlSBesj3k5PPHqNB6H5ZtRPUGFKeKk4vRl3TRdt XdKWbGtawWe9He4fpGc2BMbNE9EScs9q4qZ6Xd+90bgGN3pa8O7xSqdnHtlujOEr WwAevaECgYEAyWycuheE2nU08iCxicgiEzJd9L6HLhBvyeYoZuhk/NsSElav+7Mz pzWohc8sv50lKo9C159onkK6u7dNqTSZezoDUn2GqrazsYgRP7LPB0jcF+vQUK/D E2gC2ZrTutdmaCr/Ddl78x6v5WLlKrVEvnJBP7pd/XmjuVvNQyDrQFkCgYEA9idS bitt1aqndeKAtYhquVL3xJKwqASKEbWfxRYXFX7fRqbrmCGT6fg4rs2rn9YAB9wH ZnQ2e0tZtG4liewD3Omf5nvekkvSHJFKscm9NS+XLcv3s4UJuCnDpDlmJyFRXSct UuVAYcOImpZ+Qtvy+sA1IJBeNl0Zgo4fbUWvl3MCgYA8PuI5vuMbvEbTzPeNMHEQ sNXtaDdijcQB7XdUIFpkTtn+5jLI4/alIqV/MFJAFa6SJjtl5uYRv/++Obteyr7F Xrqzp5vp36+rf/k4xjCqCx7ZgMzT9V4xpcCEeYyuq9KTgZi7+brbIuiVgZjtxz4C gIYHm6SVNhbEUDL6yxPSMQKBgQDqkfxOelpXlCGzCB7pX098vaDZFYT9CB5e0/qm APAMjvPMy0KVneHrw5yYj+wuC+vJkZcHvlUw11Rryc9CCMSBn6y+ImqudUyL5rUM iZgh9/EUNlwdGflyI3KJrB05ytlTcQMTDN52i7RAxIsbwahh5gp6trjhC4VE0ZUH N7ImGwKBgQCM1L+omSvZLd3HyvCEot+EV+bzUsoExDK6+k6a7JgPN3nH9ReHDH9I mTRuonZ9zNdeulse7E/7UXlHH0YtgC+XeMQAjiiyV29HuycYt9NCdF1xTrV1Ao3f 2Hc2JrmHgccuZP9y/xYTgXU4qD0sOrOpDD230dkkt2xFujfapl2PoA== -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 4096 seed: Yw== zfec: required: 1 segmentSize: 131072 total: 3 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:MDMF:vspwhm74zz2ffbihu37grhn72a:len3xv7moahbuptdlfzy6otoguaybmsfroeaonep7koan6mcxzfa format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEogIBAAKCAQEAtyEYblHULHgUI3qeiIBkJGYKj8bmbIKBz9VdtYWbyp0Elvx0 cE9QcLFK25fSCACviocH7Z4yNXPd1yamDPVhFUjcI0J1sj4UfgR27iMYS6rJvHMx Z/KwSKz7TW3eDPVJbHl4Y2MstlQBhkTbzzve4XCK33E9d9hMj9OTfgG75XP9wdOl Y0tBLoQIPRi9DzVqPMg05oV75B5o/GOWCSzLCTdGXpdDsbFsQPO6o/nNDUvXwVM3 8yxEIZW1oXw3+dmbmKnZi2/RjHvRixzW9JjwW6kg04P22ehjYVRyqAATMoaTDvNq S7q1JPe6tTpBGqnhoFjQH3hHsHylhAyXMOf6pwIDAQABAoIBABjHnaqv3+n0haUX XoRR+zsBo8Q4wc1FC5O352o8ngwYmxpjJs5brSLSmrKEJKN4lEhGZUg988VP3GDU lfuC6JQcu8z5nTt1MwiqSf2HOi5i+dFKNRE/waLT5V1g1H8kYb8P4L5yGQbC70Hf 525vR4Vx2RjLFeo1loaPtpGCYo51nOJ9UKkaQZ4sKVgNPL1K/WVDnzmaVklz5R2L i2dV/4irCO8HP1ZUagV9JHVpGUT1HJDzBGaVF/3zmFa6hrOrW9KOVuhc/KdybLU9 Zhk66KM6fKVgtGL8TG49O99nhcT+jcp+xyzKq1fN6V+dmMFS4PtVXCdLMRfaV/5S NuoLhgECgYEA4zPbX1M3hXsH219AvASkDnaQKio3AY18T58XMt2+laPPEIhFDaiY J53or7IOfBMphwX2+V0LqL32b+x4cbMTnXN4tUomLVZnvJKA4+1Z3qtTf6xv5yif kZ1w2GFcG9rvqCdUnvandkQo63FLWsRKXG4zYXibXof5izSbtMaM8ucCgYEAzlcs QqTzcCujrjA3Pr7Tj7qkdJZjCPOce3k5MRZfzqpE8f72OkMwHw3DR2DXm9vP0yev OrrN2u0LhK/3fkZAhcTTkUwlH4WqMLGmIUGlUqMyLvSUEW777qzcupbpGYG6K+KF 63Ie54KpmAaly7HWRhWsM74XcY64dHlopnpzgkECgYBhWq0bcZsO1SMOuwgQCKUL lX48sw4S2j90FqVoJGAv2ps1aE6+hYl9IEq+TjuqqsNWmhWz0EzYp11bpCYQAj3b b8k/VWB6eNXGlbgo8mFZ6mvC/26LzHpjeOULstw3C1853HCEFQi4wogOKuOxJv7+ EDJwB6/7l6Q0I/y8P3/R7QKBgArb6ZdkSO95THbpUK77qfShdPAxzep1r6GL4qej rs0YhuJZcanlSU4JEmLaRN6N9eT97KnhlN3VpcqI3DSIC/M+RYgbAsUi6q7/Wmfb pZCwx/5SnsxAFAAelss3D/NosVyH4lZeviOe67/1cZpDtKwhjdt3QJKYWTq//PLt 9NBBAoGAQ/Y0z+5u7MhB3tW7o/CxZWdlFWDJ/qB/fUMMcULP3bT9Q1JaJs/Bq/SJ MhgeCsc3dR1jmZvgvxsosuvNHUvBhpksoGYtJvOERBN5DpQk36bDg1MP0pye/WYX 
+hVtQVrSOU5JvQ5fWu2zQLDTHLEXPaR4F1Kca8ljgFfW/Ay/T4E= -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 4096 seed: Yw== zfec: required: 1 segmentSize: 131072 total: 3 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:CHK:annnqmu72p7iels5loqwlxt2zq:s7wv3jfi2hlpcvp4dnloc4eex6vre42kwiel46achaie5n5uodqa:1:3:131071 format: kind: chk params: null sample: length: 131071 seed: LCa0a2j/xo/5m0U8HTBBNBNCLXBkg7+g+YpeiGJm564= zfec: required: 1 segmentSize: 131072 total: 3 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:SSK:isonfapi4rlbrp46a43pqvzxjq:xitths6mep4ltlbktkwgzegtoaz6efeko3d6pvjgccqtirhzzgiq format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEAttp4OhdOj4JqoNbsQ7aoF5SUk0aIkziEJK0M7DRCqtgU2c5e e60BpRd144CXkfc3uzAgEIOTt1F4MtXAWkjki5Ie90pF8L1/+SjOdPii8IA5BHis C+ydELjbQY85z2GVPvCwB4VR0aT2GsCbtDaeshZsUn+AH3BPsUUP+fXJFsIGtkpr WpC5pd/AQlrlYWj3tw4a6j/Fh2Jofez1ShDoHH1GQ2IP04ekWLza0lrzxNqQuP1n hPrGhr1+kS4sOLM9LYmYRrmIcIUSF0P75EdFbmrSKddENTk9PcUSauOYxpvdk2vt HFuWBp5i27YUxqcEDKmolqoKhbh5HDmdjslOWwIDAQABAoIBAAGOFLz/EGlNWqAe M8l/oX5R19GeJUXbPS8dVEwjRaMzoznBn4a9ueiFgo3PZ3qTok2yjb3rizNhO1HI r5IU/JyTPuArftzFP8lb/NAnLSY5G5hbeQXA8ApXCBuj9CyR+orxJmNp/CrO6ajg lR0Qjy5Eh+H2Y53g0/dV9wTRKkdJ63El2MVjlTgEKdytMWK8S2RyjbcOKGSMgvvq vgw2k9M7dx/KHVxGbqgVHefSkfUFCQne1wEPJS+zk9eLRcJEXP1MY4JgMbpMnH5B m9BVhPvCocxfjCDoU4CZW9mQmsUx/J+De3icdIOuZsJgMMBJ9uV9e58x0lStdscL gSHlccECgYEA1HPUIQ0jat2cJe+SxIwUWqpbjQQN4Gd1R7QtC6UqH4WXDDCe4wfD K3OpaU45BNm2VFNlozfTIMyXRBKM1SnBi3040TWr0TEzasWcneuCbhdP0VUWS//R L2rRpThtJeV24aXWP64XMZcorMiTSPBqq6f0OksExzSkqDrhVckD+30CgYEA3FV3 281DsFgqPhJMFJ4NNJgNbVA0mSnsHiQ10tnlioOgjRhRWGgI8Z2GSA0ZxDxyS5kX +j4U2u0+UsiHoa5tv5/CUgutg+HEH0PTvJ69f8kyLCinBD+b7FPHvLJBAzN54/dV 1mvZ7FWfdgYlCNxWcSQ6bpjDLy7gAkEP7lNJKLcCgYEAwbB8FEnrIVG7O2bIswJW yDYKU2z/zbfk16Nvce95kNV1WTq2kJsSF3pSWFxlZYOrVAPYZM7PYFbGDdyvouN3 vdlDRJEe/RBTJSPWXq9I8V+1eE6PjmhC6W0EhxIDiIpEMQLFarcoFCEQhz4x8Uym o6ry4XZle8wF1g4gQ9qJE+0CgYEAjfdX8j0w9wfnt/TsJoCr+45ZYGzEZ0fWxpkI QSJ6vyQOp7radv8ZfCzGX9hpGMLl1gX/qBKmN2WTuZ1RnwCX4FdcyqaRl585UffP DwKtERAfDsrmylr96WkWEmQ8dYaObC9qlG0LjjahN1fANxRZci8ooyg040rttSYc 0K/DPMMCgYB0RI5t8wbyIo1H82Wl/cht43SVXT37rP1lkz7kYISaCfU251jiHWON BWECvFup+k5IL4Kx988CxZt7zJcAbgx8CI5ShvEJ1Pn5sl4flwjoblzxY/mpyNYB efYc8zrCkd6ILhCrKtbxijt1C6ETh8ufOjoLKWFISLkizKwi0kYILQ== -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 131071 seed: LCa0a2j/xo/5m0U8HTBBNBNCLXBkg7+g+YpeiGJm564= zfec: required: 1 segmentSize: 131072 total: 3 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:MDMF:77ev4xscd75db247gnsymtav34:ssizcjz5yooirldyp5n67sq4ec3h7adkjzb5i7nf2idlgum6ve2q format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEogIBAAKCAQEAooIJdI38ekWIkn6i+Xn5Aht38L967mmHX7N5/PZdYJl33Oob ML4xSK61X7MVlvnDnMxGbIQ2fFdbBnpVAMEb4z4poKB91pYF8HyoogV9XP+mJbCl mPzYpbEqcPt+ROyIOgSshE9c6ID6hdG489oqDkOoavTvnEcfEmCF89Cm5PsEE4DJ nL5V1CVZQfPBmPcS01KaDvNE065CjRyomx8dav73zpVguKmGLpxba3d2GnlTYI7z MNaxZcGl/seDX7KiGeI2kUC8pWb3ezyvPHNbwJOAPNKPkw1oVkEZICwnv4znEnXJ ZlH9qqmPIWmn+3PgWRqovpYhEEWh2YOTZaXtYwIDAQABAoIBAAy5g/CwIYHtfz6B U1Ts5muc0lfJQTX5O2lp+KEEetan4wkq/PVkdHV4K1Q9W0ZsHj4L3MPTLR+wCLz0 HUMm29PVDUC6RcA6FRL59TZimai2N+tMRAXmXoxy1/Dq2xNIi3jc3tP+vDapyslp skLIkBx30xs051ePAMbZRK6NEFRuQexOYnUXXJPqPb5pkTYxaYmgER21L+CfalYc sqnMhE+Es7YPueHv+fVr/cw8Sdauyzx4PkH1K9iummDGLIA772jerX5SA2oBXwTQ gV89YMdeVCyOa4LpD64OSG+NKda/H5ZcBi1lmJMDqyegTOALpTB1NC9bM20PpHZb ILkMQKECgYEA3xvNtJybLTLZ1bTVcSVhuuftlnBYqVLHue3DZpDH/6L6wSgUbtmH tWpPTzOKEvL1Yc2IFhnl4j9/d2TFs91Z/MOZB7XFsoOySe3kq+8X8r7XlsqCEdbo 
/yFxN/espG+GDdT5/k3UwGuAZek0811po1+EXkkjqKK0Ayhrtaa1z/MCgYEAunck zSg4wmYwT0EgQw+HuKWwmtYUO/9QgsruVFtuCr1XFHCwEtR65qnln1aratJYMoeg +eFw7VJE47hTuhIp1kTFe0zHy2xyOme8kw3Uu5kRMMGnaAjWveQMRHC0H6CpXJEY hqnz0gs3jZe/rjKxISZdibyQ2G3yp+u66EveONECgYAlvl9jcaby04p0k94UAR3y b7AK5kCpjH6LXsTSwiWDgr/nE2+5fQVvVGfMX0y5fe6zOAEQtBrm1pUqzpp/ni/O Cg0Gd+LVG2B7D0rDJ3SmtVBliybL+8548uBjdnv3aYKFLoWIVwRp9QXIt9YkYaYw ZEVRahAlRDkt25W1KlnRrwKBgEIq9Uw38axKRupY2fHyGN9VLI5FWXjQ6OkAygH1 T99PwQ7nzhNggxxHPcyFHN8TNWj46A7ECSauAvQr/MoSl/YJAWr3nA57tS00kp48 Ujkf4BHFJnqsaEeKHTBMLh7rDC5k7qcauALZKCV+q+5M3r48twShfWTP30PnSrQT +A+BAoGAcLzVVavz+whB7FuH3wcbfofHW0pI09PB88qHHbm99voYwi1XmiNl0r2A a54LNTlifPhPfI1OIIV83XvyRitgl3bCdww9CyqD35mXSEKhjp2UaKYbz73gxddF tkA/rbjwYc0rvnZumDEgeMpSBvxHKNiMCHuwvHbkeDZu2hCBe8s= -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 131071 seed: LCa0a2j/xo/5m0U8HTBBNBNCLXBkg7+g+YpeiGJm564= zfec: required: 1 segmentSize: 131072 total: 3 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:CHK:rhkln7unkktot72mit5dmuqbdy:ilo6u6hugipdimyrzrvlam47xsmp3ur2lwnrtbecmvocb2664zxq:1:3:131073 format: kind: chk params: null sample: length: 131073 seed: /N4rLtula/QIYB+3If6bXDONEO5CnqBPrlURto+/j7k= zfec: required: 1 segmentSize: 131072 total: 3 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:SSK:6lnma7gxkxs4gz5wuitsqs7loy:uugb5btz6wxdrtmtqf5uc4i7vnukhj4wwezi7ygwlkguee6ponka format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEAvsUlCU9RU0cOaxNW1B8P+ADl+e++2dVV4YDfwTQ/tfAV4EeV ajvOZ2qRveGeXPFoFiRs4f1jA78AhHDu9YXUlq0ID99yCZUGAa4S9ea6eSYybScr 6ZgvAxJo+FAkR6DKvrjGEK9ioav9NitwVlJMhmyrQbLQz2kHu9TYa+WGdxP+0R9L MGgpjkl5JClHMK9YQFWbC8OyJqtua7fj0nvE40yYxw2v78wyad+dxpAieoROu6ys J/FLcYcX5/ijp+ipEPW1izlWNtGsKLfZkulO1OHaTllztLcY9RKcS0sIZAEGWEIk aU41rUVAgnjz6yNu/NojE9Hb3Yogywb7fHpvuQIDAQABAoIBABw+hgA93RWOVKUn xNg9DRkz0NjTVRddTYzIjthFCxW9yQ9eqdGDr0iCb7eEOvUZzMZSeEhxQoKgecwi CE8TlGkGj0YCWBjxFmWTRz1e/sbHD/o3LXo1TOiQhjgVbXmpZbsdEj4QKXM0SpYF kmlFYA0a87QTbHT44Os/VReMcP8aFlGhs48JafEMxAeHRk47deugSML/DS2OlywL 4FNrAwA6AZZrgE9+8qXHoR6J46k8tvRI04Cx5ADmbcgoOrLBnfheUVturJOZGxKx fEftj7TqTtHY+YtMawSVEd4tC6V2XP+vqIVduYw4q0gy/gPWKquKC8NE0BgpgXui suZvRC0CgYEA2KwFvZ39x7+ALjuqWd//jnfA/RWGh01632PCatgrlmr+XMHQQLZl xrpN4bQCur9yOtF6jkF70npK5n/LnbtjWZ6CQK69jXnShUvdkgZJxk0iIDT1o+5R 3r8hi0qWxolhhyLgpIoKuCIU8+KHRnRud8Pw+oW2aMWuDPYVSr12sNcCgYEA4WWN 7Q+h/86Oe0L1Bjv7UgaunvvCDpNVAGk1IFj3+FnVFf2PXNW+ECsXBviXNvGzfNDm UoWRZPI1+ZBU1vzlidxY3ns7l7Xa6NBVBEKfEWwSeQ881S0pF0Cf7MxIP1xMUts8 tx5koWaCf0+my5kouTqHFSelAQmfgpYetowJge8CgYEAz5+HoLvUg6Qt4B+sjZLo AE0g0WPfFahZJdciZd/fZLQCKkBOnrQpstSz7KPiObFadKJnHgoB7R7ixx2OsAbw nOAXUIQhf4BNCw43s8Xyy+L94H7fI8crDJd6PU+sS3M50ZTKTuE4hFmkWk+n8QuB D6LjOC0JLjy/HAxzOrtzEOECgYEA18YTl1T23d/M0L1pub0kPAM/md0jijaLEiil fkENqge9kR665sGMAQhvM/I5OJUsIZoOVAOgC8Y/25jLT0CtMUvrG5lXlEW4ulXu fXSVuOT/zjrDHsTr6GGqd9OcemOOgWd1+Uu0RDrRRLVo1NHbhW89MAhS0up2dFno LxNiaqsCgYAFitD9jsQuSigjltdWGBnaQA/cyS9Y5VzU5y59B4nXW4JOqfnLmMps o/WGAO7CKBvnz+7UOCOd/1ZdsA1ny70ZbLsgaf0Ku3+ehS5XNy/ow0LBFqBEw0aT LYdUwudveoV5ZN1SOOR19Zzc9+30WHRP6Nd/UtfX2m+EWF/A51AuRA== -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 131073 seed: /N4rLtula/QIYB+3If6bXDONEO5CnqBPrlURto+/j7k= zfec: required: 1 segmentSize: 131072 total: 3 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:MDMF:l3xpgqv43m3df7pr462evj2mei:xeixqrar643gyxwyyz4ccxjte4oznzm2ka347klrwr6kwaktzjgq format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEA1HnRxcfB+NKQ+Kw0dgtWgRUXgHcmKmQ0cu/44QlykP0BrFJ0 
B+jmpHwDROh8jG8tmVyW1caWlAZpdIUvqHibCB6CJhQ4ymKGkC3BBTlAzUSUTxEf 8vlx3UbPvvMZVjZrfTxfkcdScEybizqoQVcrxeA9iU8Ud4ef/dn6xT/rgtoByDQi E9PWRwByAqnF6fwsuKzQOzMDB6vMPPjMgo0lqJCgXHXZ2XY7ApHmQ547pucm6GkL bs+Ox2eGDeizTbmy+JgfqriJC0pb6cMJC65gBrmG3qL4P32EHfQFkZy8vazMYbNu 7pPM2CozaJ6XHGCOQFNxACFLep2V8DiNb+1LAwIDAQABAoIBAGG01zMlBdzfL406 2zEBS7k2MsV/hQxvYfMMyRzq1EU6I1/T1smgXxd6c6JnaobFxWlFu5L4rFvLiwjr ChxlwZz9MopCOE9Q+WIpuB4n0tXR2IV3cYKxFJxVqMi9T4RmqA1CCwylZRKBF/Hx elf5twZadNHEjLveoUMBzyCPoURdF3jZuCjAJCX54NrD+x53hK8gKv4OOjp/k/DN inW1wk3p4ZEzy0/A7S5EsP+BlwRkkT2yvdEaqCsuOeOEwIuz6ZCK/EZp57hsEBi+ mbqh4Gdvj/MxOiWOa4MwThw9eiYmSGwBpqwwdK6qGUNj8LrpTREYAIVw9lwrN0cz 8iQgYTkCgYEA7L4Rb1Iq9QcDDnMBWFUDdVASILfePCFwbmPSXeq4I4d3V/hRlYdx 4nNxva+Qkl+xZZovOlcFJyLu3xG4YYqALuVDnpjVXIKbHXrsJyYRaPTNc9iLjz3V cGQJVT3/uIQRkjjvws37FiumntYwQAkhEVqVfemE9an2D2sW3zrSW/UCgYEA5cJs SVF4P96iNb6gKfHPvMwTmrmXqRpSc3avkM6WBWthx5RJIPp6jEw06rpXfclILKPt a8V3TOlZpYjjC0M1QgShPtN3zUPN3L9T5qs7x1ysppn/NNOLGgHMkrXDRcoAW7xW 4rBqobvbY5jIgAut2WH1A7MrYo5F7YnNOY9N6BcCgYAzjx168hk67gEDZ5aWZ0Vu ija4e3LiA6JZ2FGbdKAP1NPwC7uw5iOuXtgZqJ/C0SZwa0j32rXblScS2+gdDi3m iLXWV5C4KhWgMQI3cHoAMriAD1wtoRjX9mF1+B+2TsUI5G+LLJMPfAg4tYsilxpl jiXamz4CxrY5G2iKy2O+9QKBgQCnQvjOD085P/xan+G9Z1pSGUcUVpP/+TeY8wgw pRQ18cyHHH54UaCxTjEfyHQ1EDlItjX7RQ/qn94xUgvngQ/edbxlHlGSzw+o6mhL /tBP/Dl8N5PAg1g6oKCrFUOJJNtJ7TxbXw7hmv7F7M3Z2abAID3caazl3Kkvmigb BSY0FQKBgQCAM7Q1W9SjcZ3b5M8f/POYzgb3N5QU9RzvOJbXcs1jEB5LDdOkKorL mx/6lLXrKN+mkdJxXLO1ruNzlfrns8gL7WeRUqQ9h3OU0Ru2Bxe/VFja0+ZR0V+u 4TiKtcxIl9EijEsaGkE7mRAmlOnyvOBuEFmEkVA8aaCNkzJ4v1AGfw== -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 131073 seed: /N4rLtula/QIYB+3If6bXDONEO5CnqBPrlURto+/j7k= zfec: required: 1 segmentSize: 131072 total: 3 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:CHK:7yjcwwt6454lbv3pni5mjofsxe:y5nwpzwmvpvr3gqxnykjixprpxw3w6qyqmszf7ijyxnm3wl6f5oa:1:3:2097151 format: kind: chk params: null sample: length: 2097151 seed: uqWglk0zIPvAxqkiFARTyFE+okq4/QV3A0gEqWckgJY= zfec: required: 1 segmentSize: 131072 total: 3 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:SSK:4zprwdvwwgfc3ai3fumd5zr4oq:e24f6pm5p65dacq6hpv6vtcospblpahmjrd6uabuhg2zbahfkkpq format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEAzrq9a2BWiiPkWXUt6BMAdSHLqm7l7QFCjYeTEGJzFIvR1G4g LxSdWfLuVX75U8M7A57Km73BfTzHLkQEtv7HKvI9dFH+Q5u1DhrdHD1yaDOosqFI 9v/xk9ih5ngCUhOZ6oXVXnOnOpUYeXg//NTHNIAZcKS+YLExeFHpKbngDRvlsk7S XFTixFWO6xCsHVCNiq7cUe+1x0kG6I1gqZGWFG0NkTxycZjUjC67YNXzD7HGwOfn b1DCgOq+OB/u8EIHcDnw/N8ijELxbWC51Dr7zPF5j2ndOK3qtww6jod9Dihc+kcg ka4N7D8/KOPOzkD5OF5oZ9SW2Dfnl6SiasEHhwIDAQABAoIBABNGyGjdx9QDusQ4 r/om43EIoLQYuSnbZzhJPwZVF8P/saYsSqX5Nx7vDxg7ycXsu3D/+oaBMCycYTpI L2Rc53dytRZGmv304/IXwSxj8moS/xUBAwu9G+qcVaRm5lh+6Wg76IRxJlPJGUoi U10g/h5AH58oTXQ4sZM1mBdC9Mhj+Wm9dPIsra4v2x05y1qxhHNqHGe+n7JLrDNZ c1jZ7I+8lxBWP9Z8vnE+XxmOVENjXndSVlQsHRo2Lqujk5I6I7EcGapJH+xv1UD1 mdKewzqZb1MwaK5SMmRVt7q3QJ1MmyJm0Gk4MRWkGzbpj73zpKT3eeHTHxsw0P72 yLEKnAkCgYEA/zxyPOrUAk3AYIXKt/BbSt5evDg/S/9FOMwLiDrRgiI2neB3NFfW 0qOSKKhvpJM/2s6rCpHAbqI3cG50E+XUMqQKt2gSwwpxS23iky2qtetFdcPv+c+1 DJ+wAODXAsBpMvhuS45NEvZwdR0D9oiKrnz82mBrRiFokukuBmc1qbMCgYEAz1kh IbZOJu4+qY2+k9iir5gjuk9EcJzvTJ+PGI5d/gJVp2jUUPaAqM46R+eirhhBas6j WCxX7g89rU7rGm5aUCh7BDHxWsqtwp5k24eUwv8tgVFqOe17CXf/722jRJ4LOKCI 6R6qvSKU/BssmtZugasUhbjH02ctU+/IGmiIWN0CgYA/gxcaOYUQHbDlU+Wh59mP w83nIEf/7UGYZI3qFFjV/RWCK7z99W2rdLCGFYPSfCHDnPHK64HrBcqt245e9S3c fB1+jhM9HXgbwPhEj3SPWEAskdlBXII85e+yCED4mlCTMmafvoVHVrOdMN8vlcKM sOVqoduP0/hltkiRp1UfKQKBgQCvplOY2XUvKZhPzlHpsRVwJzPs+oWB2JAnmut/ 
4+rf6V5iKT3jME8hsUJR83oufUG9lzts0HPUqXiYPkiP5XgAe6pqjVxmi1fTjJbo Gdz9q2oDKNMSK4mVJeDcFUbANBpRaD1TLrV477jCMMsCiDCpDCKgfT5aagdOGcys Am2uyQKBgEAVR90u1U6oevjeyFCMP6EzqiaJFRQQyE8BAtHNHlDXOZsULZxpIvy9 Y2Ib8QctoHfaurErSX32rSrXyXqK9mKnNUk3/DXicHhw7zPe9SrOL/8H51iBRouO uV3hW4DnDIJE42SShQcL5WNdFBrzXG/OAnyDnjmfO8o3ir1YJccJ -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 2097151 seed: uqWglk0zIPvAxqkiFARTyFE+okq4/QV3A0gEqWckgJY= zfec: required: 1 segmentSize: 131072 total: 3 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:MDMF:gmqhs227cyrfb7bkvjz5nkhxlm:hqbl4c72w75tn5w6dqiha57cwyfgijagxyhab53h2vccgqnox6ia format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEogIBAAKCAQEApFyk3JDYamuKCyq4piIPAGKW8xUEjZ7CSH34nTGucEHBmTsZ xzvMbnnIMCIDB3NIQeTTIRaSMnmdEbtDjiA5sJ54lkCSkHOaEHGXKVivI/NNTcnB rC/Xp7DESLWQjn64rw4UD8X3Bw5mFnj/i3dEQc0CSnwRAAgoWRU0LkXWIZaIPIaV JMX5cL4bgakkb7PgyQ16CIWKA6XdzAJA3Ef4TozRACeB6YcZuC1p12CSoO5kGvOt TLYd3OIvOgkU+TNoaWxf8k5ajPF6cXKDmDCDjwTShK0lhCRXqKqas2EwcZcp5Wxm Jz/tYerlcr2ZLQW3fJ2I33hzd77RxFfOwYiE6wIDAQABAoIBAE4oL80io4aXdKEy w3Ncpr2MJDObPvsJD2HhZSN6yHRhEGqJDA0NhnzSNDuPMNmOHEIZSbxmO0b9RY90 +P7QnB48fSMVuZwvHIfNPBBRN5dkztG6qvnyFh6LlArvK7pW0AOOMkP94yXb1vfA uePb5v1TO0+oB314Y5dY2eNXGigF5Hmp5rZIKwI67ab8AywapYHYOzvrYMj16Ur8 emmxx2KEXnLbABWPDUT/3T/4+h+T7pGOfZOl1bJdtNARGGXKIUYal/n2FFIan0/f pzoSL2m7dGyUFYBiPFWO3LYyVKkHpvXhicNjXhQ4L75gjAD08fnjXl01UB5qD70u lqU5VAECgYEAtlFX5quPBPAf+HgkwDZl0zOy4e9P+lIXKth+FqiHunSnf4uVbkMc tqxBNTr+Z9NvF6boRMZCIIza2QTjlCG+yfo7c/+/r7o6weaO4E3dhlb3D3ZxRVDy 8LxRDXJPHPiDXeMSJMt3apL9PVQxXxqFy8iobuG4zq+byOOm3xHR8msCgYEA5smU 3j+GQ8Uk/SK9HssEXwkRJY2BsEDlhnyFd7rtzrBMjSpdSC+js51xfmEfFf+wJDjs bg+LE/NOnQkqj0aP3cAnxRqxguYLDoue31/NTxHqF8Osh73CVaycbtGOlgZVSKlV wbIopIZCf1qT5Wqmq2iaHU4fexYwyOAt3YzRV4ECgYA8n4+7BBDqc24uEMUnpO72 65nvxsOxWNqbRKGopyF6vo9zudZWc7p4g46rRJKTs5qdIsLZG5OhfzTGNPn3p8Dp KGvcho4WwLYJA8E+lKW0pfZBDgFcKy6dHgFVl0z3NSt5bKf71CxBI99RJU3FcexL ds2LEUOCdqI2inxScHp/QQKBgCAlykffH2vHRWzBbwigDP7T+4B4oq0TjSVbqRfR gRi+dBth4FaS1EHL16hcDQF6eWXCTCTUo3Hm/XdgdH46vQWNo7yQCFQMiVPAXSQO a0HtY+dliV3rL+vRPIUvSaZaQXz6oYh2sbShQxgMXPejEI5l5rnYTQhPQgJpP9pR qEOBAoGAWq8qBylHt6iCe/Y+mp3MkGbFXlOu01Nz1gHucIoAQdZYrIRUQShHGBhi SdXiBD08R2GwJw5LvvhIAf6U9EW0joklODKtl948OANOZMwfBjWDTIUeuWmHzXz0 YU1eZrwemS4Phq6UH9WGffDhW9UgKI7lysZjQDX9FBEIsCpz7kA= -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 2097151 seed: uqWglk0zIPvAxqkiFARTyFE+okq4/QV3A0gEqWckgJY= zfec: required: 1 segmentSize: 131072 total: 3 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:CHK:r7wva6tisq6m5zszgr7seu77cm:oqlgdw3hi72qtahpsi3h3yryxpqdshagvt6xnobsppkwp7ic4cia:1:3:2097153 format: kind: chk params: null sample: length: 2097153 seed: BTBX/ampNfLU+ox7xipBGiaSbgC0kcB8Gy7BkJB4oKI= zfec: required: 1 segmentSize: 131072 total: 3 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:SSK:oq67w7mdvze6t4nr4yxgdvs5ky:wvz2swuip6lw4fk5wmtcfchrdai4vclctok6abjyrnnblenlm75q format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEAuVhfMuw4NVMyvfH43pOdmBQ54Nk24hgAl4ho2iKN5cHjncRh 6E1Jx+DraeQXeonpI7YwPG8rwBbvJbksSQAvGZldCrnHBXahcef99uaj5ozx0poU Jd5EN+3v0VJhKdF/BZ5h6okxG/aBX/9mzjg4amNBNpgCcdudU9NeukSfTKUAONfb 2RBkWLmJfv1ypoEVUUSWa5x09qDL45PkGWcnPgKM045pzbCBowPSAymU2XWU/Vso ELkYP12JLOgRY51I4rVwAXWa3DUOlE+xqRO4IzndvGceQfc/gL+qAoF5HvpIsOS2 tF6w7nXs9Ykv7qYPU7NknyMGhtbU7dTuPnf46wIDAQABAoIBAACIHF3NB6w/fNnG o+wfiEgzZqcaeGnVn8rPfV2C091g3QJK2e0Mq7WDE6nOGbMkSiLsFt9Vgk6ewjjd ex8KfRgZtIafWc7ONFBbVgMrTjUPGnJ8NqqVJKT0bcev8M9p5m9hsO43TojCAEUC 
Oo1E5ASKHFnHvdoNwznGVXM7VymlHkRvTRIv92BoaOYVWeB6HCaRqdlCpYLht4FO AUnaG7JVdyFG39xXV4FAmSpJGjZLSNgwNnl64CyoilyrcTLAkRgLzD2DnnWQ9V2j 6S4dumacT5NhNdZ7ISnZaTPjFi4tSUaBUFBB/d9gLCvk/XTGaIYLf3BtDksY6G6o e5fYr6ECgYEAv2jY+i7UsczKZFsYQOt4xDhOHN/kQayXPHLlxFm5Sw5fIQZ0BrmZ t9MmP4dFEbBkOGI1b2iwO182cJrIn76mWAPiopEAnE5MLZTFeQdKK3ITYYJitRiK e4HbOIrR8BxTMthjW878oR8LUQIn3C64HZCM9yVxyGx7x6oeoG7Id98CgYEA9+Ol rp3tT/6bqepdFAHb4zyewdHajc3P15TAYU4eImSGMZlaMyARjRaIlAD5nzdeykKP CA2vdgVOjsCv9tOVG5w/WUUS/PC+wjm3ZuSit84jghhEHVOunIkA997H9DnY2jcF r+ew3O1P/ebnqH7Amf8JLDeoF8RLWhxWpIMM0HUCgYBM5blt4UyP1b9ly/cVdcRB yIERNp2ECOuFXH+Uf5tiXPa41NfeL8hiwpB2K0kDT0MkJ8hh5sQORjUfzf9Vtgks CPuO5gRBx27xTPh8pAIXLDA/F1vCd4aDEetZbuPiu+5s2eQo6SIzNL6eH+iVm6ta LU2EqOVqaLLdxymguIEPLwKBgQCAfyz/WzYM5Xpjle0x1dTZ8i6JYfLc1vcKVT10 Mz5DrcAyLcAoCFOQw0GBFxBOjDFCv0XNcuqlTxLtxMxyMjN04IWmDLxPCayYmbqM R7BhfyXA5jtIyHwXAJ3T31PfMa1LUIJOMNfpbcqtXuhu22WTbjSfCyrDRymYSGBR Xu1hkQKBgFFuDFlGPsREXJyvgNiMgYdTIyLQRq7nFr8g+kYw03xtgjpcw9j4WYQD 3EkQcurkbHTY/6qUf1oh6vgrY07W40VocEHPuimSfZHzyeXbbUuI2kXyGoJeu6cc ibZ+MhbfpLh9ynjyNWreSn9BoWQG9beNcw//7Yah6beMRe0P0Yd1 -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 2097153 seed: BTBX/ampNfLU+ox7xipBGiaSbgC0kcB8Gy7BkJB4oKI= zfec: required: 1 segmentSize: 131072 total: 3 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:MDMF:7jicgfnv34e5aim6u4agso7gsu:w53hrgczlrz36vuxdwum234dzgfra55yqsmqmx6ighnfarzex3ya format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEAj6EQEncNJm+aeBHyxTcfVnki3Oh+MLhMHYXSrabWXYDln7aX Fl5sSGVWDOU77INbWexekm7jJArTRGHEafOBiYF35P72MVQJv7ae05tS9DhM/tVF 6rBYb9BURjMeNoAls5BDNX8QwctMZGTF6NTtx2q1mXj6I+acxzZagxcunZDQZIQ/ YqQLLqlBxtY65UlRrpMchzVfqFv9ghYXKGxlC0OzoLVERuTYWMKjsd7YCAZkkjFu SPBsFTN02ng/9ap6H85RwpuomYdtGjgM1CVRGllbPoY1m/8Hk/MZ049SNeByg6FD YP830YZf3Wwru3xo+WAS3YWIYrKc9TOZHTYcoQIDAQABAoIBAA7aR9heUaX5tjPC yLlRN8nlm/p+hlzRFM9GwUsY13kdimFc7JHbxinUZN96LcYJrWmGp8mlHZMNatyF mbNKgvOZHQYOwZ8cmCd9oeizBWiLkUFsWkm93l9YX/v5xEmRhenkOgxZrVNvtmDO tP1HVlF1Z6h3xLJFYk1i1whQXjndPKMvlDV+nXdvZx9Y9x6GOKG6eHyeQ1lYUmrS jUei8O0ohR5uBXNVETDpZFLExOhpElmejowrPvvNc8A1goHkHpx20ddZGKW97aIV wbgPiWA0W/nyWChPwBHHfzv1QXY5n7akiP96KqknvoAQUUhEWYbVl3Ra/4YtjImB p0lfLHsCgYEAtr1XIHTJpQB/jaqKrNCh6ieixDDazjAnlWEaY6Fn3HFRHZehPzfL Ylnu5Md68PqDbb8ERrstAgbJ8lBZQboPBsq93Bqj0R1+BrsYGWuUXj+gFjj1Z3sr Kissmrc43oIOGcBvEuCIkK1QgVyB6o3Y6oud2iB1S7YyPcLLBZPehMcCgYEAyTXL yA17mcirXDuRkFqSuT2ApVRq+ddOf6TYxOcP5RZFEuSG01D4XSJ7iOsELUSGE7O+ 57N4MCdDdP3TRGOWCt8n2vP9jvCHT7Gz5LVuxzw9oPoe4ahh+YsdmkWWUWxgGhXk gifqThihSTQFRM0nH/MDpDNy2BGp2iFFVbS6G1cCgYEAlyDPxY/QlB1tYAQC3BlP Tw+olQiybIN2uRutb2g1NSKiKw8T0+yYz6YA5EP1cQY9W632I2j5OAvVSAkbSDhP 5RYXHskJYhA6AecJbzyBX9DO3JIOop5CfIVoRivxZFO6xaFYOwxm5P/w7ItNBmZ5 VsBQs+zUFOGBe4J11Q8NoFECgYBcb9Z6xZbvA32WFde91Z5qc5LSYYHz3bI2eekM LIrk1+JL16kJE73GK71NHYsBsOVXz8/4aj7hAGjBKosQdB/ORs7sjAME1AOV3TGj 9KY76bT2a3IcgVrhZcPnx+hS59MOqNgd43CFCFOwabGx9f3vc5lMqkYsdZDuoTJV OX32aQKBgQCr3mKRSwKzmsBEnoyfebbW0WKi8E7A/LeQYUsHY7zcKy9XNPewgqi3 f4cgFzPF0InVVI9h2k14VW27Yf6wqvpDaPYmdZiX45b4186Z/gSnjMgLthkEo8nF HFeIFIz6LwA/o4f6cmHIL5+r0GuOSQ1RyQPJaC2NGFqRLP0xjw5HSw== -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 2097153 seed: BTBX/ampNfLU+ox7xipBGiaSbgC0kcB8Gy7BkJB4oKI= zfec: required: 1 segmentSize: 131072 total: 3 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:CHK:joatus45ult6nt44xj2awpeu6m:jr5gh7rt44ppdqwwwq7v4feibbygqfbdwxv2siqwijkbnhvdej6a:1:3:4194304 format: kind: chk params: null sample: length: 4194304 seed: 2NVWOXvtwcAFf0J/g0fDC+ypQbnCU7awZoCxcnhpkVM= zfec: required: 1 segmentSize: 
131072 total: 3 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:SSK:zvtntitwrkhj4cgrby3ffhlhxi:vn7utwydkd6e6pwf7iato5o4yplgpvhclc3hbt7zgy5irbdrmvka format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpQIBAAKCAQEAyK3peTMxSvg10hxjiJ7bUENuiH+c8rXiPG0C+aQGCnlzn0IM gOSrSX0FtKBdlMgAGdlLJB2R702xhXX9xbED4RnB27lEFCEVCMZjlRcxl1NBivNT gSMIkO9+YRHsSQeIOoZ9QVlfCSk6TfSMyRnYAERZjbITtJxW08P0M4UY75plTVrh r8tcubELsYGCy7DjyM9oX5gUY0KEqWAIGrxyCS7TSHsBHCor4f6hug7t1pdJ5RWN Yn76apmjnXnhZj9LA8AWhqUqM+cS3+GJ9F8AaIZHOF182vDC9u6t4dR41+OqcawD CYCJH6wZDYyUxf5LWKJauG0VH8w+EV+7xlgjeQIDAQABAoIBAADFimxI4hujsLFK IqfX1IgOelJUC0pzoJcS5DwJgWxw4xztqBamynHR5T+4jiOQUU/IIh3Vb0Y4SkJ1 HbCw6Y7oBnLN15EsP2R1PtTH7gzi6RGbtep9M/86rIW7B+mP+dofwkOKEG98lRjP o+ryCn6VxLJiyOic4UiXLE02ac4iDBrI2A5S751PzXPPKl6rriVF3i45IrWV4bXD iNFOQeq9T4jRrpdhEGlks4u20Xdrk2qZrfYmjlktKoypBwVFkIT/7phuL8sibkpN psfnrUhldGFIiF4YE62Wd/2axCqiJ9xX2eNandSlSH/rugwM3haGDDCjlneX7D+Q tc8jCjECgYEA1rZB+QWGt5z2f77byK8oJntpGdGxQEihzKvV47+7/5nNfLPmR4BZ bHmvus2LpkZ3OYmZ3Ay6hbReJjR8ZGgEknuOxqUPp5Sxo4EdrSYrOmtdgc2ktGwH imPGrKuwB+oDlKKpxKrr9f731ggF268NY55ZwrJdeVahW8D32S7rIlcCgYEA70Td ImY2fDnD7d2zxG/I7RiPfzlDV29cWuPk+t35HBbEnY9IHh4ye+UlUCcQTWp+tL/1 BtdF+xQvk++D6F8opaxOKvELH8GFQe0L52ougNXOle/jzeRNAg7xk2w7BdtDI4Yo nKMlqDHbu7QGwXFww8ZWF1N+OxgCaO3ohUlbZq8CgYEAv4Q4mn/kZ3k5oj7C2mHq RVEFMYOKQFXJBMAtfAV1EovE77uj5xlEKm7sYYqgSwNFq2vicpZj9YkqBZgBcKob kfFmLCflK8yFGtu7dcu6+VP1RygABvLpUvamqzRFQvnokbb6CTOQX486z42+c/LT 1YzUcccZe3bbXPVl3jJsh+cCgYEA6f9B+KtXq8PejplcfscIDIARjk2VQ0RAYQ8x V/qP7l2B6ck/sVy86JfgFvQtKFj3E5QLcKZF5VgHc7kxGqc9nFDXnX1g8KyUwzWt h6M7WXo/8DjMZAZbHaE5toCJdJ/LmElTHGUdpdEk4PweAz8LFhu5BFT+RZKkgLPy y69DOTcCgYEAkyRNcRcKTUAJF9ETcURZUydYxmVp61zG870+xkNib88wR4hMrUvh lCFlZsUVUuuCeoqHWZaMmwwmgfoj/xMc6r7h6b57Rfe1tTQhvhy4yVC3+Q8nR3qH fMB2mBedtoJpcuNnohg4TVmLWS+iVPwq6LLYXmqWs8T0gkehCx/0AFY= -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 4194304 seed: 2NVWOXvtwcAFf0J/g0fDC+ypQbnCU7awZoCxcnhpkVM= zfec: required: 1 segmentSize: 131072 total: 3 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:MDMF:zg7tbgey5lcomfxvkbxcy7bdbi:fvkr6u34kfm6lnojyz3bdrpdvvpyzhtusee3lx32blskntogde2q format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEogIBAAKCAQEAjrJ3kuOX436Eyzbgusynmu0jgJDkonYVq0aYKRZJS+zSIRR7 GJTPJb0VzkxMgKsf4BrVrsbybsZLB+I/JDzKhmrKaTVj3pw9Rz190vqif/JZqyY4 g4K7El3P7C+9FhgF4/WfhOTpVmG4Qk7XBjzWOQcBcwlkB5fcSx0dfK8ou4t+pTjw pohbW1l/ewQFqj5QgMOvvyxSQJOpQ6fNGv2T5s/7EAur6wVkp/IR0eB7QHMOUwRs yQo3SoM9Nb6AL5ai1woc5DgjFYr3q1Zr8FbLk2n4otUQ8Mn9CcFgitZx9ylG2rSn gf5FJf57uSLrc1jnOnv3FPe7Bxm9wa8ltI7RMwIDAQABAoIBAAyYLnSUMN/SPVwG pzwYrw+KyZZ7c127CJUtR5j5j67E8EdQj7+VXSXqc/Ly1tFU1gN8hHEG7yo2xzPv 1Jal5Owk0pU8y87ZWsRsZkWv4fxjKMxLaDNQ329mDa491ZvbGYrLe/C85LLyltKh ACdCFfHcYlTdNp5y8u12qUqyPQKca5oJoMXnkDUISKO2t8QY4yZy7oOu3cljiE0G NOQvkyU8y6RzL6X3/mWGR2dNNvFMSx+Z2MkNCN22WI1cq76l8S3TwTI6Kb2HweET 8brtEG90BQ5mYvyzxXgRhIuzcnIekRoHJT1FgHwNq9EGPAWLjH4FG3wINUUdr+t6 haoJIWkCgYEAuJC25X216BEtCXfofu5giyENG2qtWZ8C/kw3MLr/qB2MR1P5WKHB k3JeIwFVKu6A8o5obFsz8ZN8fUnEq5PcUCLTWKQw3swGjOLdDcU+ANUeZhC02cUH 6EqhlqccqwdOeqrjzDIh+d5OT2NECUDIZyxteZAqOxNMg+3ZFmEE3PkCgYEAxe1U aLJdWSoiyrr7ismFmUByJ3bYYu5q9fDqfLIUBH2FHIoUymLARqM0kcpH6obZjskF 8AXW3DYW/CkfEZOw8rS0JdEsZV/inWdOIdHfnfr0822QCIKvw5gF5tMN2xosYaHR N3SvgLSc4xJGB1OEAS2gKzqGerYPQcBKQ/flBosCgYANXCxhIGBylAu2i7+AsLC8 YYAZY/d4bVJCJjI4jNDE0p686w85ozvn+HdoAUiw+uLKrBRTA6cW7Z4tU5Gw+dsQ 0fSKjhgbiJlQyXtG6+g5FzREHyF4QhL4da6MwTwKBVVg+83Rki9zbuwsQvtB0Dax gT3LduwXqqX3RthYDl3TwQKBgGc2FmEuOc1oUpJDJS6/XaKH379Ckx4r060Cf1Sd 
DCE3TzWNr7/F9RwguSYZRJ1AyqxRmX4LnXph3mSKEQB6crhtkM9zn2IRuTt5hl3O cnyQRDG4fXZip8MoQFOY1U3e/SvAVThE6cwE4xbqDYh+fYSbxT0lnMDatWQFIPUG jPB3AoGAUgp8W9DPRavRwo90Ijmqs486/0YOB5MKfb/tFu34W1KTCtG1R30JA9oh GP4M6PRNYq2+CoWu3vzdVNVRmFV2TZVy/TJD/pF/jcSBOurnlTiafdi5CM+GU7I7 VM3I2EaE+TPEB6ovFqbtkhb14SPPYVt5BwK9EREC0n4kMRvth/E= -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 4194304 seed: 2NVWOXvtwcAFf0J/g0fDC+ypQbnCU7awZoCxcnhpkVM= zfec: required: 1 segmentSize: 131072 total: 3 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:CHK:mz7pzh24u6flkfmfvvokdwmriu:5mzo57de2ywsdqkdvm7557gcikkpjqbmj65mk235t2xyoejtpbta:1:3:8388607 format: kind: chk params: null sample: length: 8388607 seed: w6uP8Tcg6K2QR905Rms8iXTlksL6OD1KOWBxTK7wxPI= zfec: required: 1 segmentSize: 131072 total: 3 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:SSK:yrwzlwbgia4maufleime3qnzym:ymybiqtqxpjuo7rwkvmxqov2baz3ikpyes565u3b2azlga6k3axq format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEogIBAAKCAQEArKRDPNNLKhOIsALx23rZMi3gZVwZHDzlNuem1GlsnuCmGjmN kTFruy6QdNiecKO134SCK4XFB1pLsOrHlmyiWg7sulwYLAmMWticGZ41aZxANQsK em1AEoqqrtWDKKqQ3vrGdsLYrVFdgXk+KVavoy5RynZTURXeqh1AYiUujTP0EtxJ SxHyQeXgtz0SAMi7RnyY6ohdYOlhAj+6hQMBRkOOdXpMyt718QIBnVM38d9VCJX8 tH6fDIB8rxj/xYEu6V0jdxomYRtzhAK+C8a/JnJEJ4L0N0Dt9GgB826DQzMa5JS9 GXtyucnOmsmFjs+qZh3YJf/BQSZeCKYsAjcZcwIDAQABAoIBAAOFvyN0vNyzv6m2 XatDPemSsGM7tlLHq/ZAqBGUbGrEyE7SVsucF7J0LZlcuXAwlEDmahDWRAy1hcQr KNMDuL+f/Kouv9rXf7LI0aKKLP/QeCo85UQAik/2iVX+NZS0/4tf0CsofZWdCoyI 5eufq6oSLglfUa2JWzIwXO8ZCsfxu0UfdY+dp+/PWUJT04P1pkGxrWmOjXyCKSd9 fRvX0UGQBc6OsJZy8SWWoqU+0KiVdS58CIV1WdiRYdad3lNPs0IFbX4wQ6KaX0Pm w3Y8Spm1gM/UxKZ4pHuA831Go57bMLr/3aZPeT8wvTaG1KMh2qK0mIyYd8Nvd4NS dQeJDjkCgYEAuB9FGZWCPQBtbq9xDdBHm3585IJRUo5WzhEMcnbbqfG7TMYeNP+V sngt8B79om4ZdAHQnlmWwNd0AHQpGvi+V4OsO8VEpbtiN99nSf5O0PompWRm+lP/ wgXgVG9CVniFodocm4ouwy4sBrzvsOVBBtCBpq9++4iS9xX6FSMPWYsCgYEA8Amo u4x8XI6qbrXO6w4LNrB+vjjOV840cgQQCSoWaShyCqC/E5ZolEIFEQmnQXSwpB/n y+XTrPTWazEBhKhhed0Wj+SNAkCSrl1Zv5yPemjmuOUThiR7msDJSFLZhT4HwbEL O0anGk+Zfdo0e4y2unOFCvoOujBQuUeeJFairLkCgYApbwABzd9NEveNXPW4AhLb Lw+z7I+YYGewX63JZG3yRG+9yyepDYsGan0L+C1jjBs2O+JSgB6ortNv9rP5Wdib oQn1OWNFWHG12RRJVm0uIdzogzuZQaXgZ6of1hm7k8uerJKbffEgAxftPD9EM5L6 kZlbhUqyF/3alJt+fjFKGQKBgBd/2pUB2+rzzJuqEOfSKCbigIX+6bSO63N+ElPT Wv5Qh1mjAxHX18Ur8XMJjuZJlkF7HiZICcEU9yjnU065bVGQ43SS23ss9y861F59 5U4Glw/i3VZ0m+U6mnoKImF5ASllO8RB1nos8MnxYtH1pK38QToh4O85a6235TOJ h8OBAoGAW3c51UXU4Ix/QkOUmCreh0QYruqMhdQSGIzPTVeBHm8nUVDqEnnbABNw 9dPUFRvkagDCHBM0J/G1SISKNYUStIBVnwOCVc6wQopC7K+SacN8Us7oG4tZwFOe C04YAg1FeYDYLuSfEDFxsooV9OV2Hrkc4dQ/27X1WwzgJe/9s1g= -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 8388607 seed: w6uP8Tcg6K2QR905Rms8iXTlksL6OD1KOWBxTK7wxPI= zfec: required: 1 segmentSize: 131072 total: 3 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:MDMF:2fneabhgwzyxbv2toidi2mafga:ultt4xt6hlsliy63zbvdzgp4jzu4fez67lfbg7ogrpzarsharh2a format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEAkq2D5r8pQqQYCgRMs3aMkVJKrkLY/EFnNLqM4xXj7aDMwv51 CqMNw3A17DsL6jCVAFqCK7prPGpfBhFG3sg/n7oRbrUDn26Ow0IxP/6JAAe2+uc3 YnF44NnKU5+8hNpyy8GJmtabnNx/+0oaKnrr0R556u//NKoW+1sBB5lM9V8B3GoC bozR/kosFoxzjCYSKROg+V8nQRDW/aqqDndD19Mk5x56cND4++/mqWx49nTsAlVb 1RiygnfeP13BrMiUMP998a1iZtnpQyoAMVSqObFh3eJogmpL/lKh80V8PzeerR0K /iryh/zRxXGyj7HWE2qa9JNKo8qm2ijFoPYGEQIDAQABAoIBABaVXbIo66blkgf0 yoR1M8ZqN7Jl+3e3cDcHpAqQw8PCFtTNDeNB4KwfOkYxfy+jJEXmKZWnAbzPGeta vKEvjtA+e1Gi0Dh7csKwjdmvDJPzHCK8d+QR2Y0xKzw9425HHhauKUwsT9BSiOf9 
bFLvEyDjAt1/7zjCGYDA7cMoQNekeficFlaB1I7XsTH2w6w5SlYmSLl8rtkwn1N1 kaHgZp+YnotGg7D87J1WOsGR1grnK4E4EjVodCdEmFaSOkpVeCI22W6ZCrKdBNSf h0V3vyEfwrGd+FH5YJME51cD62ifioNnNQjzKvQLlxXCprUe9JFbhEGdRRR3GF8S /WZbJ9sCgYEAvHNMZ6mo+K8gul5VyJw+UNg3fhOK9vTKwkaF/x1GAyNQ/pjkup/e f6087aJ9Xn4avuv4h/yhbbQSPreCr6lSIX6fGDpxNXcUWH5eKLk/bsOcmneUh9Hj kvPJ5k/20APSoAsnN+pxiWmlSyoRo21DAtC7piULchyzEsY0a10DLScCgYEAx0EN 5tCBYnuKgqm9ooB0PgANWGwkYEBP9lhPtNAIThANI0bHI6yM5p8wzr1exLMN0EXH KB0m8c0lw1/1iQMGWYmRljmteORCwdTb+txiZNS6bUY+mzbRx6g5cPzR923EyjGG lu66pL8JcfJQR5BlkqwVJTbM2S5uSTFjP0z0JgcCgYB2lzArxA67gKnd3mOpfPmS Mp6pTm8S/fVi0LKeWrOmYeEkdt7pupVwT3qaKLkwb7cxEpoyKX8E5F7e7Ojm1m0C +wXvX0fC148MKWnjwr/yWlMAuePUnPbTkWsq7oNpYB557MrfWz/bs4n7hRvYdnfH G8gaxBEx3HGsjOKL9dp73QKBgQCh966uNkVGYbg4+HOvGY3kLDSs8NMs7npRfH3m M8jcc83KJCmSRRwQB80r8OGNMSOEboQyhmf23FTbGTDFHBFYTSxsGhx6Dcp6N2ZN 6EGPRyD33MbdctVZ39Q5lTm0UKVL2rBWFl7ftm6eEmPRmH4ImRtjMcWYsVZy5tOP TCWWlwKBgGMiL5S2zJG2SSo4iDqZW9jbnxnOAbAoMh1X6CDlVRaj5XYe+FqXFEQ4 aK4VpalIw8WCzgTJPdJLJi/KJiWywco4lvX47tCs99PwYnP1hT541XRKjcel1pCN GlE/aY2p8fIYY/yDDSOLkssn2Zkye/mc1jteKlXD38T6/ecimkm8 -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 8388607 seed: w6uP8Tcg6K2QR905Rms8iXTlksL6OD1KOWBxTK7wxPI= zfec: required: 1 segmentSize: 131072 total: 3 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:CHK:54sq6lrqwqlpxicg747gdls6ri:2aaxyrdytn7r74my36ek434hlbhe6glrgr2ic5vvpc5bbdxmvo6a:1:3:8388609 format: kind: chk params: null sample: length: 8388609 seed: yPi3JHKKbWaEEG5eZOlM6BHJll0Z3UTdBzz4bPQ7wjg= zfec: required: 1 segmentSize: 131072 total: 3 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:SSK:an3ntbx3uwl5pzmgz33s36tfoa:5eaytstw6x77wsov5jgvxt5vhcneykoe4myucajylkgznt76vecq format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEAsXGGPkzcQLANVVzczL+QBOxxyJv5zk4UiSmqIdJavHiGYDeI vp31Od0jdn1eTpbyixEDOrSKHRYwsDRavQ2mRA2NOJkWe0jYPIzFV9tvFBqjnUcJ NaOB7mKwFMHKUsIjepeCaFey98Nqvfm4Ba4s0MmvA/DPpoZEGyjK2AoUdgzlzfFx sC+XO4aIFKRcDxJX2f6lAOsKnUvFSgGw9qmEnUFI9kcw31Oo4Nt1Tkfix6ioEv88 y0PiJlMDIzbvOJO53dUCrMiAnv3CzpCIx5USKaIzoPu7fHSUmMIsetvKHoRHjs2E WqpwBsERKvOxte9SFUDiV3my5ssa5rq6DEZ5yQIDAQABAoIBACUVxenFZI78fHzn wJnmO3Jb/FfiCW6NsQsNsyoIbcBQLD11vdWg67yhNCUyhIBGWgComJUvYGI93gUl nAVBEgvNDUPT8vfnPJJDFYeMLAX2n1VioFEekCxDYeukqOVs/79CZRXrplLT+74i r8w4H9OvIy3eqXdzPk6y0toeGTKmm7WTWheMXTfiKpsn3noJBj1+OyjzfkEEV0Rt k2MDwupzLgFg9N2i+CEutoobxgQf2SUys6SM3Nf//+mtBd3pciQWusOKW7rzmX5G kO9GWTkG8oJko2OECRZExBpPdw+3/lb8HXnqLC42Zc3fECSeWLo0evilT/k9jjjw I2TC+BUCgYEA7/P66VQaCuskhT/9NDIVB1Mwhpqxvy0SO3yyASEIkCXGk1m58RsA bPV79oo8bD7nUkbQBS+ChTPpwaiPubg0pyX+TTAkAYNtlmP9bQl7LYR0tWDjIuol Zxa+HbAvmNclzQlGuk+bgevT2Hq/0/E4OZP+XcauaQfAmfi2FXOhuC8CgYEAvU9f tXFAbYbNNYM19LW7DXTVxQAk7+vn0b63ltpysqxVYGRxxReEtiHIgIapBO/aYABo ZMJg6gX2zGjcfBOzWKpnIYx4jP0cUrvNHm/by3/2WNOAzE2dYpUZ7+UzvujtI9Y0 JrCpB3q0B2885LVchDTU5XjYu+PVo5dFIwAH94cCgYEA0UsCwL/dk0Z0bVFZ/kvs sZ3rBo0pmnGqpH5oGLoCaRC5+s5ZdCa0IVWhkXITr+rSA57GVK+S7bJRItxuuHQm YOCvxg3GaheD35hJdPC/Iv2UepwOoeaPRzK8EtMZQPvv+b4slddX8WOMPRcb+LY+ 72HZjVv9xpi/cs1PrLhWB3sCgYAUpRJP7DDVgOziGBQLQsJKXmJtoG1myLg4NG87 AUme2JJa97k8gCsV3atK8OR/yFRtQb4gtt3wx4O5mPnqgg997N9gVjxTS8sJ7rcY yaQTljncR/x0y0YNmSsB8WHqQOaTkOmRCpT0XtpBMU1Xt7uGI2jQOZSRMPB8baO5 hGhaAwKBgQC5GJv3sLa+21PIlXp6UuIGMFp39iwfAKchhnhXP6rPkC2pP2WB11xa W9MuEko++3e7c7+adAoPFdsjf+a15CjogEURZwskiRpBYyBugM/y90sWM7ddOsJL 3GD4Y0aRJ1+N3R+l0MAv8JMAo86g2Xl5J423ZfXxnosG9JvtvPGndg== -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 8388609 seed: yPi3JHKKbWaEEG5eZOlM6BHJll0Z3UTdBzz4bPQ7wjg= zfec: required: 1 segmentSize: 131072 
total: 3 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:MDMF:5hiosdrt4b4h3x5fx7v7cxmyqy:jkudr2qefit3qow4xs4eldwclbiv7m32w2cvu3igrm42zcak7noq format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEAsU9bSCK68rb90LSqN0IBMqtLfLkVWlUdKXD4J6tTtFiFWuoB Z6v0kpepHIIKVfWQc74ms5wFX/M5TzPhjuzf+IXMMBC2d9Y4p0iBwIj2S0RIPxH6 d4iapNToglT8eyoF0I7r4S6iV7t3Sr6q4DqtPlQmgCSvKbDuNYuP7eygEyR94Q2+ 3DcfwZ59W1u21humiftuoy46PSDMR5/RFY/0KfMaIeGnqO0IeWnqdSsJlyhy9jrq Sa27VycC6p5FnpzqkTYlY9WsZn6AXvrAbVX7wmNT+3E7LfO05GIBNDKCK10Ruxiw lppkD0oGdei+cOLTS+L/MYS2xpTZcPHQybQopwIDAQABAoIBACxz30g+AM2iCA8/ hB83apJ39IRv6IUNqrJ9kpFreCBSQxiwayrBJx9ra2fsyEeVuaHy7cQA3S4Zjegn 8yhAhcRKUw9H2V9A81IpMPKCw/DJzS0Wxksakd66TBKE2QnN1shbVJLPfL7vDnDM TY3K2C00rrRYvht/MrF07GAzf0xc4kt4dZ87/o0tsryZogpJRzQvwb7T8fiMNMyE wYYQCA5Tw8/iYKTjWMgevzi6qy/AiLBTT+KAcbaQpPF6IeRoCfr8BsTRz/Zvp3gb UUOWC7tdOKNtE2M19ado9l7JgoXNQ3p6bIydDFQSEg4bQxElvtz7SyukLnM3N3PF VZIm5wkCgYEA3wgwDFsIAm5f9n8r3SR2sEJgPb3iF2/x++DxI9SJRodM1qT6RAXD rNUqLSXlUUaQvUcDvIcwlP0hpETuBOjQwgCd8QEWLPRllYcWgCkBtoe0E5GyUEGw j3sDZ4EdPGQUaJpWFR7ZKEGNVjFDcRq3cEGQUkJWP1uxrVBQU7ohtVkCgYEAy4T9 nvCfrmUmhKSawXWTVSDVhMoBNAyt+KCOaIevHscCj97PfuJNhLCOrpYQuShMGtpF 5DqohfqBVosXRmGFqjxJbpsQQaQCL7zKy0e2kojgoIdSkmNxKoQup2oew6SWuUoM /08QaILHjd7t795xIFPimcFgVtD8Uy5OaA7jDf8CgYEAu9KyXAVhZmK7T+Pi9bYa ee88C2LYfzJYD+1sRedbv9h7fhurYxOTqP5PKXxLdTm+9JdUbzVOVXojFaqy49GI 1IgeenKW0T70OYttCHsAJU58+Snuh6X6YaqPwF+8VjpV8Y1fxyOWb09dDmQoTpzY NKISPyP8bBj1NWZ4bzpF3ukCgYEAmJDcs458lfab1mmy3X3vcayIg+AO4N70d+Kc fv3gKHlVkVadQ+gP5n2YqIY0iSkNTD/+juXuOWmeFat1SjyHQCOrhK/Xku2I+hJU D+l1kwnrIkvveW/0gMPQWY4y+8ThfItnjOjPSxlm0RKiWePt+CcKQur09n/5971J 57XpPi8CgYAK/ypz0j0gYUgwvYr+URad1OtaF2rgsYNt5XwoJjgPNIBNH0i0K971 gIiprx1i9ew6xWyuKp3E3dtClZ28m1txgOrcMuP6L3rEXU3lA6E3ftDY+l4wKYK6 O9zVS5GSIqjhUgEJQU+M6zo31L2hKcOYY95zO8/JyWfT+fgBC6mW/w== -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 8388609 seed: yPi3JHKKbWaEEG5eZOlM6BHJll0Z3UTdBzz4bPQ7wjg= zfec: required: 1 segmentSize: 131072 total: 3 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:CHK:vvh7fppprucnsblhp2nq7ixyze:ck2nmw5uynyyhbr3s7h5ciffgzw766bt3e5n3qx7r4njjzqzkn4a:1:3:56 format: kind: chk params: null sample: length: 56 seed: YQ== zfec: required: 1 segmentSize: 131072 total: 3 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:SSK:p6slmwqqxjwjrnch7c2myu3j5i:kcvumf4itbihkvpkg46pwcdcajyr3wuglmiyw4gupqshdlnqp5ha format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEoQIBAAKCAQEA2wplwURV6PZJmwtqdq54eKXN3yhOiCaDEhdGyfEK9qUtrOAj d36v7ejCM1pxunmjqM0P/zMAkpuo2ezmTNri6OVwITtFrKYqhvzsPxGmysJlByhD 2V2Qjhngbc++q9ly8ArD0UAaq8p/c6M41YbIkafbtzrUUYMdx6kH5OP0T3fppNO5 F5kwnxDXEIwSCuXuyF5CZajk6RO/iMB/IeYhiGN8Hr8oZ1pgknLO57OlbnAL2XD6 a6qd4RnSCWSzWHRyK0N1/AiuvINBBN5b+l8tdz0BnrPYL8EVhA5XZ3Nlz+9y4Z6v p7vfiheAqTUUDcSBR3pf5G/AWkhIH4FSg+wa8QIDAQABAoIBAFl2Oq8gCPKYPOHd XMNSaRPlrFr7rG+BQ0FNTnVGRNMODcSw2uuAS7ygt0igJRkje0uDTYhOvWojt2gi kMFNGSZEJ3L7MW7dgzsU7CyqOfRQR1EQCf4qb9MKEJbpJZgsvPv7eZTqWLpXf4ys WpcjcKHE7EE+/t637Z7Rk87Rp8QYlVS9Eu+bXFl7ZF15k37uoPA4Or4RZD0Btxnj +QoSi3UacdvU0bkmoD+H/XBFnyEPNPKnWtxV2lQmp7zLQ+2jvUa1TKyhQyFuLb9U QhqGikpWnVB9YTskw5Z57YJ3w8pdnUbZokpblT1MFOc0qUNlstjXFZ3+q7s/ZNam QYiXwc0CgYEA6A7yRw67Yy/UWScJYqA+c54RuxmQhalk2jockY6Ur04EHA5TRBzb VKhvX0Po6fu6NNg290BT+jFCZNsgHiE1sIL6M0rxPJEDsNUe580tL4C+tncsqTc3 wdULPGYh0IYgZ9mPWuuqX8TAels+P73FFNzH29uRDNpDIYUAE+AcDKsCgYEA8aOh PFrYqE76tkYvtUZFgjI8R5Q045eAG4eET2BsJNRuP2aAK/bJ5ngwlo2kHlLtBi93 Ogp/k5wXi3JQ2y1vhjGQAu3cqIP9vVVYDZhQzx6WKy5P1tVcKw2r/KyDDiqqGcjJ 
A9efnVp/S/nESeHjOfiUhMUzpsCExa5vdDC0/tMCgYBMQ204EQ1gYX2l9wBMm2Eq 2g31sUcfxjXQyjxNUdBndHpBRivzPJCQV/KSGl1XWFUvvMcDpu5yUPIC90is3jko 00KqzLxPLVFLMh9ACtwIuoTyrmPNEMqQNxXEOcRvJUVNG+DS/pQ1eRHQpF/mztUQ MCa2iIg48xoQ0AbggUx2lQJ/Uh5JrTkyaABvM4Kms/QtqxFnauvzDWVvI+vqCw+5 sMqArQsog8ha1PgDiyaXn7aO1otK+W6X7JIfbkRrNhE61WACkPxFAP/aO33FbtlU nQ7H+eTDPT9FE1ySFkyKPUZCiICzz5p3pAIdQLShAHrDve+8iWJ7KzBB7uxY9COZ XwKBgQCRCG2U7oHU+R6eznmLIUscowU039DUNBxe/loZDOpx/fN1aZPJDlaII+Sl zkcOqXR9ILw/NDTlfpeptYJ+ahBgP6DjX5saA2T/uf8wgr/B+Aox0j9iBwfUahqn YVukY/sfdXp1xdWsllk0wc2HcoW+L90K7qHi0ihnR7oK4s9e9Q== -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 56 seed: YQ== zfec: required: 1 segmentSize: 131072 total: 3 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:MDMF:frye2ctqdffyf437do4uq4ngpi:lloe5ajvtkpmyflag7phiacafx4pfwt2dnriayshfj5hvno5ltmq format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEAuwzavy62iNdYkMdbRNLSzYURMLaw3ipteYaD93gk9JfkDt3F W1Fz65n0IQN4fRol8po48mzPpi2C3Uq1N9FJrs62fx5pdMTzMolXWJbNY4xAXJ6s z6ciaze0VpfH5TMd4ruRZQ9hLPcGT0zODgNnGxS0l67+VVUhmeNXGqNKUwnYtUEh 0RMym/Mi/LmGXNk1hBtWhC6R4IhKhZgSVuD4yr0FEAlxUOsCmritJROknA8boSzu UFrUrrFMpo2VBcwD7kfm02tejDyXfe9aLn9kLIrbSG3YmTAQM/0NkVsu4PUr4iPg ULPeyUzoDhYFOnEAZf8aXRKQL529SRN4YTayIQIDAQABAoIBAAJTqlvy+Y/Rt/cT F8pPIhKu61QTDbexyOtYVkdrjfAh/JMHxLb6WCoP3/bSK3tI6jxumTNA0cN0MPrO PVtcpeFADoqdxvuOIKVaCoVeWN86ZSmRyr4JivbQ+lQSbsjl3iMOKMScUJ3l08UE RFLtzWhLlWSGp4DJvpGCv7hj3B6UzRIpPiisYUecyoiA8+YxueCRwE/GjdEBtAqm bnucpTpEgCE6idbD/8/zzHPk8Y9wLfivGi9T3M/kRWbhv2x9qrmaSFTxp2eARISx ohWmvlvl1XTF8ghvOifsIGOGARXk89AldWbohjB/zs60x3+Ein99WJTHSvPKBAN4 54ftTAECgYEA6M/nz+42MLOSrQnotL8L6EqAI6vB9h/Rr01eRmSy/zB3/o9/PpJZ 7gYLpdF+PQxU+ZpgYL5qv0gPppLncWe9M1ET2gqImr2nS6KOpQ3Kban8OdGtSoL9 qoSdrQPpOM9awK4y+Heb2O+VsVykcJAOzH/wOz1sDH6GNe9Yldw4QYECgYEAza4k Eo9m39btQBS7z3Ne5zwT+XNusviF60LA8B9bepx5f3uKkcuSE5NOdjD/AYtBbEFW 6tqyGbee90emW2/ei8KlqkUpTwYH+9+59QTQR2cZUFYMDsqsXQCxKJxk+VG6ikJg /leB+M+/H+w3MD6qG2VcfUTmlE8NEjoebSGVgKECgYEA3IG25WmRaBVdcom1ICTn aU/PCHoxDyZaG3jjNzc/lpbYwII3mhNSHDEbrSW8NKROg89lQ5x3TM87C6GOlwoT 2NwNOnLJqg7ButCv3MMwHShonnbrdGyXSH+tPGc86bL0GRWlb1MSiKl8Fe5STc8U RTtUVTe69CaOhd06AU8A9YECgYBaTC2CCHr9onoeO/wII3pywilyxn6/C+SfWHsj 8GBVAAVHNpGrWFgVSAKWWQRbRSu/vx/Nk53FNJwRq98ZHY/yg83/ZsWv79HpfltB eo+GCXlPj7dTdx8c5YThc2fRHVRsBqBWiUyCU5JxDV9dSuWbiXCFfo5MZjgy3Fkn SCs+gQKBgQC2d3WE8uxl7F5LVyM0+ckCPzBQX1cdSaslQ/PmzIMkjmpm2nUVnhGF Ww6CDF6GOxLaRYloV1o8txTaKAKbrOZAUTer8RqK4ha5KVSWe+eW5MazbtBPGF0Y d24lR0kEUqH0i6qQodmnGsFHf4l/INvbQ2VlBtR1yzrBm7fki9WzJw== -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 56 seed: YQ== zfec: required: 1 segmentSize: 131072 total: 3 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:CHK:pbfcdvxhbikxd2hcc43oel3v3e:y5txpmiptmoz36ionmki3p6krmdbiqasw2v3wdq4ia5lmrhh33lq:1:3:1024 format: kind: chk params: null sample: length: 1024 seed: YQ== zfec: required: 1 segmentSize: 131072 total: 3 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:SSK:xol3f5smh63b63cg5zcelc34zu:646qwuwu2vkllhe4rcblvp2sdwc4hlffoh7p4gaxmirmtzuvdiiq format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEA0O7WP82q3/+vIKJl8tvzSgqFP2N9NpcIJ37Qp1qgVJMZoipl ulRnnmf+q5rNBnIMBtSiKwE8KDStxVhsQRloCb3CTFR4RMYIsMyxgKb5KJWLN7oF zezgtxzU+czBUhuXy3hsW7W0wLgrjJXsZboey2exrufDk3VfZ/8mMOVB2BRO8Eiu e2idLhWvM9hSxLz65clRaWJR3MbBFvXTR16e3urb0eBpRTKqm6a42YPR79eYZRJD XGHpqYk8K5AMaS8Iha+37c6soPsyNorFTRBU/CKsDb00v7NHutkuNVcTk+pY2vok 1ymla4hgZBfF9BFl6BKj+UMSrxwaPOdYAFR0/QIDAQABAoIBACakIX5b4pA6iNBr 
kJjduo11TCcI1rHXtYOedecZwQDbUtiV6EoRjbdzituAjg403goOXe6/s/lIouHW hHD2yrHQhWSaE8M+cAGI3gRJ/VdW7xBPqH6pIndjTvVjO9bccRLJ0xqUDNm+xhki aj33MfZTF37ecOKvCMXiX9UXtXmJlHHry5fMyaQp13lyEU25IHCVbfgHeeQrT3Hn jS2fJi9Dg7cDf14lM9oMW3T4rH83NabRkz9a392RSnEKubYMWL/aHcUiy16Iur5A MaWxofqcFvF6jxJK1giJaooMCRtWD6EqcWLkgc/ZTdztafswj0eIVXum8frEDxr1 G3kGsLECgYEA6Yi6QkzUZaOoELRPQA9YVr0+sx/kcsLA+8t0+9qpUCuWr2p5XCPD YH1vFUD1iOCHTVq84bXPxTsuS4xQFopHeyhMk2A9pB3/HCsVkIuoEUpGCAQwsmMa mVfqtX2iLxCy6/KBG/W61kiZq0PzpfdCTZjuTMqK+tq5xmSZl/pj0zECgYEA5QhB Epc8LnBOsgNVarkvj6wJfsKGjyFDQZd0KtJfIMkkc5HjSU+yx/dh4BGDC2UJ48UD +aXkdhXYXvSVvhW7nrNmOU3cCy6JT08X4BmzwdFcs0uu72FADoPkQLS80KFJ+xP4 NQ2OdE85wX3/Ac8aSSIazzkPK9/Ec8TImclUk40CgYEAo/zC7jONIiIdrj0vOUiN O7t//8BxZrSjVyyzZPdS1V0GXv9hYPYsB+GM01veDtO7rvH8mHJXB6RbCenpgypu r2jI/OQj5M67iUgnyGyJBDsnmhF3MIyu7ObzhaZG8M3FFjIfv0Z6gGZSohUBTpWm FV9CVuITXbuhoFKcGEBXQMECgYEA0b63aUEbGiQ7zYaECRLC3b5di7q15ApAP6dl +XljKPFL8pLeJVtZjQuelMc0zZCgd/kLZOtpyELFPmCbadMZWYNN0JjfNVZO5VS3 tsGS/6KuVHyxHgRf12st85wRdrbeu5NUMbHSje2oJO0wRgXWOreC+dd1b2aj0Kmc VEourFkCgYAsQfMB51KrrMyC49sUsYm9weiDBGQJuTATCxX1mSC84XRMiEzf1pIh csVTLMXlAGSZP4C1zqvWaQkETCjS3AfEVKnXI095dYp4bADi6QglwYV5GFvpJnMQ lQwWUMJEup11XmTe3KxKxjPj5MFRXBo7lZg8SJudRpgvE5IHQRfjXg== -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 1024 seed: YQ== zfec: required: 1 segmentSize: 131072 total: 3 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:MDMF:po3s5pnang2csh2auurmksfhwi:q7ltmms4zwlmtmjw4o4dy2p72fae3fzzvzn7u3pwvcnqpqzwo33q format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEAuteEAZr6K+1C2iJbXOS/KguaPV5jI5r8lmZwenO7XiEnnj4f iDaXFF0Sg80UhsKmdj8IRPbRIkUMHzui4/g1xUXHJcosuIPxqamru5dDGj7DcRke 4TeOfKWLWNwrf5oL9I9NJZ6hicEE2Zg/13dZeSDS6Zyyn6NQb5Txj3H5ntHMlcxv UWGCESsA3A+co/AwHtin7Up+29i7NvqUXgn0paBpE9b8ZjQ00QZVksEGTLH0KwHA cRDBnHrY0WPZdYS6swgsmto+gtEt8z6J6w4K++eWphQdeRQhQNfdIqNDfoa0G7Sq R0uRaTo3TbHbzVWLPBEzS+J7ELggWja4uR1h0QIDAQABAoIBACsNrUPnb70+g1ib cR0TMr+gA88fWE3kkU6g1UtKLsMudaAfpYlwNtkA51rKn2+8G7qEpMWrcB5q9bOe vNa2I8HM5epdz3dHJCEZ0VI9NT+vdb6ycKyp7iHnzZfQyA8zsoyMltTT3FpPSWxQ iml+fXYNa7xcGMbzTX4gvpb0xvXi8gcy5vWEmXaUBeQJrO/5Q2elYxW8ovyXvI+B u6+Ul+LCBzqUQsf7xspwydSPDX2lJbUeJXJUsCqC0+1PdRbNkXs7T6sjZ3kvTI5G ftGl9tXdLkSfJa/yvtUoKvhWOgyn4JZoc8nuNetQhrQToTkAqYn487dcmVr91HWf FS9oRP8CgYEAyC9oX5OUmBHw30gTXn/0doC+SPbuFkgLvE10xzSnSjGhuqegkjHl IPOT/wL7dipYfmN2CBttRYfyaelgGnefhCFSBkJYufIZeUWQhbe0WG9TPSmEp0t3 rBV2EvLPc+G/Qm63vrtHYlF4SxyUFd4RykETwVJ6LScPEj3hSQGXZ5sCgYEA7u+z 8eYaTNC+uttNFB03UDHkylV92daFF2dVykqdcFgk7NiyYOBppSgVsxaGW1epOLbK j1QAHmRPGCKnRltzV4+jb7+hxohHhakx8YRgPb6c6HZ1iW8ZBBN8+oRLsE3cSEhe cyoiwDoAWzviNRhIKR9cUEs6rKoHLoPiUaTvsQMCgYBpHNuFNAzWPLVpyILDIBTR FJDV5zLk6DehTFqBLxiYUK9HPzWFDkXto3iWco5vYZTN6JPVdfFOjS+whSY9P4q2 6ngTaUsFeCYAE5LrY6aCuRHQD7jjzzCrXyl8kZp0kpjG3TQGJng5G+Y6KmtngA9/ T+R7oj8c7mFvhqaAmyFQ+QKBgQCM/NYyY7ObJgWVXrfxqXetE3PMTHvxYVqxP6Fo t3SFCQ3oz8kZzvGnqap8PUtUdLp+o6WMw2U6ibf+JtyLcITz4ubulqYP7vQ9E2RL /e+IH4SYyuV6Dhs1w4YYkJ3Uz2yvHjzVOcS9prv1GbXV3Jkf4shm/K0Hm2CXeuy9 flSNHwKBgQCR0jQz/koDWvM79/3w/NjFEPKfeEmdEFEFkOIQQiUf1y/qP6XJ++xn BEvFjp09YGjDAZdxrve4AAGgB1KbeBv3w2tKxg9Pu1452YFic8zgqzlQ9Dj1bsxq oXB+6DwwyyO6v7MGMZaUTcLGhlzulAFW6eNugvKC7ZZUHzEHdSKDYA== -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 1024 seed: YQ== zfec: required: 1 segmentSize: 131072 total: 3 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:CHK:vkdc7iae4pgkdomhbbb554kv6a:g5xkatxv2entfssfh5eqgexb5j3qs6jvohofjqo5p5erlxlx2dga:1:3:4096 format: kind: chk params: null sample: length: 4096 seed: Yw== zfec: required: 1 segmentSize: 
131072 total: 3 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:SSK:c2vvrwbmunt6tov3q5eptobzjy:ppk7ttf2hkwd5ff5rzbqlfjox7ygijqzguzz3egjy5quph5lmrma format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEA34lcTzYiozXhilksLDEew9TFTRd21etDcUnHEJNt92MdX6eW QKH4V3u8l1cgAVIa983CuLeQcR/Qwr+93aQ2YO0GiC5rYGch9aJ/ybTSjN8MF2QP aHjc3etSS5WoF33OjD7wd/D84AkOmXA6On7oTcfteyeTtCql1ssnrIukgw2qy6Hs w67kOYeKu/omlmmKHX2YAwFjR+j0lBNS3lV1S+pEz97kNc5mBhyrx+XFWXxYvtIa DDaObv6BsTuNbegqLM1fvhDUbcAq8ymnr/IttlFGcGzu1w+1wo6SZQeO23/Mrb0y +PJASekB6CDr0tIrvU+0gjG/SroDE6oR/UcC/QIDAQABAoIBAAHE6qWeP8TuuSPC rYS5OM1pyP8uTdyTxmgs1ZxnrLZvouMuBnrpehE0ygFt1gBP3/KRJT1LOqOI+G22 92MIy/z7zN7vQYToT0HMV1phCeX47lwRbyvJAedOiv6zjW26+BNur7GLVAnXGILI +06ZalZFqR2d6qxBrPSK9BWIbDUBtv9u1fe7eOsjLkZGjYBsOxrTtiMcDILRvzHr gWzEMSdfCEiNdmaz4j+JjYjqkFiE6rKDv72XvFWS2NUSmhcLG5lKKqeUk55aAEWz lR5xamqdyyD0z7RSJfg9xLn33uFybhzW1smVcjpKP4ylBmxLNFGNQbXPiPH0ja1s jlCsU8ECgYEA6jDz0v0oLjUbVceQUZpFaa1orQEH5OCC2Kmn8xPR5APF9GwM0bgI phPTOp/3BM9KNMdJbwAHJHDuaKwTZvIYyjFijWQYP3iKRZi0uYG0w28m9aA/4eJE ZHoo0LEQuyxp058FLkrZXScaxv5O4piMKtEg1HTq+JX/brI4OB/zx+ECgYEA9Fpn hh+j2q6+hhCuDPbxYIUyf7OOx6lCEs0zlPWXfyM7VeArn5ys8us6jQHIUu6/rhf8 bP0k1sOvqCMyVfCh5iC+/3zAEmb2UOUQx96ED5k8gU3DNJJbI+HSGt4p1b+D9C/C twjkawoXTd5upxouAuscCmnw+Jubt5TOUkY7Lp0CgYEAgUAnMYk6xdXVklAj3IWy TZLBNMpe2vj1/jIUWVnU+20BsdZ4dL6HN3G1oKNsp6DoKZzbcIGpb3lMe0SNKMHw 4JbE95gIse8LEUIobEGjzEDqVaHt3/MLIBEzuYof2821UnBvYY85y+mrI6xzSSg8 I91rqxYkILJYWXXPBVrNJsECgYAVcy4tRu/CTZ6p9CLjPnY369lf/molOsVzExJZ HCn9XiFiS3ho3X8NH/sWz7Y/GXg4FyDwjFREig8ManKLusDri6pYkSHnO6SZu1H6 yZy8Jc5651GgdsyLXNJty1zOx64UrHCiUqSChPNAwari/lhVpz/h5iTiHf7QYb6u 2D1vvQKBgQDUHknI6sHa87DWtHEz17b6HNeEHlP5XBI+THia35LtrI5NUxphFTMp HSwnZmDufBVd2Y8f+0+vIuyZKvZbp+YloaTfonm12kNSQWkfwKL2cF1SteKcl8Aj HVrd38q2YwG+QV4AHmmc4xU9LySlNECFbT7sJFkZlEDtU4qVCqk6mQ== -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 4096 seed: Yw== zfec: required: 1 segmentSize: 131072 total: 3 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:MDMF:5zoan46gn22bdnxt3z5a7dahyy:o7wkn2sawrdpukt6gdoh6wu6xuxcu4k556mycqtfdhhhoqwfx4ka format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEAw01Lm2+OJNohSz0WJZPv3bY4ntD0NDLz3HN6LpNW/YG2+QeE 372TJhEORe/V9yNMzxuntGHI27EtsBNy2Ux1IX6ZoJyTn8WmwTxmAM4cnWWVhaFu rQgPXwhR/DGfYrsMgg25m0t1y8eB2n7cZHkLRDDePEZdk+54ZbEwrC9hQYis+xt5 CyLgpnz40/4ubnwIcifM+QZKiYEVz+tmlFyZPJ3wEdjNAvthQ2/MH50SCxn6JdYU OhX/IvJfRgO3PPk6Av0NhN6pJ3ifQscfwjzDtdiEbeiNUxsZggQ3H+taIoABtZ6m oaghx0k10mkgcPrcg2kw+qLCZEYoSCyLHsxpxwIDAQABAoIBAD/JNHrpNd8iYQJe SqfuR8a9V4PDUibkR1JGYu7oT16PqY9vHb4nf/JMWsGLwfGsFU+FREI9N6lNFlNu HrIK7yyH2SwkR3DE0KBHFjeIGb9saKfS4D9iJQcQRBqeqGRKHB0z1115iVkLaYVP rrKf+AaHAWZlQvXoSmlINFHgTZ7lrvcBM9iJ2ix3Q5C3rV+wdNrC1j4RRmv818A0 5nVSuCt3rH3j+xwCXkpxzMa4AJo3+RKy41/2uIrV+DETUOnY1uLeHXAOVj+Jycp1 leo8pyue6+G2nyK9xCgnSBXTDEk1Vn1pGzY83deMpnkdY/wcAYgBt1dbA+59xp4m 2OMHd9UCgYEA08JxY0V9DN7XqBe7UmACuMD7YBxjkmdkFDmJVCyIu1WnL6IDQ1CY W57wpnoUMMNDsbVrK2D2YAL5A5yHxcrD4xS6QYhCV0DoH8MGghrIs9yhRH34h5wg wSj8HxO0mU3J09oGvFOBlFeEJd8hpuj90hJ9EUOvFwB2LtjOOvozMpsCgYEA7Bqm BTjtxtXRuQ03WM7XmSX/XZJvWEOQAv393LnZDTVJVjHKmzl25vF2/l0Kiqak1Jdu kw9ipuLmT673v47/0LlTU/iYP/ZEgvbpkZgve9nWLnlRN8gIwJS+P9mDbyJloacL qeVhEPsUxSeWnRCOEsJbEU7PmznJwPjl5fZwskUCgYEAr1Tku1RSyPBN0VDs+bSj LEQlHpwC2bqfg5tsGHTTNYEi726OkxLNQ7ci/ERCKWnTx/U1afJbrH1pntLhHCTZ 8lA8M3xVqZcFWx8IaXsxyLKaGHLQ77+W4zhDIJwZQYHF5ZI1V4Mw3BlmQlEwtNlf J6vFQCExfLMWJ429m9mDwJcCgYAVvApGdswkvrA0ucu7iCb+uSm94moPlQCf1ePV uuIJPjuHDMRa77pLXjUXC1eaFeccjugl74ekV0TeVvwFjVNtUnIiS8MwOCpsZ/Bu 
b7UHULFPy+k3Gln76HVvCUX5KBB1BhyjwjLiTkrGL3PE964seenKORgRcQtqkT8Q oVQUhQKBgQCGXXKYWdX/mGxXNEnaSxqpQKOmQwnldsPggnRA+u3M5pDIImzt3nbR KNWavPy6v/v3GLHSCkMvb+oor7qk56YiHpfmL2pjkNAiwZwcdUky05sfHvhs+9d4 qLxi7Wt2LH6aidBK4feMC8ReOlu31JCKWjcZ8RwNNPEXbxjtOv1Eeg== -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 4096 seed: Yw== zfec: required: 1 segmentSize: 131072 total: 3 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:CHK:zvla2jyu5fqlb2s63zftyfjnla:yqspaexhew55cvv7w2m6czezhzkqnyk5fea4gvnplc5za4xlvbva:1:3:131071 format: kind: chk params: null sample: length: 131071 seed: LCa0a2j/xo/5m0U8HTBBNBNCLXBkg7+g+YpeiGJm564= zfec: required: 1 segmentSize: 131072 total: 3 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:SSK:5c4mykwk6eqqcbh3slodybxfee:uzkht3h5m6ianotz2cgy74zxirzuaxtkxpcy3ghbafoime45n3hq format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpQIBAAKCAQEAq5KchP+y3p8pRzcw8rMZd1Bii5k3b4TNm/2jqy/3jUt7JY6T pNmSzPnTQVe0iz2Seyg/2qynbX5GcgQ3Yon/XSXQSxkKmVMSq2emWu61G3k0y/fs KpblK/YLGUiXvaGZQLeB0jlyaaAWMKrFAFcJ0Fg74zS+8rfeOQxGMtedKJGVBcin Q7OgC8JjLBPKlFgossBzZjZRguTCd+6gzz+1j6aHSgPVYjjNQ+g0U//zUdIImSgt tSXBDiOmvRWG1oxdtY5xqoY3/H2HKJUNK++Q0D7BLV6bQrYWBusf9JxJbBQH6ite Vc5RMW4b/tsPJHvbhtozZO/ByJ0mAgy1QFaVxwIDAQABAoIBADCBDLW+0fME3PcQ n8plHpRwCcP8Z0MkMLpiTMRnFZ2A0sot0gifJ9TB5drJsDVTDVe6675m5BhcxA6U qZG2gJZ1S7sHU8s/xH3nmgyIAnRHYkktiDsMGLLCZqXZs2g9SKWWm2FysykwREWy 6WmssY4Qe7HCZh8ZIv5OvYO/F+NxOSHPcpX/ubbypIlIu6GhhTkqfpw9a4v7IGxk 4i7J7rhqC3+jq5iU3oeWGwUXxN/ZVwAmx5V1iWgz8Q+0Khc55od1BgpLU49hxrIP qmY6cbiuKo7KlgyuqLBuOrrClJDc546sGnbvudQf6ENRjDhY4p+udwXUSBuCLCYx P4Ud/i0CgYEAtlnYKLLuPIq+jwMxefwJtWhoV9qX3sQw2716+HGCbemdFoPLBXia 9VGZ4dbx6V+pa2KE56x4s9fe1cpFOVfaoY8NR2hOgy3n5OoqyGv8xnTGqdFGDDDp mC9a2fKZ5QCtdDCosMthnT2nADr30NT0RyHVAA729zrTsxC/uTDNYDsCgYEA8N5Z ymG7j8dzWvSTLy/FAv6hF/ovPh2/4df33EvNjLPgzhgC1qpuOH7Yw1+CbCMOG5Kf YXjEZBBwa4G8mF2NuTriY/ndezhysFGoWagmd8UDm66DnhQMUoYJJR2smJ17XcRc mjk9Cx0kvGw2bg7LSq9wZjM85AIandVV/1nfc+UCgYEAitKpWoqmDlc+LZgrwYdc SMwcq82R0xkfbRq6lIut1UmFuw9Ir3ia1+pwsVs3PgkC7OrK7akDFz9fuPjNbJNy sY47eMJzCzEWmtKfEYgMn3VljQDyR/Ow0pgynTwxZwL2Cj/FHRsozFGUYvuBkG2f LswV4X6DC8KwSmGU5ELAB+MCgYEAww895dApxXD+8QyZWSA1SoyMRs+bjJEpACsW lXdpuWU+S4hUXCVe5y+KOQXSp+HnndqqaZQUbviFIfrJkRZKHFQcXFxPyWbYMgOe 8yRiKqIInv2/preTlwzmwQD78geujSvk1hw+XA643kI//fbLGOtkec9Ko1c02NaY MxdEByUCgYEAhy17HFcyGvj0QnwgoTxWB8AGqivfq7ZkCrBZ8a4jtNtNbYBiCH2i 9v3uomcjTPhZqu7W/wloYhsf03b4y/V4ABQHpoiGUq6NNAQoSu7/V96jW7K3HOT6 b7YMYzPnRAkYmruFnL5Q371e82JeQl7aK6hkxglVF/AeXKpEKDkMw58= -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 131071 seed: LCa0a2j/xo/5m0U8HTBBNBNCLXBkg7+g+YpeiGJm564= zfec: required: 1 segmentSize: 131072 total: 3 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:MDMF:sjzzvmll3obdgezri6tldxtdsm:5rcvg2egftpcrsli4giqg6txgz36xdackev2yhqdubbhncw5hisq format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEogIBAAKCAQEAjw57luJrvanJrEOiDtCSd09DWTIrDWKDTTuRHlabqUyu7i+Y O1+WUn7yWdcryJ+hQoH91gmexQ61krA7RqjkjFBRthSKeVhlEA7gFfFVtBTxHsqW nUwyKzlfIH3OYZjppugjyaHX0uFEVFbFPYa7iQHvUbWzzHwlBFbM/fdM7qfwV2cN IgohXyTeeM0JHj/FO2LlE7wbbzC+nxDrdTmzbLy1I3a1gWiQeFJKojuY0ONtlEkv hD0TS8lp3VQ/L6zFDdAmlBk/GnmiwBetGRJ8CniXIgUQbCYUg0G9NTPBSxKUvykN zKWXzTMRt/xkhL4UVH5XcBc4fEOzxYbi3akk/QIDAQABAoIBAACtU57WyhKPA7da toOK2+Wtnxum3evLvQtjDQj8+SO+WfSYv8zomUWDuFWRyXYqcY06vdjUMgT17jAu aCakv/Nkl//yefqbstGsbPcKi04804U9zlUuNndrLHV8vCwH4sxJcqFzktAYNhxx Yf9AB6DYHzYq2MYsRWVs4Vvp9wuwfaDvp/rJ5sndknid2i21ZLukrNXccmdhcOHq rDqv5GyyWgDtkxoOFLausInih4Y1IF83MtsqNdsXJ0USFv3jMyyHdX4nLutZvXWr 
e2c+08Em/cMYvnnRRnL1NDHz6NepQqHatLI8UdoZ673VmWXS95pJUHsn/wqlKpXD FBSHzHECgYEAxriXuwvlCom/eIALAfjVweHM5HDe/1OFN5E0dZuxbke59oqp2/rx zjrUVxAPh9EIwjVi/av5b5kf7TXJNfDb9vrKLZmxn1uohlqxSKmZeEmZPrlD04T6 EeNVH4HUdQPoVZM8xzUEKxuLlL2wgmfVZiayxg9Eh+9o57cbQmugPy0CgYEAuEp4 B82k8CvOq2IOTam6Nw/72uTxrvi0gBOsHuQ1iEHocB3zF7M7IMdnlHfAp+/4B9Qi n7JWs1Zn/L9O7MIHwDguYWs8kSGNWeUrmv2HE80U33yrTnA97pejy7480X9AWjfZ eSlKeAMvl/jC5ABjIUu4C/dfP3DDn9ktfbDonxECgYBdPjo70v36rt2/zdzcZQTv v2Kjge6wwWDNzP1ffdmIVHGGpFPFW39gdCw0Wd3frY69ic1UGACng6L+a/FotQaR YeXB9c7pZlmyCRYMcUAIuAgG7WlM86VfBVtouEOXUGkQ1lB7bH3zOC6LcWJEHjJ8 hwX1dy6B1i1UJI/O369GVQKBgF0Jy3s1CLXvh9DiGhJ/Q3GU8jNaLeGF0apvRA/8 akI18+DWXelzJMCxqss7gdnGvlNFd41j/X1ge/MPqgrBEiSquE+aUeN7Kt9dYRxI Li+C86y4Rcu9wqZ3i8vKaO3i7lwdof8XJ29bPGXewpVU1Lb0gSAZbM6cYUaL3/l6 nJXhAoGAZhDKzgaBYkOpsTDOsWFneBDM1+FjNDT0T8vGXBTod+8I06gVlcUq2i9Q ufLTXRPLqQWhnDldp0V2YI6wivyQ4ab2sRch4f8iY+N5aAPexrAd6wYxpakEGQ5W sg4iw5714ZTy6ZcvPxoa1IFBkOaLhfl05/Zw6jq35Y88gweZD8w= -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 131071 seed: LCa0a2j/xo/5m0U8HTBBNBNCLXBkg7+g+YpeiGJm564= zfec: required: 1 segmentSize: 131072 total: 3 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:CHK:3idf2xqm5wnw2q3nbva5vmpaiq:7nmkh5q55omjloezibvyzvveq4gqeaivei5ypfm5vfuugwiz6hpa:1:3:131073 format: kind: chk params: null sample: length: 131073 seed: /N4rLtula/QIYB+3If6bXDONEO5CnqBPrlURto+/j7k= zfec: required: 1 segmentSize: 131072 total: 3 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:SSK:2cwnw732f6pd42kjvbin4zv5nq:fzrpbc322pmjemwlhwn4mv7qlfmnxey2wxmcmuksyudcjbomxjaq format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEogIBAAKCAQEA2C1mTlS/6c3YrjFVNax9bCyDp8WR+EVMdHQBNMrIxOsTkpXo 3fvN90vWuxcZNOY+j/XxLIsK551JdzC3PFUPICwBvBWhRNiC3P3AiIk2q9Ju/8fV zm3Aj/fmOMk4Yy9cTTmXgk9YEnpLnhJiLni8au3hZpVHnQmfTMKv6OrwKP9DopGg p31pfZgoUi1Yfc/wpOVpMyJUNMAgrUOm2s5nnoMcNsA/pxVdmS/Zob9q/78Xh4VA 5uYCBVj3V+XKBvNg8xlmkFDp3CrsgjhG10Nz5BpB0ucdXfnGaId26IGHoGrlpzLL C1G3bX0y7EHHjBV86G59pVdb4zV3TSxW0mMPTQIDAQABAoIBACVn1agAKiT+pVue 1auv6RPqr+071oIrG4ua9wp3fD32nzBiGCUxCPadfM5qtMXegTzPxad7d6uUH74s A8jAvxlGBBbTd1A+VoZ+se3uMDOS+fnwTiKmAwfmUUPKLaOb8lC6gmjd6dNoreTw MWTxJ0kpWDMz6WxW2eWiWmXnIR82msDgdVUnhdUOvLdD8HTC2HV+Vn9CFL68k+tq Iat+G1BEeFp1nDkLbnH2zDMLW8GUt6z24HwjFbHbVkgKa6t92Cg8l+pV1sNu4jt7 aU2+AoKYRnWSSLedBkNjSw+nasK3Tg1f5/6xq//hiCMPrN9OPL3fAkN5QLjRW+EV c9HbIYECgYEA8XWQQU+6k5/rrkNJh+UoeeH4GkgFUSlzq5H4b5iQdQwSzY/FrZbW 1R/S1YkVqq0fjmZjCjTLItBpnbfCt3lhMQ1RTBi3fs6spMjY09aGdJeFJn/BGK+7 M/o0p386LOI5VHJtSKdfI97tctOTJ2jvrL8mVJ+oqaA3PJzHtz7nQ8kCgYEA5TIU d1N/Z4GlNnf77mEishSs7Ln6J/VTcNzZrzsF5fd9NMKoEPcO9OSFa+ZJKFOyMwkl haPY6WOv3b+LpW16/OvntHPbfirk0ruAwItdLva8F6vOYDOrEezMY4bXVAM58p/S qYWXfk0mXr1kq+Y3Jk4KHwh+HFE2pqHqPybJSWUCgYB4wOmWsA/H2jdcXAw+6QyX /7k7M39tOoS9be/Hp42+633PzbH3gTMJPLQM1FTAmXnpliy7ovFgBMh89rRrW0mO 5XEd1FKYGTXf5w4Ayw1M66XMPiHMfb3qXZvNlGP8pFo1cFBVAFclMkyfm03BbMpI IwRBV+NaiWR2bJW4f73aeQKBgB0o2/W6h9ERa4Wcik7vyMxDWSTdHHiM+8q2LnPe 3Ic/j4xw35UY+awqjtcFe3VIALoBheaUy/oVlsBtqESpwyX+lYId42UP7ADrnhvh Hz/kYFXao+0VZcRoDjDzbN3hczPtJY4v0vBcvG79RZuhNI8JCrycBf9wTbWxj+3s 71sJAoGABKJgNFvvI5DzXXkV8V7arT2jv08AlPvf3HqJ0V6Fw+VV1sfcXEGkRVKr VleFaNqo9zt9atAvSNUTidx4AWL/xlUzhR97lzdRQyhbJYa3T6mU1JOPD/zHJAnQ xiQGOW4ll8c+ht5wCademva3C6k+R9/pk11W6JjeTISMZ8W5ICs= -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 131073 seed: /N4rLtula/QIYB+3If6bXDONEO5CnqBPrlURto+/j7k= zfec: required: 1 segmentSize: 131072 total: 3 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: 
URI:MDMF:75ls7trk6lfklxbrush6ex24hu:tho6cve73atgkhgpvjd7446w6c4wlta5bszbfv6qhbs5ykjltwvq format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEogIBAAKCAQEAo3aCyqOOeNyiAhFyMMUjsC8yiJHP1N4AQtNJnaj6fG2p9oFH LLVqXIj+9X8wFQvU1APqfp0ZhdyJnCIOmiZCWGZxAdBAKMupEP7KcARD8r3pSeiK MeDYJx23rejrtKYjJeJAeocIj8x9vcCNXpimlN1uOLkHJk7FEvxn0qmCblfqGKPc xFBsBpBIPKVK9SZ4vOPa02sKz+CgoCLxRSjyLj9XgH5+FxU/O2LeOnrq0TVttvsW UXILD0P2IsIZcQgXszyPoCkdqFZ+GgzJA09ut/inlDtB2Z0kvNfrxaqOtTlClrm7 7zYclePeL2mYP607xbqjQzfQAe0hrlLR1L9QQwIDAQABAoIBABSgpRL0PnYzBWtf Dzq8o0By8Zy5UHGWIuX2lWtvV6NcZV565DlDRWzwSW2VGMNhzwAtjv8I98pkJlC3 yCxHWAehbxuhpM8LxZskQ/XhxpSlxtWG95V2wGAjSGVfp2A7DOQVdTnhEMFbYUy7 ipDIO6aSe3vDTEssaoMhqz+yw10FctG1HANZLxStQl+SpOm+iNBUuSE/m4uz5OFy ENoaDxgWyar9OF2ZvLZc+hg/QaQs7pAP+z//QyGNVsq588aBA3Tq1J4tfTDBlyKl 364z8g2RN5Ap2hW+j1C2uT0pUD8wjkaRPkdGtsieoutCX2PaCRcoQaHHZ39t7LIM YjcdT7ECgYEA4/R5aPJLWd3soy8YpMzRQ8EtzDK0yOeNe6RTFKfESkBReJuGBbH6 VS9nETazREJapY7y8tekppNSsN6oMCWtJX9EjcLELL4ms7o3KC5IeMI+TwUpGhov CZTiem5b40mXimteB2oREcIFA2EucTua/CRDqvmiuhLhcyZejk97jlECgYEAt5LW BaDblWaAeo1gRATyumg0vX3MTwXOR4iyz9yJylISMsSLPbSRUk8qLrinBdFftYwz GrDEXYJ26Wak6b1JAQMD9GY4hnbrHFcLB8JTofV7c9Aem1hKOP+KhuJFd/aibBBH CZKQUvUCi1E7fQA9iQX6qvDmwDxgvG+J783GbFMCgYAk2GI7bVZymyVhpv4jvRti CTp+0/9Wrd63inMHVqqqmcTRasn556+fzz6okJ/fO55tPjLUv7hUWGG4RvUGe0CG XBDXnRCabs3QpRu/OePq6PKrURk4p9zMfq0wvt/JWB7Pd9VF+4XwydyHlFCuasT9 Vls9qoX774tTUnNcK0q8UQKBgCscedTCjS8N7nhZgVUYEGUEmfYyd+vLAkG8cbnt IhL4qTtw+v5XzJUW8GIejWMJY7/AGDRZdRQ80m5H48zc3is1qRUZeIbjoJ18N6Pv 2DI982skYju7RVsTcFXzB7t/mW9ldzlhSTGiRqGvRxg5GTp3xAGnJ5nX1CQM0ckW e1XvAoGAfdS4gbE6hbLmXupDiOeT8/W2FL36QCtdXMf6TfI/n8CEsAiuKsxmU1PO dh5f3uzGlGSuk/aFxeDLcpKkG5PkaRS6Ltpy3+39dSy0qhSiZQt3cddW9lFj/YAv XConXKubaYCYzR4XfkFJTbbS3oHv3263xIaZlplH0cgb5EBO5v0= -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 131073 seed: /N4rLtula/QIYB+3If6bXDONEO5CnqBPrlURto+/j7k= zfec: required: 1 segmentSize: 131072 total: 3 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:CHK:o7xdokumtcukutzgmmpfme4nnm:nn3s5gau5dychy6wlodwriuit4wyavfze6bo4icywcbhdb4ccxla:1:3:2097151 format: kind: chk params: null sample: length: 2097151 seed: uqWglk0zIPvAxqkiFARTyFE+okq4/QV3A0gEqWckgJY= zfec: required: 1 segmentSize: 131072 total: 3 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:SSK:saskbmnnhte42buehoshph4s5e:sgy7cr3nt57e6rap33epd7nx7u4zczj27mwcix4kfiuyyh6fhg6q format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEogIBAAKCAQEAwn8K/miyxzuWz3uOVkrgIDi+EAvbgh8KVfA3QBrwR22RONhl t5eTvdFj5piqIFfGzcxcAizKI2XNVH7iKHWoJJFGFegL404HgW5Oz11zPIahv7qJ a2KtCMaZF4WkKeirmcIryLjROV/j1nCsnatkYDc3BYUpdnGNHoOzDccoX/Aun7AQ IM/FavvJPTb5vPhWpely3yZluzO+rqyqRoZBJIJ/16vp+8AEAzLKoHiHvvXdG/FL 9XJr1NQ1Qai2orM5ibnda4cvCgSBH5QSRN7ZSkM4avQMB2ZA3iJuiOQJMOwC+7Rj fMpzXMkx+eVj8TDaeu29VHTdkts2j9UR+etj6QIDAQABAoIBAAeyYlt+BjEnNQkh 8RiOHv93b2IQLhAgrVaISo9xYXfaKKiQu7m/uFuHKUZrXTQpdRcY0r1NS3SKJ02E Nev29//2dckRJUNKB7cCCAFhx9kp1MXTGnQS9BkITu+k3MHB0OSlT/lCAxmbp1cJ n3Mf4LmEBdvkkKb8yGJgQNo3OuxM8coddIMZp9pLJ/Yin6AaKgzaM1SX5HUgVQz0 vfY1P38vkZRk1/CN0nitzUm8/PVvUncjF1jnaHglYvy7SA6XuR1e1rNWbNkJ6Dhi WZmRGZSUOG8F5WL+3Ib6sr1996TEX++8Lp7IdE4W1Wxw+kMr9xpySb8INg4hgcm0 nzwrKT8CgYEA3avzL8w/de2nZogn3tZBVxEgt3/Wx/ZBkMHdQwI3kiceG9HH3L/d D9KdWVJP0IDXrqC0ymm3sxnKLz/tct6l+JR2p5pI6uPqcl+HAQRFIGV8WLodH9Hn lifonyLFUmUDE7/rlMEKfwOxJGMFq1y3aiOBe7v9izxxnu8JNZg4rNcCgYEA4J29 HEpID4gWhQdsNezOYI96fM/EYhkqOm67E9z9CBShqZETvoYwJClzJQyW53ghcuPp MqEGcYHaFmFzuNindwp9SwWxvJOX7zvGZNEwPOX7I3q2/9zUinMuPVdXtGeFkIMW 
5m1E9IqIewebyiKkJwKLYTMyEbL+pl0wpd2vnT8CgYA9D/HEd+n/TUDwwI7jFngX SNOPWLrMiGxVOOH/ZGv0aawkk6wPhhaaFjVb9o2f7O38364NmAOPZYpJa724B9cG W7c3wgtWEQRzDxd8UzXLj8kqE9KUAlleBo5Qz941LTgkx5hYeLiwdk7krBZStw6b QT6Y3BcitLrDwiryRYVPLwKBgGNUnmrCVre3oO3XaH04adO935cOcnRHWKtaiJSy J5vJM+y+4ZJh2SxEwEzkEl/ueixKqbfgCe9sUzuOgRR/ix9TnjDtJbqVMp1zO7sd 300vDy6TeBYSXFOVuB8cXwbCuQg9UIU6UUIreUufA8ASLbGqqGSltUCqfX6ou3i+ XokVAoGAC1g8mfAZ2jAqhm0+ZDnETP7TduBqGMTNBtrNyF5L7SpYjfm5Z3WdoTQe OhwKIGVf0gtfQoIllKNtDymxpM4f5rmB0AvnSY4Nx3OriAVfXqXrYA0Mk2OfI3qa IPDclojMdyMF5Ch4Bf4WQvbE/sPbXFrFnlt1FXUIv6KkUYzToN8= -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 2097151 seed: uqWglk0zIPvAxqkiFARTyFE+okq4/QV3A0gEqWckgJY= zfec: required: 1 segmentSize: 131072 total: 3 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:MDMF:drwxx3brbk5snzvxum7edrm5mu:io3zsepo4wtx5zvsgabwopzrwe7ym6qymgfpt7bdzayyj73orqfa format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEAyiq2CDqbIfT1f7icI+VESHA6R/j5AthU06WgxAvb7xHe8XHn z6lN8YvL77zK8Jg+B36rZbw2hIhH2tQVWUx7EbXWgSFUJXuYT1DSgM4TDhKQRwwn iGqcLjShxhy4bn1Bgr+JCJHdPZhhszn+prKCPK9X2AtLGbBo3FEIbbJTxJFNlEhD hP+mUzHaLjCkDE2CaBABKexSYE8aLFxbOuE7IaRwRbJaxx2Wm9rMH8BXM63Dabve 6eqjiRen6taW1IIhFI4Cfcno+Lp+cU8AR7c2Rryxe/G4SzaXCGNYmFumrzx7SQv3 RndTofKUfsnixPwZ0dsCi3jI5xLVAxt2KoGvDQIDAQABAoIBAAPiNGR1OtUlWnq8 IUIzbFRrI83wEQqtweRUBsbvzf/n0VOJq+XpJoFuVfKzxKF74MEruAlCZP3eFgdI Zj5IVbhvfbHnHPaWmeIVtzExZrSJ2NSvB4M7eV1JGNs/J3Tshl6bR7a9szVIXCQc iCJzRJPAtNxVGuYIUgSYiSPgLIc+FSo0Z4gp4fp8zHDK7ZBFF2eKQAJFA1n1H3GB qpBWa5ig0IIAJGjaphQqPD14pWSDAIxGPeESGnzJbiSKau+5gEcwTwuMSREdoGlK FOQOFKCw37ILB75F/h7KzJjaFzC6LtiZZuZTavq78HDhJVSILW/nepJV455XQZso UgLRhe8CgYEAzmJ/1YEa9zt5xigz5q89h6BLEIf7kI5609dUBUleVvLjFq4Ys32a fi8LnijUl12EcST/C6ni4tgiw3Sq/l3lZM68H+26pLnK8BVdHMF6cFwR2Xo22lUK xmbD/nsfmIohTJVMpMQC8ClQgOUpnQJvzf3plvlol38lGwOC9rbj0f8CgYEA+sSg +uzi1TLjGYa+x+WnRRVAF8Oe0yAXA7wY0MrnKQsQPBcmuoj3abi5mv/+JfHgXzfm QJeun0uHcDHjyAQ2tDvAna6sc0DuCopE7Nuvn9IiDoL0dtIttK0qsflmf42WVo/4 pDz03rpuTFaRbMv6eZAzRNUgkkZ1FcUsWR3qpvMCgYAbJUGDJ5QQaLY/phINiYci S6cT6Y7hGJx3OJ9Igrnx3ciYtxVwplinuDBjASPVNOuyphcVxaaeB6eq5bGH+3ms pLSBzpb6C6Xxph21Jo2gMbv3SufkF8NvDR1CX5dsTN7MX+bQ1Sc9x3FbQskSabui 8H7E6NEk/Ag5YWDcannUqQKBgQDtavx+lYitEWCx6kD2QRf88AGefjcA7IDdqFhW VcRFt5PHUKP6N3MHRT104qlcg4RKokH9JZ7OclPohVODK3ofafMTVy0ucWrtz7sy BUxhpDFaS+HoHVXomYqytc21NfgAPI7L8Gpl9Vw4Kj3FI9og/cWMhbwwwURZODSk qw5ewQKBgQDIOuJpbnGn18SSI0zat3hz6pFpKsyvk9AcMyLiVD6QJCwbp+fUXe7E 29Zck+c0djHBEBWivHbO75dHqWp9ERW0riwxSu7qUIgW73yxKJK4tI9nXa/yznlw MULnyIHM/0c07fKut3vRS79Jm+6P+bjUh6KMCIBbZJO1nE+96c7S2w== -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 2097151 seed: uqWglk0zIPvAxqkiFARTyFE+okq4/QV3A0gEqWckgJY= zfec: required: 1 segmentSize: 131072 total: 3 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:CHK:vws2xrl2nch2hsptqb36yqqu3e:fhujsv5rkhyiqstrktjvfy5235c7gcwekyd3gux3bv2glsgstjga:1:3:2097153 format: kind: chk params: null sample: length: 2097153 seed: BTBX/ampNfLU+ox7xipBGiaSbgC0kcB8Gy7BkJB4oKI= zfec: required: 1 segmentSize: 131072 total: 3 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:SSK:62g6yna52dvu3dd2ccmtx3uvhi:3jrudt5drwfjwqbh6yfph77rfgmtpxflnijmdqzp4mkrijq2ksnq format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEArbOweiMtfKQfeHVWoAU+zw25AZs/mD6OuXicSRxSZjnCapUR tekXCsB2fM3CjP38HV7ofiFKoYUSHoD9VJ3mXyFmOynosWWjkVyeXc2o7t8z8rsj gWKuHwyFMuPZo8EEEx8dWSTMMprQkqV4tfd3xZelgNMBc0TLc0hvntzlT4g+CzT5 5H0VKvRHCDVtZnkw3hNmWuJNiSBkBRBBfoyJLa0HizR/o0kQBI1gkXBmSva+++Zk 
5pe07cCyd5cYVA+JD1Fvb0AcyszctyOcIFlpW8Ysthjd/UhXny3G9zHNuNqB84Xy 1ykwnMIaZq0/r0GfS8BF1VlVqYJlYhE4lF/RBwIDAQABAoIBABoKDGQa3uhG8ELO EEjX0HUYoQnZHJz1j87FAmTBXqbddMQmiaukACTH+ls2OzqInqFGh4LU+cuh17gD 7TYgn5bWOm2XGD9ztaQGZuU3/eGlSzPRkv6D7QdRiKw61PcD6dj1+p/Q8N2LMMYz ERfyO52+4Hwh5Z9Cil9DVhxSD/wueIM5zGWm6vOqH3mBhNK31D9/QMiWUdVFb8UC uESDRYcEt3R/r/AN8hPMptyQRoFXJLiZLt5ca9j4j4E3bNq0JQ2qfdn0nTjpNEQ6 zgacohbJ5ute31dBOf8Kj4VEw0zQDzm/vNoCKhj5WFjG378ydL9iMMxe9mrl+/oM c1llBF0CgYEAxOn0k2pDXC/lqW1BR4vfrzxH0xJHdl0ro5qmxwg5wgf/pxbbA+od 3XhrW77Qj4kz+N2t56tMgr+YaVAK/iyWA6c+icG4HLEirKJWA6wU6ZCPHCpY5e4q w3tEUH2WnwlrGo8AGdzd+8LaF4GyyP09XqHtcamyErhmmZ1loPhcO/0CgYEA4dKx lwyvdbYsMzkVJ2XskJ6XE1it9KPgtmXkMCCvW8U+7v7QecNBhm64o47pMDwR6IiQ IkBatSxV86qQsw4boURIbnQ7bSZAnK1DNie4WED+jc8tt4MERT6r3JkaIbubi0Ck lFzG5dDXACRovNBQ43k9xrktREY9h8isBltlNlMCgYBOlfwo1PDbGrZyXor97cGw osMbZqEkiNyAp5jFt++tExohagqwTj/rAkL+U3HSxvP57yaXXZLkX2iJJwusEskv 3hAkVC6RLNRkx0jCoGucJzgmCnR+FwX0C/7gjK6O++hFqiplJ/NjpYj6dqWOdxqF 6OPlR88sj3FK/zju/A97VQKBgB2kxdE5RhMirdyvgppgY9R8LQLKIlO563amG3VB 5SMb2m4PHxjMy940zKIT0YKWcBdhTeJhJkcgIcxRuJr4oCHkT8nIEkD6w4KNsAP8 5NMY/RFqf+rWFQpt9quHoYmKEhoOi0w6fZWPe5m2LdWTVvr1YGmkx09uFQetDP/s oXWnAoGBAKbOfQ3zJXUQSrBlWJ+mQbS5agy/bT0DRdkaHuYmXoFZ0nokQ6SeUeTp YcLDmXiXYzXmsS+I48myk578dACfRUjOxlifK/Clzq7ntc+FfGx2cp2vHBIbOiow iZswZxsmQpafxMj+NUe2GfxDmtw/Lwt9BbpMqSsBYAsnpaZcWWWK -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 2097153 seed: BTBX/ampNfLU+ox7xipBGiaSbgC0kcB8Gy7BkJB4oKI= zfec: required: 1 segmentSize: 131072 total: 3 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:MDMF:6ge2un5djzopz4fgfmi34eajbm:cqgc464puzka2edig5ofmwvciu2urojqsrdnt5jen4zvsdnkfuja format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEAwzTPdh1mTkZYtu3zkzFo6oHV9KW+xbdnCKojoA3Vlktcy2br KJZBkjATEcO/S4YRGRdsn57efyKpMZTMigTt1DyNHyoTYULNV1X6Pnpujz3zHgkb FCPwUTp12C0XhM2JtOY9Y5JO1dkECFzd3GLMHCGSP/svLa0Xwh9t54wrdkvl2IKr 5VvGrb/ir6E6nFi6kL3LomJ6iuTHiLx5MX7CYQieXdy99OCUbcXVOUO75ZIPG9hW /a2cXIrKdgMVh646WV3QMgC6W+zdAHUvB63UyWCRqC2eUicu0v9VLnTo4+7WfOt+ o+eUtZ3s/wuGhEmJ/DyFGZwrUrdEVsqM4biyJQIDAQABAoIBACkNVaXy592FSMnr v+JQLU7IEE1bgAPHnrkBQu25ixYI8lJqagEGnHKYfqIpRvUklDrxJKxq9kLJcMiX EO7ju3p7Y3hO2nWFXXbFA5QZHmAseJDz/EhfiH4kq7zTOtN4gEHVe8qRbdfmREVX 9maPNnqiCsY+1nymHs1525yq60b8r2q/mYyXCon6lyIUtuzl9fkHQdTvEBnS1zaZ P2f7NHtA50qgFWeRiQWa68TNzv153toKDwo8sapJ/Rw5IdTOibttC75rxNq7t/3R 51tzyw6+9ypoLLzRFWFo4bveI9zMxXLx1+2hy3vnt3fAKAL5pEHS439sDJ/1aq5z jcNXXW8CgYEA/Z6Toz0rXojny/Gg6ya0JMhJX9KTtA3oLkQmieEjF+xVmO/TlvO1 /+thSmAcj0Pd7mmt8hueNss+VtiUMoRiveoYDfbp/WFnqbUnGLCW46Nj0FSBFsMe 8vlvkzHejA0Fp60T+bivnLseYHX/QbaPC+bUZYRhYY1Qm37iY5NT4PsCgYEAxQnf WgJBDAY/9qIkgvRi+U3g+yobyJ35yLUvBiDOTDXyv6xwOHFnDAg/1SjmdyMYOksB dtRgNxq1Uuk4kxbA/Hokf5F/AlNcF2BVQ0+qmRimi1fUCMoXp/PIl6a4hph4j/hM 7fmz0d6iElX1AkcEA7Um8LR4gBY3iLP619UEj18CgYAIa7t7MAzAlssbempdZGuW zQ+inttIny2WW6zr5w3DPZWZ/lyIJo9kb+xLC+Xm29oCkH+2CjS2nQj02TwScVLV +2/RBuG+B/3pJJqntzVLWaF2yVd/6fqdFqsduAornEMTzitbn0Y1bgEUMtbG18jo HEHxHPQeyRJkF1Js+/dNAQKBgBNnc31zt2AtxWLOePYEhzKx/rP9Y5sQI6cmYKkj 1e0favaBTtPgJxvCPDcLvhaBeENVW6GOLKOAl9bAbPffR8YVaT6+31klSG5s6Dim wdAt40jZr2HmNQovMdPtcUKgBU94TmspKhJC8IcJvAUrZTPQRTNzMmK6zWFDCDL1 IWvVAoGBAIxc0Zx74SEjrXDFfALloWGR2wisDA6XXwqdWzThVNwRrTgr6u8V9icW iR5gFi4Akp4pmbYVUIKKgEOmXbOdtp3wkcstFFi128uECJEuveQeK7WqEbJZEFSR BtHC/5tsHyVRg/eA/yD0sU38mzoeoU4zpMJDy3SJnHrSMOaVkuk5 -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 2097153 seed: BTBX/ampNfLU+ox7xipBGiaSbgC0kcB8Gy7BkJB4oKI= zfec: required: 1 segmentSize: 131072 total: 3 - convergence: 
ZOyIygCyaOW6GjVnihtTFg== expected: URI:CHK:jvi4htauhe3qxdruqqxptzg4hu:r5gy5gjgwoqxeffc2g5gsgm274i3z7jo4d5klipof72zw6dmcmcq:1:3:4194304 format: kind: chk params: null sample: length: 4194304 seed: 2NVWOXvtwcAFf0J/g0fDC+ypQbnCU7awZoCxcnhpkVM= zfec: required: 1 segmentSize: 131072 total: 3 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:SSK:m7vqkjrvlu2kidmivmjwfbmtxy:4t35eoohvrpaomfflyl2pjxvpjb624ytpop7p33zqshtmxz25rta format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEAlsm5+iJWoZQm5tDEpkgBP5OnGXNlk7p9jgc+z7GGDnvDBzSZ i1tLXjQJnQc0aMfJo4tRn48NbdhTWkFYSRfvq1oolO4xGEXxOgxmcIxK2FDyiexw UMI7Tiq5IUsCju8vI5AkYnLyPuPZLS9g2CYkIFXGeE2C2ptCXtBWMz8wH4ErzlAz Wq+B/erS4JYLw3Le+QCXjZhvDX9P8o+8+cKNb0+gJQeX2pa7E5JBC2V9t9ayMrnC q8M2K6pNUpl2prJ9VeDdoY0RBgA2ZcalViN0BYZWuBek83QmfB1Qk7/I5LATPTg8 SssoUGalhu+TDLZBVdeF/JwBDzqW7A+kc1NJiwIDAQABAoIBAAkiWzO91MWg9eJR jzgLcJfrV9oA1YxnZaeu5K0sMdS6xouvMgXxF9WzDUoH37LhN2PC0sT5o3SeAB6d ir2Sx5/3rDGpZCv2QLClg6cZuIb2EFsuiXc67ODFUcWkh5klABQFbU7Ra79HiiZk kldFqDaVO5qaB206roCT2kTsdPvTfXT1LBCKa6Hr/LWeoXFDNt5xxB6y+VaFD9LZ O+ykSIcxYSX6AYtglnSir8/EP+pmAM7XieWj3MeEgcFBlRjoT5vUaJgybAB9bR8s Tx9t9jQ/fICIDTWwZbpxp2fut438MFutKqnP9S72QSuhMCdosV8T05EL2lYUInzV Xi4gxdECgYEA1K1tlFXeUsiZVF46pC2+paPJwO7KmtfyVX/iKi3GyYfEQpR6ISth 75PuwUXLww473ydqHpRyYShZ8uZWy86/D+dsnul+mt25qNAnMLKuddabVy+2NX2s 3kUdqRE7DMH77RKxsfOKgmMkwbPz4uXasnblQ2GLu6aJbbxP2dLQ2+cCgYEAtYDs 32RZv+Mw0yMXwd5mMLbNLYOmfdVxvnvlPMid7j1QRsdrD2vgdgsbie5ZpyWMLF0u 1ezmqjxdCfZtVS1+jMLtMF1TtELAMbicFVk7HDgYN6HhYzmTeY4XZNxFkvMhnsJZ xFAeiV2fxQIpfES5M9W2YLpPokURE3UklBs5kL0CgYAaMrbh4+X8GpvQqb7dhIkM jG2I56Fri5hdceBhQ7xODPxfGz0kItzwjy+E/V0JTRKQ/aDz3WNtlnPmGPuuJWyh v+dAeBDRcOiy49lABXK6L1J5XfY7Bp0p0CfEMMwuWSL4ZCohepegUigv+EPdumTD QSQitbxpxCz/qIfJlE+IFwKBgCwWX/M3XfGVTvPKT2gBDJOCo74Nf3CLWzCoyZsF JA+NhyVaJTA+xOwHcK4FXnOSVEUmcUz3WWQ6e2MDH7WT8mxgoNqhoMZlfGfXbtpk rU4Cdid1Q9klUCQzlo0iUCgMtLrqfIGJ8JDvU/K3vrn3u4DSxZUjTFqfKjGuv67W GhqFAoGBAJEWr/O/5rWmYdEzllLPZQ9e2jgHXRMrlu2LozHqylEyJgmiZOvqEBV3 pj6hGSwX9Jp0cdqBaJEoFWgQh/ZYPnK9gydWYy/lGfIUXwIEfHJ2bG6tZiMCLw8q 0YuPnzqa1EW/R0vZW6J1BQHJBo6bS/Xj6NxTTGQh3S0dblebt8Op -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 4194304 seed: 2NVWOXvtwcAFf0J/g0fDC+ypQbnCU7awZoCxcnhpkVM= zfec: required: 1 segmentSize: 131072 total: 3 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:MDMF:zlfyvqls7elrevid5czlhls7gq:bt3bw33shbxg3suboavalydnot6zm3ftie6br72swiiwmwhn4zua format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEA8XlSFU/GAVeRwcn+hm/CmqMu9fmkYi+Zll0SBiQjY7vCtd9I 8eZiOv4J84wHhvNKOho35oAvZqCrAWfZ5vEM9fLW33Sr+TzmeXeKkl13WWlP8u/J oXTwYM4oiqa7F3OHwzGrbrAVF09ByG+9ukwZhhBf2LHNx7/H851WMJ1TQ9CES9r7 w+wRwmxlFuhA9pqhImo4upleKq7PfymzEovIWEVeuaCNrfoU9I6f1HXKcVcmLauE 7A7Jtu89sdZC0jeXLv4q1YuR1j2GaZJQv5MuH8kIpQyGhV0N6rBZ3FZAlc6+SREA muUveyC6lUDCkznoOeaDYZmMu++l9th5k0AJsQIDAQABAoIBAAEJDSIMQoQU9QoI rK+04Pe6xWPGmz7Uh2sOoRono4M09ePDvlNTMo6gMji6G/onJuVS4XR6jjl5bOJH qLaFyBFx5hv1KxuZeD+DFLQF9JIMkowvHQU1NCamG6RkjJ7QHv/mQZ7q4FxGObj7 Sav30ZAyl8adFI3Ls7bGsOzqb8X1p8magM+yerypB2PktePm7P1Jy+x10kAdc/vv +h6KR32NPTo6QNNoIPUGsn+JDFdP+cq3zHVCZRSZhwsLAaVpF9Gx1qZjaVs9K8zl yFUomMfdlWFoGHbBlqx3GW2DH7Rxzt/qVLoU/ShKiZTu7mWeHSiSUVyrHlzVkptD O4Tw5tUCgYEA/jXZa1W3mY2mq72xwCasHlyj7fjme0QmVSbmzVI3CdbF8jDP/8OY ZpFi1hc5ub3wnEYBrCPyxtUXBgPwbuHrk8j8e8TFVoPIUTIGkvZcc7o3LycRwIAf Q6HXcVUzT+PVzLxIcoqkKDeyqCZthqSmpvzKAVl2XoksjBRAfT2v0GsCgYEA8yyE X/aYSZ3BH3RzvUsWsfOFamimZTxws2JRbBzkqMuJpYEWp7v12RCU/XTD6+5UORok rP/Q/z43SkqM1HhHP7hPt/ge1ZrcHmDsGdqWcRfM163UoM2e4QLnlREcXYBPM2Vr 
9KJHMAEz4bgmv4gTANeNiVUjJvP1t6TKSoZgJVMCgYAzAd8UWGi0mOWehDuMULYs iW4jK9QjW7NNVrbs79g3Uy74v66cpUSJIBby2kos6N3EnY9sWPI3zz4FaPjvZsl8 J9Hxi7QE/gBNunnzNxep6O11uqMnOw4K5ghypyPand6ibA0lXog9wZ9Jehxz7cm6 q/JkfuzvXxrfKJkgCCak7QKBgA/1cvaNS29BYCQ9Uz8wB1xEXBQgrBLmxYqwQCG1 P7hoKy9mamM1ravCL9T2bck1Cef5dEC6RTALGDvS6q0i+6IN6YVsTjG8iQehWr1T oB3p7vKUoOiwteWUeDhLOC5WtlvsIwqZ/8wBuDLvD/Pv7TdX7hz+LmFnD1AvC2ua qAKrAoGBAJyIAekG9AN3qgHNBdGx/g8DWaviWbINlL8A/7PV11OEXZ9NxhQfxYoD fSzPe0tpaHMmS5g5G9+9tgkzrPIy4oD1EPwV/vGOwX+r3lL6RaNZTdpa5YT/yL+D UF2ApoYl77aXQk3OmocC6J3nq7Z7WDjWAcHCrPJahFPnWMzKWdHg -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 4194304 seed: 2NVWOXvtwcAFf0J/g0fDC+ypQbnCU7awZoCxcnhpkVM= zfec: required: 1 segmentSize: 131072 total: 3 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:CHK:5anac46dpfsjumutzsoovvtlpe:yuc3gqe7m6s6xp7k3uodcg2g6k2bac32gt5amu7gdx7k43qnudxa:1:3:8388607 format: kind: chk params: null sample: length: 8388607 seed: w6uP8Tcg6K2QR905Rms8iXTlksL6OD1KOWBxTK7wxPI= zfec: required: 1 segmentSize: 131072 total: 3 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:SSK:dhq6qvx4nkx22cugbnilxv463q:hzssbukng6i6a3zkco6dftck77in7go7gsjf2q7trdisy6htwusa format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEAwVi6IPmkilRG5OvCNOPDKwHWrNKki412A//p643I2qdy1WqR N405gGIlMmWpYnpEY5gpD0F+TKNjLEjpIWY1GLbzHsBOkgQR/6KcIBaNjBJ5GZqH AwqHNjnHwmyOj+GIY0lLkqviXUJWf5Vbp7ngeWXqJ8i7eX8Q0PK+deWt7wSwQHpG mc2UHjmn8zbp+5zBkvlVsxBPdTB2r9Yj8dShoUGZw3hDpfWEkUhVTYS8awHjSDBc BPzFXzH+LeK7RooBXvhg16Ar7a3YQh3bBPmtj/yuvuFNSwrItjQRCRiXoBARXy9f QytfelzIiQcfPLAmZ79bwEPWHFvRUGYuhhj+TQIDAQABAoIBAB2qBkye4K7UhVgE D/TbaQtFKfG0E8jReGTes74PL9zUShsSUZtrUIIxDLHxxQ414h5BrzMHAmCxxLp7 qUEVlFnpX9289ZETjMti4H9P1oHCJ9BU1BgUWnBoZwyeaTUMDkfla/Hh60YRsoG6 oahXLWiSyV03QARBCYx1YeFmzsvX8PLJmD6mwYoO0e1mABE5ase59vC4GT+X82LR Rwm6gb2JE8Ko83lleAKnWOJ1ZnBXjh1V13TYDgsX3JDTYK+PrEe2/YDFm3LFf4o4 oWZAMnG4kc70e/MxEO4iZ4S4+VWB+8x4htSn+1VLnwXSIbVqbazcgtL7whbvMfY5 0WVU1WUCgYEA00wfFvlPBXpvYsYI6Hw3atiSGtAh8RrkVVIgKJTF5Q0frK9v5EjC tSUkXj/Y0ssq/oIUZavKmK4ALD24rg3HV1ewe8Ih/M5MnBie/fPp7Fa1b+7HQ8m7 5n9GP4IAvH5DASxTQadQ4yneLudKWnbG/fzFZm8xDJjltoPjnpvNXncCgYEA6kBk LC7+aYyjSpgJfAFbTrRgL6QlVmQ/RgjI9Afn0mWSJWcDm6t9RTJ27OZapaCadyvm OLsGkIRWKP5l2BacBR1FeHpPw9MaZb7QXt1WT+dL/i6gi2j+U0BxRvIjy5bVGc+b y80fjwvA91az8i4DWpeT2KlhEyBJmnHGumdzZlsCgYEAzouHjI6R9znyrewFgzUB evlPANTZiPUPpHOOKf0b4UZN4yDvUIjrg+VVwqfIzG17jqQbSjN+7HaShqyi3cls Re3a/28KiDQlYSUULgyDatprq4oO0S3e9ncNdUEgdSE7YGcyz2e9wwEHRnQjE4Eu DdNMJ1Cj8rt3OU19cGq+ewsCgYEApIJOFz11jBipgLRfTMgDILXKKwsC8bX7Parj vYVjx71vMncy8HsxwYvcOyjXFiRA9lpNFyA5TvqxK57lVSkjru/Mnvx+0g6KJlQo L8cPW5QbKUoDk4RLv5mtM97PRqYJyFOlnS3T8PiXLtykCPtJfbCfsvPY6b6uEhm/ L5+BSqsCgYBsQScpNkJMelNcQXA8XB3d2jD3HN5s8qBFi4rPaCsrL+/Gq+qu78x7 zcv+7hi/1R0G0dnPLeZzSUjvV1snksxnAjl8BpQ2zfkBxRPRsfVuNNgoDBPHZCT+ Aag+rzAJ8Taa+D8ES3pkoDJ2QiroClMmaCZbpy5SUMt0QGldlMAnXA== -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 8388607 seed: w6uP8Tcg6K2QR905Rms8iXTlksL6OD1KOWBxTK7wxPI= zfec: required: 1 segmentSize: 131072 total: 3 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:MDMF:3b4gpaaarpdku2s24mhbrzfmyy:gpvx3x6yrifjb3yrzxkzekewe7hvuyjrnqz3t4jh3bvby3bwsuyq format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEAw6dMaIBLZvPlxEoB+56rygwktOgnaWOp+NS/v2vBMyb1Ih06 qT2JIXlYUHzPqAn/a9XCde3Fl+srlXenNoH3WUWzKvvbfXf4JutygPoJfiAUsZCb w/MtxO/2Tp0U4KPDML/Lx/uE8e79OHwByUvwdgM+DED4vbqFAhRJGGU6v2oy9oLs No5y20W479nGhOF8jnxet0FiWwX7ONZtFimqBB77zg5rpBSf7Yo7JTGaBZtL41W7 
YsyEklthyBS+H2DqQXiMONYyBgTwuSRBUYbBstgm0/gNGRY+aIJnnVtq/PedB9a5 5m23qm7q9tXui0UaIGp+wo4/aG5W8D4xd9yy0QIDAQABAoIBAAmQf135bZY0FJae po+qH0xCgTXdxnVys4+wOMpvBlQNkryu8Jvu3+oEwFI4877LdFLNcZLpw3fMfYYy QfiabGDPFTXj94Qf7f/bES8oaffiMhjHELJIzEM9Fs2bhLaBkuxqZ9gYHdTk4biV 6VFxlqjyOiGHuJEv0co6+yLH6hIK1U7WJgsKMkdKKGQzEQt8mAqz/PaGZN9t+KO4 ovUSDyoCzyh4xiwK0lzhnoI8051Wg1s3x37mwimqbT9v3zU2IDWPaPE9is1xG461 z+ViexDRQSz8G2Cxm5LfEhc89wi/k9vgUb9nB5f2n8eQP2FQvNIOcA6VqI/bkhNh ja92AQUCgYEA8V+pcG040UqLXPZHszQPRbj4LtkDBwBqhX1HqzlQMHYHiiLg/1r4 Woe2hPL2nPxnCLxxBN+sDz73wpmOBN5fk3Lhz69vd4GvgX/5sChXFQy3o/P3xGCI rcepn35M4AJrL780C5gy/WK522/4zp33sPSBV8lOcAaSdkND1NUSALUCgYEAz4Jl oQH9K1hRtRoTbKB069aSneXtD2/f9D+9m6u+6D1eSZfYc3jLiIXZ1VTIeXT81Oww /IkjsHuD1LraLJyw63AcKPAK0BfJwhnLIWBKoHWZJoEMvY9Xqq3wnef0+Jfa8kBp LaiZ77fRhyGzPQICOmIBtuIZYiYjZi7gG3ibJy0CgYEA18yAL4znDG9KM/3YUsaL lPlvomrRAxSDJ/++8L2YDQupZ/4RDRxnCIFnVGvowqgC8lOP9ByJt0PDvU8OIxox dyFx8/3UeZMPt4cUVENsv9wT31iCvybTbBMjev4veuOOsyyOOoODqvj2U9NDLm8b ATFI5pSLNSsbDPLMlV897jUCgYEAyp4Qjf5bLg+2+JbVkKO8huuljfgMWZ5rlxsG ERLJ/gquHj3eZCH22v+Xi+6VMcNBfMaDrpJZ/uEcAIPStOzq83ksheydIkOYBacZ 6SUUuUkambY4sn9copPk9sqfMH1WlGTATozqgl+Cf+gwE8n6UvePpPtwvZ1vwz7S JQDGvqECgYB99QTs804jni1La1QdCdbumonHWEfMrM60csGkrGOjLiyR0xUwQyyr cokRNpIAp+kNNSgM5QRK7WKXjdKrrmPndnV+FUr4GN+WDJKs8lumCKHveE7Cn0YG I4hG3f+HgjdN6HXLN1wgCgP8yufvRwAbsIYkm9Ok2hRrfndxErdxHg== -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 8388607 seed: w6uP8Tcg6K2QR905Rms8iXTlksL6OD1KOWBxTK7wxPI= zfec: required: 1 segmentSize: 131072 total: 3 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:CHK:fb6g3fkfbtlwzheujzvc2dn7dq:644je55ri6q5halshuxfesjz2afhqtebetuzqqbfecp6yhqwbb5q:1:3:8388609 format: kind: chk params: null sample: length: 8388609 seed: yPi3JHKKbWaEEG5eZOlM6BHJll0Z3UTdBzz4bPQ7wjg= zfec: required: 1 segmentSize: 131072 total: 3 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:SSK:nrfalxtfzw2mju7hd5xuvvqvhu:neuq5ftw4keuikytduj7ql5xmo3pzp4qpdsn2zt3magaivpzyrja format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEogIBAAKCAQEAiPrrD/TaDA1eRnPu7gUE2shwo5Knp/+WbuH9TdEkY0HUa+G8 j1ohiQsILqaE+EdJRhmpHb7RE9ZtoosSTss43tlfvyK+XevfWy8XzUExXU8OhEEA +OYCvGPF1Bhmp1AXJ5HHLzieTiuY7dXUfsbz34D3AqMPRHUx/mN84D07rdFT4pDM At1y+vMb2Gjjb/iJP52YWlcWl6D+381JsYASccx3lWuSOMz64gtfaNCzyE3TpQ9S 47OCIf36VD8B5AW6E4qgenZ6tV8Auc+QKHmzFkxfTMeGOZOGNL7F0xxoKp8FlNZq XFkN9syy6+gTdP4ziWtzKl1Ry0sqcU230MCY5QIDAQABAoIBAAdIdAwOpDNE2LDd RCRinu30/0wrJX/groJpwJqNFqayXtV3lJt4mtTbAc3dK6+5tpMkFSJQPXSVD5I3 W7tVwcnTe+xBMCb1PhRujhDrOPExnV95x0/0hsu2cFPFElwt/XUsoo8Hrx8P4Vsv 5dLxyBCnnjqFKfAlXQmeB4sypkQpC6YpiaI80hkNB9a2aXZr/vc0IW5NUTPKe1h9 Hya4OkFs0GMuapsvE2oAhpESU7jBuAvA3t+mtFvStKzimYRS+mDKMHp68LWsGs15 KHTiBSCWIuWx5vs+r87UmD/4SjDE0ANiIx1bXxitS+NU+R1vfOS5xFDwKheeO/Xn BjIYKNcCgYEAv5JRvXmS5jY8dfY1SfzOcJYx4uGeLx96+8gUOpRNvSprZM9zliTb RCJu1ums+nozq3DDzzF6er7bCMHxQ501GH5LiY9tXQEkgYvqn7oFLs867N9KuEnI OyXTH0QKk0Pnmc5UaYOSMWtEshV0sS5wtTg7xFnDK55EZ3SNgk72m48CgYEAtwx1 SpOLGpJOCY/mE9wwt3xEd4r1/iBXJvvq5Z28+C7yJm0o94P4SDW3TquFrBMcjywJ MzXMcS3w1qODA9HkAw4+XzLIhgVHD8u0+DdRp8FDXq1Ym79BiVJC42LD8YyuU7M/ g8SAJtrGTK8nyfuSIm3v8jT2Dj3A0GwFATnDmksCgYAX330ONqNGywV30cnMQZPc Ves7kdAroSmrTMCwmCCj7TBa7LtDv64PbJcRcydaQ3ZC7BeKr2jK+RPEoJ6XRXUD a2Gwb846I9VPy4behsj0j2CRejYOhytLq6gGom0K8xBei2bbi0jhnbN+2cuj9NyY yLwx+Nmoit2NYunrjjmPIwKBgBY+cVJqs5C7Driiv/bR3yms9DUCsfn7vBuEqXrV vEz8h3ib80qAwv8jZ+8rcMcEW4gaddO/SeTHDGlI3XbtXqPwayvuY+fFZGlK++bd 8hJMrf8nWYkzqKcjU/WF3wHPcq/BLIq6qkgOdeKDtnYZGB0O9wWb4frBDllFhyYq +tYZAoGAHwFRUIo8mJXHw1HoHrYcCkfd541hl6IAj4YYMY62SyVOYxlaDUUVxEMG 
KX+wyRsKO4nthVDnkzCRXkbAXAdCatt54PGRRxg9wwi3xwZ5lMVa806AAwnXjKke TZ0ofqZaGVwqE+zo1gVwqM81Vs2QsKSP47yK2/k8xS09dHWp5CU= -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 8388609 seed: yPi3JHKKbWaEEG5eZOlM6BHJll0Z3UTdBzz4bPQ7wjg= zfec: required: 1 segmentSize: 131072 total: 3 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:MDMF:sg2kk2aboiazxpzii6niebisru:ddxs4jaogxyavypn5lxfqiuymic7ghlbiesozaq54ubfiqpjyyaa format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEAxz5hpr1q3n6XotU59zDx5oXx7hZil0g4WOX6xHhi82scQ2dE Kfz66yEpuGBktOq7Y4xhXC1w8BXY+rt02f1t3tQpmZ77mAZWbUmR+y9nFU0MWs+e SHKTSKynUT/AWy86PNdQd9bkvhIynGq2WUwHCpE9ZjilkZOZTrgjBs2IavmkkH++ DVje/KBSnzDebi79/0kBa+O90BiBIGNkfNMy8nsJJn3NaVwbsm4cVgVNjns0adDi ar8fxlzyKYlwP/LvODp1u38I+/9sxStKtzlVYiCD48ILrwUtu5YBSAs89xnnRIrx VJztUMUfXCY8czyThxEj7QGuac2HH/o1EWWDnQIDAQABAoIBAAWoxnJLgA0Y0GrO HqrgR4ayOEh4d01vvjX8Nk69UqYyXSem978vkdr64qn7hvDCmBcARkWtfr87B6I7 j9qfQa8qHJJh+yh6yZqqE8zsMEPhH+A2dHr/UiKm0MvUgTy1zzQqM31r66xgbPKX b12C9NI+hN1x73X+mMStI0WOsfjD2KgJBqm4Bl2b/jBEnr17DOVGAHIvfSyjrObk hChdjgBQaiW5L6qv97f6B6CTqdk9Z3sFIDQp3oD9veim10nK+6c5akQQ37MFERdO k4ariqAunYP5NcjUeZWEu870eZFoQR3DB7ucza0Zt+FAvw99f6ijvX7nteVUpZhC aqLrbCECgYEA76SYmfHhhBHv2XRI/NwY3/ACcW3xxPDpSLilT4NuFywDUrODDVzj //s+/b8PCUW5kXvR0MXGKXh8ADNBcbhym3nhcC3XGqCo+M2n78ms0yRF9DzWDjn5 yt0TQsuEalHMk/sL0gz2RUarcmyt0RrfM7WQJQHdZ+Vx4EaQjp5+S80CgYEA1Nfe PlcxwbJ2MO6PLWaoZAO83wrQBes9CSgUk8mB8CEtCvA3sC6gEl8ihZSD7RuLjzSZ nVQZQdLmHbWKxSnhFRK1gfIqnFaxr0HAtrDkRt6V6OSAV6rHziNvYIgtXFKsw29u 04222C+XB+lDtGgIxt+LkQRq69CKSFCTQ++PZxECgYAOfqosRZEaZ+tV/86aXMW0 ZdQAAGJrQxcZKvH0yUJTbHoW+nymxkOULCI3PuMt8GW1AwRB2HSP9ZWqfW8r7bgg 51JXcq5cEfOmeOn7evtVGhCRIUzhN2iAeLa9h4nO1HvHR5wDbH1I22lrVl99El8F xameU2qM6jflFN+RgMyq0QKBgHymM4zU6dnjVx6PB6DyJxnzqnABWBSvUJ6FL4/h ikyEUWm/hw2SMMKxnnkWojCBWjky9+fQsb3/8i5h/HQ9c4kw3MXOei/3AbZ+zorv i7EJeEfdUmCFLuDFldu1xML11CHcp84Th4qSTGQgszr7VnCJyKXULX4PMnzpW2WE 7bnRAoGBAOMSiSaRliUYjKBF2ggI7tfVEjslmKfg6MPb8Yuvt5Q6yJ9U0qDqghIw tFgDh3H9OhFiJSx4SoP9ouKE93iQ3F1XPZa06uJ5sWVxnSL0tVDhqXHcEM0JCEB8 2KGer2nGo5KcgBbwxILp7v/rZdjXJIvCz/ktqUneyW5HQtPEffnI -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 8388609 seed: yPi3JHKKbWaEEG5eZOlM6BHJll0Z3UTdBzz4bPQ7wjg= zfec: required: 1 segmentSize: 131072 total: 3 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:CHK:r3orpfw7tqaxbgxc62hpmhihye:meyeb5lt7i4iyewahb6lxzohn6jxrqgi6b73zv4gxzirykpnyd7a:2:3:56 format: kind: chk params: null sample: length: 56 seed: YQ== zfec: required: 2 segmentSize: 131072 total: 3 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:SSK:zyx4vx7xud3ibmvazav7xf5omi:biaeawjobvdp7v26ed4re6bfc2mwis22aoolyz6n7ciptaqktpua format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEAuIn3ExvxrpPLR3mwsmSlgk6/qESjM+a4KTrWAwDF6W9CFl0e +jdauS+LT4yWOwXQ9jemGfwUN6/zNqZKwHl2IfK1nxdLmi2yDs3OTWPSNQ/BV1gU FTpRio2ZCJJ1prONHEvuABzY1un7CCmN8jKd+VhCPXMZ4AS/RI3nmREP/Hhj2nkf 7PMDP1RCvk4zl7YY+IqKCk7IixK/4DH9FaV4DRiQbk0W2DeYRtaGjf62AFaKSVY4 0PnVtt450CWGIl152YyJWdP83efIxT6MbEF9rayPZqxfGgXswFSAAKxtTY/EFllg OJomAqQYTZLo8Qh3HsM6YsTbrbdU9bbZoNnnkwIDAQABAoIBABkeK54okziC72FW 2UvUJ98DtFHv/b17wvIBtc2KK7tpTw0s4aGHKqHC10By+fkmAoTw8CU19X6FmXeF IkRD2WYnhhHAdvmRDLJieu53uXigILMweZNQxnaIbXMIPwamBn1P1yD2zDZRz2m+ AORy1U5yH+9YnQYBP2jSXoVWwt6ckolyduyhqJbyFtvzduK+RCXLtYUYSz/g2qTz TZLMeL7GC9ahigclXmEOhxrrC6b7+rNNMidCWR+S9JwSXZUUOld4+4jMDjC7f2+Z 2dQO3LcwTi6DJzuEIE+NAw0P9D/43VVjw0LXsQ+1b/Z0Y60SbTs6qa23KtvrnMyk 7tZDxbECgYEA3gRy7JugmPf8Ir/7ourB39ks9gY8Lrgx1OFd1jKDPicmm1V0y34I 
YcXqIVgwkui/C/PXigYselySyIun/hRI0l970W5Koo0Gizy1J6vgAJKsRpip8Amc iXKdjOZlJqBo89Mey3QIjUuq95bdwKM/w61TLb0us/tCNN9F1QRk42kCgYEA1Mj0 8SfE2UlERW8qS9nG00UUVxXI82CPXGrcYgW2zT0eM9TlK62eYDC2vmH3xZKQ1b3R GfBzIxFSX2WtE10hnST3CuwhwGvxH/sLgnxYeWvJZvtLsHLgG+HgKZlTkbcnj3ub 6F5HdijrpiBf+gJwHoIE1aPeelYtqiOcuLeWn5sCgYEAhsEoiBhFt9L8xJLGRzI6 DoYg1gseyDSgeld3vyTVqAnXUvzhcQnESKP54ddHVEPUgYq1Tl9E69f4d6TciEkD kjzGSG2q+1KhoC1uvu+BfJeJ3SeYLcuHqZ1Zp0XIK1O9oBCKZm69KhW9ZZ26Zswv TbOMAv0Ktc9Rdgn2tr5+BdECgYEAij7BvQg8gXtzirUNwtgLsGmaLHYv58edfMrE wj66JKAHxl8UQYt8cTxVDl4yD0AJL4UynGq3M1pmrSovB3yjgShqBMOjrhOzRjbh pHZLOSAJawnrhAkuh476B6zhObPIVRVXFuJiBWfSqk0wbgs1cuzAXVkpC0yAQKEA ipZkmu8CgYABb10dBN247Pr/knhFFQkXm8Zt/iYIo1aZfNEziZpi0CEewYY39ZBM 6OHKFNjCCsXSmxWDhWvLC8CHIk9ExdI+M5F2kdM9E3TSvqtjVL2e6qqTSTWW4M2H AsqoAnHhTsnxjfu2TMFPmdeCfbbbZHoeF3AgUTyWT6TaV5g6n5bw1A== -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 56 seed: YQ== zfec: required: 2 segmentSize: 131072 total: 3 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:MDMF:xyhviiiiqqoljw7poxi4uqun3m:rdzfyy4nplb47auypcxjdvxklg4wyseiitzsclj5aeogss674aeq format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpQIBAAKCAQEA0aImdZWIU3aUzKiTP/h9QMteuUvRtjuHxs5ysQQQfHQaoyHk rR5glq7Wb/9FrTOI99ENDR1SxrI6Eqsd7FvWeASY6fTULi7gSIH2bAs0Fwc5cZnE Vga2hMnTUchzGQCewrskGFG+JkBdLP1YiixBj/QI4GXOgVOkZQhtdjQ63vVWy/Hu kmZHy0PbHrl9n6e0Q/FCdqho1D7BJ3+3CbuSnUcHDuM/+kk/cYuSXHL0BZLV3vSc orEgF29LDRu7dZRMHT7ziP/e+LjK5LpL3XaaNkNfs5h8aXDHRIEjuOlKaVSHvArG RiHZ/4CiHObaQSHUJ0BgEFXwCPsKw5BU3woMOQIDAQABAoIBAARXlyyfYsPACCAN Np4KyiGaXa5V7knXt4wBZjYLYd5e82WEzNfsXL1Pm/ROUEd18roXkvRv8qMCzuSF CyB9VX3i3O+mF2R3Y2C8NtoqAgWC3cTuTpOYXbkvPK/k8ydeuvI6sfFXkaoxx34T GTZsIhWhYno9a2Fn5AixmD6eIWTRVVPyPotXdO5krHnFkgq6ZWvjzZEIg0JuV1yf 23S6yhqlohLRPGyU0uw8fW+O5gQCBp16KbsCwYI3sKxadfPZfl8Pw7I9DDsWSg20 sORw+9RWs8l6egCXa++3qYn832UMeUPWOarEUVO6u9RwYnVpVP5CCPgCEprV6eRU mDKGnRUCgYEA1X/3H0bnVQK2zayV0DjvZGkMDLgAMMxf+0low85T+DWx8zo11n13 S7n2CUOIqCh/C4EFw1m72ZygxRO2haK7q4JvO/a19KQhyhpzXrdS5cbcvyfIJwtc rTjX7UbX7r7SWCH0jI/1/ckWDHWs3fc6MJiw0YBMXW01LOSfH/XCj+UCgYEA+10l +2GvmryjypY1Svn7AaXkT9j95ZkRSO5AQZa6bczjAsiYv3BGODmsIiWGK2cOggHM XHtULBaD0nFaQalmHo3OB1/GafpDkbrQZbraLxIEy6VVS5sXA6qq2Isf3VrFo/gJ K53F9ti+7yLUi68NcTAAKFCXZjwaoo/OjH3s/cUCgYEAgSeeeX9NJnIz4AxNvN8U guvBbFhLVTntvnhUNk+1IGxrMDbApvbTmi3vFv+Rxhhpcq4krF62cxh7cX1RZ1pg qYqIe//tZwd7oWWK8Xt5XKOGmuUYAfavo+LFTTcUHcu2N7ai1/2m1FY3TmZJoyWS QB++p54zlDkid/v9/zmO77kCgYEAkmoZm6m0/e7vgSupczjVKoqUyKXejoRwewi8 SPghM5/qg06RGsGtRUbiqyksU8+9taCShzQXPW8H7ea06hZgM1/qKIVzL3vlK9ej V/5U5KIcRPrTCi0WZL5esa+oKembwfzSaqOGElkCLo0dPRgEPm/1R4ZaCeTsptAZ QeB/0PkCgYEAu/Z0j5XOD2jpzUIIiXOkV6mj2XoGEZu7KPSYWVpZooJ8rkc+Eur0 3y1KkBtCbfk4AtTtPNcBMIr+SCZhrI++3hbpvA6uBAyWdMHTeNwCDsBzP8i77opG OlssRXcBgtxaG0lbrYO5KpyxCSv57wwBmTX/VnRbxSEVnXXU+vpXkHM= -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 56 seed: YQ== zfec: required: 2 segmentSize: 131072 total: 3 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:CHK:jdm6ytmx2i3ya2bllsdzxurjtu:iqr6tyysaseegzfurhuywy3mbbwkbbsov5bt3fo6oazpyv7olvda:2:3:1024 format: kind: chk params: null sample: length: 1024 seed: YQ== zfec: required: 2 segmentSize: 131072 total: 3 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:SSK:5bjpib44tzcjlbiysarvufdaim:iitswphabeqczmg6ntnjeuxc5rfca33lorjsauils4bl6c7p5tva format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEA30QPR+SkpkThBIubgnvyvrEpBWFE14xSJKwE5x9RZqvrRdb6 Kf4YygB4Hp/Mhdw9E94EP8zfxLkYE9njeAzxPsnhckRZz1xmxY+jQTZ9kVs78OJR 
fqsa/HYKP/5yfOKxMFpUVJ0fJKzj6JVRQAnvJVun3YXA0I8eRkmDz/+aByvWQ99X xRzsvr5xsFmtAa/Du6AZBLhlP5k06CnqFsHQb9a7HZnyC1OyfMCyCQSQf/N0VH1X iQ3KLyWufJhq2UGl403XaxNi3Or59brp+R6wIQzIUn9rzXe/ftjkrqp+TTT9FyMB nUbO4HSU4of0uzXeYINZDkiN9Q17mpx5FSvzHwIDAQABAoIBAF5vIyd9hkbti1+o zTX7x0jxFjC4W63wJC5utAQuMvgCb5kyvM1WNJX8bNJHNPLJnOvyVEnIFj3XLF/5 IUV98+xi54C1eGdE5hNaFetXaPU6abgRgfbZ2KhAJUW8EiDQobGaA2Fms+2HUz7l KWC00voyMmZ4VH3iiyOfpKktq7CsB8gPIV8a+BJqCrRjRnI4n/+/8qfd+Wa655QC H9TjuGGeTKn9GwofeXHbwION+ygPOXQpez2YudigZBHK0wNQeJYkvDNAS6NdfUdY OtDOiQwhLQYOa3XeLGkcxLh9Al0c0cDEWLToNqPfVKTfsqKIiNail7ij+7EJ9rOB q0suCgECgYEA61GSBETDtsJLxoxMgGgdxyW2Wc/GdFBi/f3cHy+BSRHROshP+H+n LdT9H4U86PK1QmQZts9djBKh63AYhKgyRyMqSvFxpqywbznnkt3qygPr83VIXXmY 2Yj8SKwjgslq2xNl6kTPKdBv/r+Es1nhYvSZf5UTbgdQ5nlB2CvCp38CgYEA8uNQ an6u2OtcmsDrwOdNZ2o/3uddDtAkRuI8LFzD13rGDYS0ZE/OeIbDxaW0mty3t2l0 QbjW0GJ1wUFoutcdJmRBOj604Xb/NYDkF2kiqtYIlnDyVP4nIi44+0TIfTs+zXE/ s0xXnwpZB0jfuQxDT3g4U7fARx0K5kaWQxxzhGECgYEAr4q9K3QEr/xHPMkCdLO2 qw4F9v+ZYsFo52KN57Gvd0vUUk6F4bGQjA8b+HyTUI9mCi3URNxyQ4DOy2xmzecP AqRH83ojtxuRzpdameP0N1kvlgFCx4BjNrwKv0eygekxTlYtK9LC28WDFn3WR1jg WspvC46w3N6WSifgp5sVbY8CgYA9Zjcy5JlgnobHXBN8rTwE83f36ja9AuLYxGH/ uOeM9i1Qx4YugXopP7AHq67vIvKSO+c2ofozrWAlHVrTOIPW66sNhUKGaGV1agK+ 5EXuN7LuDNlFoQXVfyfKZQXlmm9y0bkPozHXM290Bvj/N1lgonxitWW21GGn+poL lwqgoQKBgQCiXSbwbmUtTSmgDUNdHJ7tkwk0+jP56Yv9feH9FUWVJE8UF5InBB62 8KckguXC3OcVmXgoPhMFgMwHJE0Mqx6FdhQPCM8oEMYYg+sM9kTHiykFfTmdt2hJ L5A9omWNxiWRKgIerKOvO3ljmYsSXWp8VQJjvCm6DlznDg5SDSQrUg== -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 1024 seed: YQ== zfec: required: 2 segmentSize: 131072 total: 3 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:MDMF:pxo4i23yfsjuv4ntm3xb2rwp3u:3vqfgh6u6k3qh457kru5nxe4enc2zkmnb6jmkm6xj4x5qshby6ya format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEogIBAAKCAQEAxr4IL8ukM5awbx8SFDOqHXyWEEm72ceADkaBYiwJIemJESPx 9kd3Vvc3Lu5clWbBeyMvys6LAjcMz7xWlvDZ4/rUDMRVzY406P8c0dSYGdnoJBLD jr6CDz0JfiInMKGsBaVUejnKoW4l2PMCj1tbl4v2egLKSW+XzNCfFU7kFJHfOK6R MrRq/HVMt8eIaHQt92Gziukx/AgoktQhGxmxUJwd3ETBuVYFAqmPBT8uuf7bcAfV JCfjrxxH4wAwpWXuUSu62Q1yOw7FqVQ+k86IUNFVoJQMlJHQscXTu9EBAboAqgM7 A4rcgVMqo4QkqvhX0/trklqyU2kwslODo+pv+wIDAQABAoIBABJCYlPV1K5isVoR k40kmVlf2WpXWTlvJq4+abkLIlmpxaHmrgkk/sC63NM51hVp3UEtdet0TM7gH5QJ vIuInFQ29tAnd69M2e1FhwXQ5O/hwSi99UFCR9u+OAbifLxElkk8gTBA6q4ohbSo JaY7IVqhOCd0kIDtDkLPMKvPVY19bG1FKiDx0BSBm+O393i+ht4vIahZEPhxPyWS hF01XWqUyeG2EnMzmAOmdaTUck9hbu32It9IWD+Lxv7mN4L5CIeRKVRTEF4/nV0+ SpMCg5U4ZcVkR7Lm1k3Fl5qUZe+W5j0sb6z1r+M1jKgFwTmDMKrmU804qABDw9LK PlfXe+ECgYEAyqny0/ff2y0sq6TDGq1+tE1hgu47fI/arQhkNR6GmFJH+KZfQPSU Uc1TF9XQnEweLr1/U4HZE7bGGVUfbhAALmvZVKT5E8gfYOoPkm1fDQoavFJW4+26 ZSPu4gl0d5Z/AmBPk4BJRdHsRDDDKvB0BdKiqKpSkMTgnNklznVPeVsCgYEA+wvg l10zUPML3sX4EcduNfRR5kXCAtLlU/cnB5lEZNpEDesiBMVvXxmYw9D9z9LUiX2w dTcruHv3mgsESyytThh7idyBv8Nw8bP7BSOP1TI090MjztPYByOjK9LqPLSD0xhN 2sVgH0DN+THvD2aV4uCEWLgKEC9bD0dttC+2BeECgYBDsEmTdInHCaqO1aP4iBP2 opW8BlfF/cIa4t+dQknQHEM/kEnmRwo23C4xms9nNKEsGUyqlobrZ7N4iI7L0vpM hub6t3MdoUyhsOtsi60gjMxrM9EjpaYI29yQkHne61wWbhaF/GX8tOWFzQeSkucd fsGnNeQHyEoA+SIAd/wIWwKBgFdVf3FI1ARSOQvr1OvidB3C/Abet6qh0XPPZD2J fTiUkd5BsVj1klQEJJfiiZmV36hhGFT+t2/7eFyXfovkY/nqHHgORPkANbdwBGB6 SZxCVhi6u6dFHT8Gj8o8Go65wa5bIyJ7TYAx3DXXwDGcX4JI1uHCTIXq44PCNpDb lDghAoGAYdL8Fl337G/8wO6e1cIbxLWYWdtQvr8ibsQJCYznbQkligQ7+Cb4Ch1F ceMke2mC511kxIdPbpECf8eP6JSqtAd/zrBASe60N+QSvaMcQwRqtoVnduERfjDH thD/UyoA81pRl/hGX2ddp7tT/Fu9A8/6FLJzd7g/k5ZgjkdjfpA= -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 1024 seed: YQ== zfec: required: 2 segmentSize: 
131072 total: 3 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:CHK:7wz5dhkyob3gmkydzgbptk5qku:cl6ovq5a3km7rpfhyb3putg656sp57lhnp7aexbocsbbahdxnfia:2:3:4096 format: kind: chk params: null sample: length: 4096 seed: Yw== zfec: required: 2 segmentSize: 131072 total: 3 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:SSK:dq7opblcokohsb2hhcuej3keke:25zjdhlue2o56qcu3teoqltk2blks2qm2n66rmd2qcwfujbvo4ca format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEAqF3wcmVUimtXuQl8xGtHK4q0zCOrUBkBVIMakmXC+jlLBnKA pyNjX8S5JP+nrQy2ZfsNONSGgNS3HoReEe/U/y+hb3hXHTyXpYrrSEgbKRgTxREQ uoPW8ljJWyPhe+h78v8EooMuz+5E8pGuROQba9It3b84rg7TKRSAEHS+qN5v1JTa B5SkfPvrFj2BrKefGupn+ac+lX63B4a6KxQtVrnhHZ+HZJ91AU8sNXnIRLVc2jdT VLfUKHjwpxEXWnpZdv4ypUfrjj6oUfn6K3y/vPM/BR68NadLUx6+EYX9Y4ZBaCLA c2pNzT1gmVS5KYvNipt8SEZu1wjj88/8UxSoMQIDAQABAoIBAAD+kwu97PlR/Sc8 Ntwwd6+6ZpXOanXd5pvcQrUq0Yi++2I1nWxp9q2LYNORiKjqlQ42T3juimqANyJV 7Pn6pRL/x0U8XE6xe6zQN1Jf41HYu6R+P2TL9WJM43LncUQVLAbpT4VH3z1+U7Js vq8sphEli+dOIz8l78SDmOZf+3tNrejZqHPfN57WzZ2iI6XdylU047fk9l6XQvcQ II9o6LfgB6pXeAT4yhMQwK8LGuUTkwQBH8D29cAe1Vqt2uztJ4NfiEd+n6EsH15m MyAuZDVWp++T4m6w6TJbcFBL2LioMkRb6zsdb6XK2QsTDDyEqSGfBxNPIpsRJTqP mEjrOuUCgYEAxvyWv3vRyptCOZuh9e/6Pt4eYoAk4gLVFuxKNR+WXjSarPYAVtt/ 0u5FbccjQQcZRTej8rTVKHbRA1jT56mPzutKFlftpFuoUGoeDQ0Y7g2nVVXnHHIR KeBgifVwUIJQRWfsiVt2Gq5ja++ccGoJHPcnCHokqZ5UIAuuGNEIR70CgYEA2Jtv DOnIJZ/4jU/IDnYeR+9Fbg6GioB/s9e3j4wJDNZXuQ/ju1l3kBfkYLVaXzgCNgFj UECedrt3OUxWP6cp2dXM67rQ9uw04Fnl1IG/Uxp4icxvjqEGI7IkxCFFAXnRdA4y oONgNn8jCLhuD9PqUEXAKGiHkLr+FygDye0tn4UCgYATVn7L8xuLRhVkhdRykzTN oUZwqiVrdX0B8kqv6PbzBse1YV7dEg3VEOTca0royejRyjt7nclNWmarnZlSXS8l m8Yib78fhuzPi9CJ0ikHEXqel2+TWx6B5FVdcuXMXS2x4QyiuKm8pA/zcGDSp+tk zjwT3dLsTP+98YSk0sOsPQKBgQCRpKGcyyy6r7+ONNDNeHqP38CNadLpGdHD+Q4B xSTors65Lofvlw6fopD6vbYQRDaoXXKLqYdjSlW1/zAXCK1JPUrWTfznqpc7Kvcw VjVxCWF3NjDkdD0Oj1/NSJl/jotZP5qnN3uf6QiDeo72sYThiKTWBsLwe+sRYuR5 R7LfiQKBgEG5Zdbp6tVMxI6CPdDpVnMujkoGHHdNwVEX57I2a4Est9dXp37EzSi5 /HUwzsS7WR1VmSxpffg/05yFBW1wpPW6Eydd0tip6V6u3XL8CJDgxLUIg5GMWFOk tvWYmVQgapm+yt4MNe1VVui8FL5V3jxJrlBJoVherf4GYF3N57Qo -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 4096 seed: Yw== zfec: required: 2 segmentSize: 131072 total: 3 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:MDMF:tvewutqp6vuesamnwdvshuhqiy:43e5ze3qcoatis7rfqugjf2s5oty73qr7bcfgschuesycfsi472a format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEAv7qz/SkzzSobYo18DO2SgJN2j3JO2XWCWFHBK9bO6R/SzFmP OY+Gx7WFU/jJD0Xzs+bS92HcUgcnF+vC6gAAqcRaqiyDIuAWumrQquDcUyShKwcJ QmWBX0Y566teaguVf3RKlLA/s/Oe0b0CVV0mFmr7W4aPjszTXTH54whHAuO9hdha vRZzqXWKsgH6KGp4RFMh84zwF2Cq34YSKSg2+cOl8at4L7lW3nbQJF6GKZke4hd/ FtSAM1U/ynJQcIFfVODmTVLnj5xXpLtT4j0Em+EpDVVuk8vQKOX4k0L4Wkc8JZ5I sICrCqCEDdmXxOGRsFKWEnePXfl0QdgMDhoCbQIDAQABAoIBABEcX6mAJAQzNfwe 0LYzUkf7oRuejHnsz6QD29UGl92x/jVIPYzaCcxzSexbaiTc+H8S4mYQi5dIgHfV OHEmFz5kweCpFfpRUcXzosl1RZnSouLnqa8NcbUn283JXqThLBzoAICc6s/WYKiq 5nU+2cIrbGECineollPIEjCYUa9orc9Lzi2k+SSvfXUd8bWJunCmNn9fOenSYmzu dzabXlDvQnaA7C3ym/2NsRl5w6q0px4lPqnL8MJowNsBYRkd7pgCRSQqd3eBCeHD qbOT1SXyjCklnse5bjEP+rMUcMRDktFCBw2WTa5GZcYgzoWtDL8WCZ39jJQTImti qiU3tPMCgYEA7NY+pUBRrtkdebwHiv3zvD7pKzGvUxxyY7Uc0rtm8zIvty2XNU/A +3Yle+7ffv63Z0ub/0yTTFw4kSl/jE8iQwqySSmDKZ3EhC7dWbbzOjHIvtVvcCaR 8EbX/jpMpASX10iYonJXAwtrtn5vm5frKKjm3sxGNORBK1M+OllbOosCgYEAzz4d quPcYInY99yYp1g992vuBj2bSkD/9u/lbCwn1Z7gVFJDBVVec7FAXm+3Kc/zi9s+ Leco7DBkz1IHcVGchF5lGaUQrOXV3mHZEbp4mXfNHLkwMYjFCwZ/70ktXm++AmlU 6rFzSVtQyLj6EUBZZ84G0Yq6eQGqZydo7WjxbecCgYB7hASp5FB1UtAXg+OfLnBm 
FZ0/JKteOfDCZVtB3/CCFwNhkgpRCGYJ/wTvjJXMwoTd/0W9MK+FXHc35Z+aik7B DhwLIfZAxwINOe/A8TQKfppGREPZBpSH7jqJYNhFlgumgDryRZVxhgxH4crNJ77B tsypF3np7by7Hq/OeHmmnwKBgQCAy1CyuINn96NAfvbb4Uo5bvjxJe5RWk35ECPb cyGab+9oV+tQ8DoP2lNvnSwOry7jdvCQpH1ZM8Yi1g7MDPUhimx4YI4ZdYjReKvn iaSTc9GkDS73SdFzRanScv7gFr/WTdG5PWixaS+uXs8CU6R8j5zLMtUqiK93BhX+ nV6VdwKBgQDBeauKIaw6AaWFY5ALiKmWylQp50hF5S/dFocCzVm+isnmIhogjwu4 cYG7zKtAOU+6a5UhZLw4j2Nx/YFjExHpY31qEap7CAZ1u8VD/MqVzohweh908Yd3 X2X+o+t2k+sxaRCNJZwYgpxpThULs17PoFfgKS7hYUrbwk1ev4SLiQ== -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 4096 seed: Yw== zfec: required: 2 segmentSize: 131072 total: 3 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:CHK:aagw6esdm2msphvgnr3rlzg43e:zqxtrlee6hh3vtfg6ihtssjqvr27tpvi6gkmngnhiguttjz7yyja:2:3:131071 format: kind: chk params: null sample: length: 131071 seed: LCa0a2j/xo/5m0U8HTBBNBNCLXBkg7+g+YpeiGJm564= zfec: required: 2 segmentSize: 131072 total: 3 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:SSK:4r4hejfttcshuxaatgims3x5ge:25nruf367dyhnsykeid2owcdrbtbg24yuvynqqswvbs675ieaa7a format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEogIBAAKCAQEArW/N7PGvnQp/v+hiC9FjnwBTA2sTB7SRXQsta3Up3vBHyQFk RCcpGL4/oQA0tXH0QkG/nIyjT7VwUHWUuTs7HuKIVhfg42p5aWrwW2lWbZpCed3Y +AJCp3/pqSd+7SDV+2Tc15lKLsx4bmN7+hnVpMkTDj5bnpTFPkjL60SL/jEduOvT i/K7hJoIiqkccaA/+ldlRwVq9GTom7RG+AP6l8/tbOUyV6heYCFOTe5jPEPhcF9p Knsvwu03rF2HRjKqb4TcdqGjZqJbcMnPIlJRbUDwHOwKRpx+2SqeQFe/jrF61uZi J6EJdliKPIrGU2GEEz+PCwQwvUXrZ78+lfKU1QIDAQABAoIBAC//ZhTx2fDzC+G3 VRMCMri5JUAn7M9QGH6QblRzy0+oXaVyHiaCU+xGEmPjI7MnQzrm6TlQ7o1LoDaU I8GTc8wbUzHIT/N/7vPOpZAWudWBQG5rh/Uy4vFqA+dNR+ImGTIGeturz4zeB2I9 Y/0WEzsUbpKdjUt4zpFrIgwNH1UMGsyjkdgHOBFW4j+aia6cV5z1Yr/3vuZ61Jad Jf6Mz5+zyLNqKB9iIM4HBwJnohvrW3n62o8Fxu10ZiManu6DG7X4xCUrMk34mq7S F/M5CR5a5ZN6ZFO+uGlI5Vn9kKXylYDyJqezAfS6eho/Nm2J6KNm2lvr1URI8nFQ T/Jn+e0CgYEA9GcoSa/Nt2Ulzzalmyi8Q7TfcJCMmPVCsMYe8J55ncYmBwyyUy7u TKd/osu+nWsuTxtSQ1bbWMrKoMghMi+mfc2cDGIjvfanmr4jYxpiiUy8UZdOGkRo QBovn5jWNk5yhC6PksUp2GR9ykezD1KWSEK6RrlzrqfYTF6F3nv/qNcCgYEAtaqY 3NizH1tF7uh54bqgke0fxVMN8egv0uSlxb968l9B7LXkQkP6nQ9s/jcSnlDUpu9g ulu+IXpeIaIDMff10gdKDEfSjNKHwBIgLe+BD3xkGYbuGSMeffO1f6apCNAvceoi DfxgyEHL3HGnweOdJXdoMf4AkBV7TWRzlMSDXjMCgYBeCzvDbvSXt0IfRAXheIFJ BFZeOCEB0o5A+1t4d2KQxWhomggcXhiwQluoxwGoDVAafIbhBpEMz6u8xoWPjCpi ijWbxj++nyTLNJLlVYfJEU/9jV0uWlhLIkhk/yieaP1Dw67XaSq666BDr+dE5CCT 2alYAZB0Cn3+lPiqLciorQKBgFcOb761ofEO3k6E3ZOMydHkXmtDR9V7PR/FLqO7 BQINIBx9detDhF+rusGARs3TUnTFFgd4W37TkzRu3TUe7JA/qf6ElKOjaCZlCUK6 GZEAPT/2Zzdomv4kwf9wMGTmzzW5y6QEI6UT0svLM2c42l/P/wCmBkMF6FbIIpNS MNn9AoGAN5Q51YWtiagbvyAzvanQbUPumSaU4e2Pj02sUj18XiHR0H05lzcQPa00 R3NRqz0/V77zVH78+hiUwaMq+cS98a9en2IYNbIQRzxGKQx+1f9+0XlpmJUe93k4 wC7hG8l53bXeMj5NXC2W2RtzDRcfnEiwcIYUkfmF0DWMKlVdLAU= -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 131071 seed: LCa0a2j/xo/5m0U8HTBBNBNCLXBkg7+g+YpeiGJm564= zfec: required: 2 segmentSize: 131072 total: 3 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:MDMF:s4hyspsx3kyont4eaad5jecjzy:43ak77lwoa3ent4nmpsb222hgb5zcwpcmhq6ct376qn2747u6mla format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEAoV5+u55DxsXqpuB98wwyL/XrkYXYsqX0kXcJ+uF5XMFkSd9+ YneCtRJJHeQvilh69d1RZcisuVpXxKFa+IiTZChXWih8CTtRQQArp3XlSllYlM6N bSQ6h3GOU+M+VCo78AJJBLQ5MIJRJFzJVOSbtA/b1qV/tZ7NrXkA0c7wQz/bwwO+ hlXdAuwPlcR8vyDkO0ZapQBlq2Mp872l6S97vjNX4mmWmJoMRuHwSciyntjdn0Xv QIMy/tmwrM1K2yxjjLoXU6anMTNrxdGG8t2N8j5hKprX0UWsiEAVQ4otCzJvmYwi hxCccJiLchsnSVRhO56KTUO6H9PmqilpKIdPgwIDAQABAoIBAEY61Ig+JHw9hdbz 
7Azb2XnTGx9986YooOywNKk5+TI7vrSB7sTXA41ftG+scF5TDMy1cigMstOGdJ1Q pkF1W0RjZEUKSpVP+hiChP1AS7bUdL9qt9Vwx4JMEygCRg2mRei6beH8t8kbZkof kcX/Kp6uqjxcUd0PDK+7cnZdjGaPkuAaiMFe+xpw1OQBVz+g7AMMMfWzSpPdU5+z 2mNnoPzcePbFojXbv4YQb7IExJxp44cS407IMrM008s2SN5XpNEsM62JdCbvCQuC LQJXvlccg6GzOv68QFtiShAWhskhsAcDGFDZLJyLPCTTUsav5MJsheoD8gCo1ogi 1yNVFqECgYEAy+ZMATNtYyRuP7w8sW8FbK0uHRwtA1XaSfde5Edv9dy8bVn9iDik G8pnbYWoUOz9ol5PWG5A44WiuAoO1/wLBsbNXyBM015SN/zwh0YHV7G6I5cs+018 2RiyPECdQjYNPvO+JaGJ8KLLwh71rn5Jijs1azQNHWOoHqz7hSjC2iECgYEAypol 8eS5bo+Tet9UuNirpozlBbjZxad3mhlNbbkn3yCdlo16lTv+BdTT9iUYwqdvm7r4 f1qLpNSDD1fSQtkFwLKVJMl2eHt/kFxZ6i3Qp47y+x/0CXRrwUcBrOKXRDisY5uo KahCoRJnkcoFXbSTUd8feJZKdgu3d7j0Qd713SMCgYEAvihJOdV8brnLGCW1dMTV ikT4fK2KTVIEAndxR/RXtjPmhxUmHaS1aDWbv8im8NIUuRi7Zv1sBsTavEilD0k/ /1Hoto6pF3cftpduurnUnzRhJFAY40Wg6dbeYtLf1qASOmOXMgE1Y/ZvkNrOxa2B aClP6Gri0EXgxLsO12DsWAECgYAhBcMNjFRVGv3U1zX98wL3YJurtRd5yfQKn/ko 2zcOfUhyU5kZXe/nj2sFAcLpZ1Ufsvfx+hYsxZ6fD5dr4ee4IuOAXX374VVHeGYH b3RE+13LZMfoCpvNov846K3zJrGigqqEL7K8gj1zW9RIE7i3bYC5rPVyDDLZRsI5 QlgctQKBgCPvT5jiJXsM62rhhO/7HxrtDO0o/CijpZiXQkPaRuchNdNh+KaleHg/ HxYQwPLnDaqxKt+FBwgq0Mu3cRFuAN1snsjx6dvpFtIQtZi/IbwSUf2zEGfVwsCn jafG20PQo9VmtwInm90Kqbz+qTXTTCTzwAFGHiq6tPoSggTp8AYv -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 131071 seed: LCa0a2j/xo/5m0U8HTBBNBNCLXBkg7+g+YpeiGJm564= zfec: required: 2 segmentSize: 131072 total: 3 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:CHK:e6uhixvbm3g5amrzdzd3mnmsqm:4n5aafrb64sqpfnhcdpdfrk5gh2qznprjky6q6jybuuqy2q6z6mq:2:3:131073 format: kind: chk params: null sample: length: 131073 seed: /N4rLtula/QIYB+3If6bXDONEO5CnqBPrlURto+/j7k= zfec: required: 2 segmentSize: 131072 total: 3 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:SSK:32tsxhwsbircqp6wt3z37xh4ga:f3iewkiw2njala45sk5dmmjsl7po57q44kutdflgfudorwxox5yq format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEArrji79TMBXxEgTKfQOBjShN0/1TrT9AGpPdxIt7VEUP/pTdP r25PjSpO90YXJsQYExyf5Z/rp88gHzC8hfJ7JBFsRi6FNG02303zhAzRydNn6AW0 X1+bfjH3ELdf+skvC8mcT6jZP4mRZIQOSAC6e426FeLrjB2qbHVj37pXyKSi/OvP Hs9esqG74KyISQ22xD9X9aPE5eK+1S0V2Rqbh4NnQg1qoA0iUiAbRJyga8TIpRuN KFcprOLd3POeIExviCPjVosH7Ta1SvDH5nZ0qsV76tiZGBzads84wFzg6RMdF84d EmNhb+5crWs0ImtXzdSWDF6+ql56c4wYPUANNwIDAQABAoIBAEwKNum9qNUyUfYQ e/KWNWAFu8Nrx8VCecHN1rUgWYZcG6RhwBJPZdu/8AH5xRWf/gJDUOt0f/DWWdp5 MXLyJtl5o6+fi2VXqqvglvx/P8YgdXYrFWb0iw2O3UGvLNxONmVg5uBcUcAvNNGU D0sS1hXzhmsECRM/ze3J4R97T70IAXVoW/1IBFKVRVS5YHEYDy8IHRm2TnhXGMu4 tp51j//PQwR6TB6vh5eBGqBQKSY+Iygb3r2Doy6ef/nFy+NmL1vX94IbfueGNb9/ mNB9obqiqikzHV9573x4oahvinJQixuWkmPzoVDKtUsHlO1xXyulZjVmPuJyl5lZ NCUGFiECgYEAyHt5brRA/EWoFSZ5/YdGZa4B/p94qj87iWCilUkxrIDTYtM2iMiv oGDp6waCiF+QqiDAFY6Ggq1n4wyMiYKwfD9ggGhHLcdtnaRaeiYoOBNPNAMvLixd saw0g+GOn9P0QpHWanqIFF3UBhGTNLoWBKI02FAhPvbruMTpM8Cvng8CgYEA3xs8 MBPH3ggBENqg7jgye2fZ+IKaQWIxFKa8kIvHjv5dasY2nBxM6xcZoIyz2XwQIfq3 o2OU3nt9N+GQ36Y5m46igYTGkAlXTqMuq2ITlq+DTmLwLPLTXRRGPuLsdTtLJOAa Sh0Ec5JqtFVlXNVmtsWMlrPaXty9ik2/S69mRlkCgYEAsT7g9CvvDFo1KUXUMn5n kbvOzaOF1daDt4g1FZEZlq5qtQORQktTYpJsHLqrqw/6YT8FM8nHSD8xCr5sfaKK j76kfcIzs9iOJAJLb5TOmA0SSCTMkKDu0QczgqlnJA0K9dPj4k2kg4UUz6y4HbSr hLs3x0rIqdc6PifxGS0w1qcCgYBdWwkZWP2WA6VmhwU2CR/ekXscyJGBcHP3Hzni BgtP41H1ntE1C4aIDJd1ncqX45jgjweOf9nIKsYfvuwfGXAbjlijd4qatL3qss+R eS2XLQP1peK3/DfDR/uIzu2AtHniCUAW6QN21Lp/kQgkC0u6iPkmCkYC0b0iBRxZ sCBMqQKBgFysV4/bJM86H6lq6Z72bJPoFCrkvdEDvWPyGcRAsdzp23AsxzZyXwe6 jOclUXpeQZ92YTk6G0+lijFZTclVLVyF3OuT7lJn7iAt1nWP4CW1UIDn3AsYiB8v TnXZ588sEdQUC1ntog+ikIZNF5am2fpC7uBLUtvHCzgbf6Zg9sWB -----END RSA PRIVATE KEY----- ' 
mutable: null sample: length: 131073 seed: /N4rLtula/QIYB+3If6bXDONEO5CnqBPrlURto+/j7k= zfec: required: 2 segmentSize: 131072 total: 3 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:MDMF:chrm7iotsma3yt3dw3sc6tr4cy:dmhl2yrc3bg4vjdyuc2xvcdqflt6zz36mpchvc4bulhu4c7dru2q format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEAz8YPORhy3pEpkHDKkUXz3UJXmGEQ+88Xd18ingrmzACZ52f5 6R6ZCw2F7cAVgrWbvPZ81jClKgmatIaFqvN490NUVGm/BgyLd4kDVUxqVu8Qb8MK dQJS06N/xV8ZrJQLFp3mgccH+dY0OIZPYMwRhchACbPWlgDngIwXkySe+O89b1Rq JfCjj+t4VMYYLDg4es55CFum8VUQmK9Gno2hZYGoJUumIlS6bhQ0OUq44Rp0lKqy 4pB0e1EV0MnowzXb604xFUTaWN7kI/2Y5DvjgiwKMNWsdy/yECz5gvuesr9zw1OP VuXnAPt9W9p5L2Uq0xHjMd7vE0ns6CRlUUHeSQIDAQABAoIBADGDa7fJv4ASHFNX SbK2dp+s8mZ2BT/Y0WkJUxzSEL+fSg3nDZt0BvknExo+CvI/+JnerI+3fnim2sb4 As0jIdnc4fEO1S6qGSdWj6SJZhMK/AICOxD4yYe1YBrHJ71kd0L/xIF7ToeHeCDb QvnYj7lvp/EX+gR7uS99UxXl5XuBS1eZkw1wc6DW+Auz3G211ge5iewdG7IceQqK clhpeLMbi6JOieeAIwQFzjLuWr05h8oRSU7f1pjBsy1oXSc1jqbFSPqD7LMv7gQK qtdz8yLr6zewUlVdljoY/Iuq8J+pOXnOLwtN4/aeRxHGNVd5/fKKZy4GhtM9eCIh +wVkE60CgYEA/dOEl8KGyD3jpEvIUziMpe2MKO2rWtGKmzM3atXAMyoQamL2hA/P xEkNC419qtA0Su5OP260vJlORG3DULchOv3PxirUOnRAgQ/WjLVNqmjypqDch1qt 56KqA2KMdqkRAzGh2rVbcS2RAGJAsz9qK+RK2GDf/mPvoAglGBJu2RUCgYEA0Y2T ukrd9AjBIQk1wsuQVXi5K411RSy1cjZKDMoWjkntsuEqmw/u1JztYzwDcpxe7oUG Af7VBMlfqC4/ip3+xc/H7g5VxYfKeRfWb4Kof/hBcnw1WdkpB1jkOyD0crqGVNFQ yNRr5deCii+Ptw0CNI2gfVumKV3rI3r8ecu5lWUCgYEAzy33+ztPcmB5z9//alYt Dz6++aVNCXshrJ54bF5+XATIk12lo5OYXbnWyQg7e9In7MVo3wPIyGxF4zrIgriH ijQjhLfutKuw/udjk0RlWWQCM5n3hw+wBPLT3KFyz2QeO8ImP3NqaQ3tXifUcbqg OaVEpiskieseazxasrRRACUCgYAUtpfZnDjgPFVKvKxRuuljaXF0OqFS2x58UZ10 Mi2Ulv4l5lfdx+9lmLVmT7JPi+FeVkvHura3qGgKKbgiLYfXabhrcYNM6SU8XvzV POcG7zUY6eokHXEOxtyYc6N2C7XngGEp2MVpjKncmuLrxx9mrnEkswBzqlLpyOhX 6alIeQKBgCDaVMa2HCfRQJ5p3fBT9aBv0huV8LsyurN635B0rF6qUhLA/bhYgDfc ADjsEsgz38LahiGqE52YtrfuoJwX25MdFtrDjXiGVpIwcByqDu0lLK8ctox1OufO xRSKmBf2aI6Ruhs50y/Hq6Q8m5Nshtkq3Kmqd0oMEuRn3pynTtHO -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 131073 seed: /N4rLtula/QIYB+3If6bXDONEO5CnqBPrlURto+/j7k= zfec: required: 2 segmentSize: 131072 total: 3 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:CHK:abfmzuiczjwt6e67f3uoyg5i7q:rk7ie3afnp3xgvnxu5e2vioq476xqwslqxgyx3i5xxevsjhkcvba:2:3:2097151 format: kind: chk params: null sample: length: 2097151 seed: uqWglk0zIPvAxqkiFARTyFE+okq4/QV3A0gEqWckgJY= zfec: required: 2 segmentSize: 131072 total: 3 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:SSK:fqulzqs2c3wfj72rw5qxe6a5bi:3uld5nrzgjvcmsjvesheeuowyhnmhmz2fdlqzvrkzyck3g7se4ma format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpQIBAAKCAQEAraetHF9xlUFy1ZkIoBHJ+5WhRkta5xo1WeN4/1UdPkqzbhUc n7ujkO8D6XuSuhzDLoTUjzYvO1BVkIegZilF/seP5EJSkGlbhwjsiLmWx+U5Ca2g 0Yt+1dhgx2uVapn5sxlarlSkexbjx26+fmiibDrasOu84R8YvMKybia+H+KX3+Yy z55rqjqn2B5iIHVtt/IOhHSU/8AIBwig315Dd+PfseAiocZhTEjloWpj6M9ctIHP Npp+WoytoS83NmqduZOh4cLRMBNxddAsXj/XFrEforfGWavcxsUpgKapDecUmZHh meWvE8A3w83l6PlDmFQ3kmDoD5o1PXCB6kLTqQIDAQABAoIBAAHw6+md+pHy+LRW UxUOtD9SHgZ79iaqKzuiKGZR/XmXnKM+NNZFL7VfZg6yLI2IFz3Ftbu/aaw2KLSO 6a63df2rhELgFsdxM2GL7uPdBvMDIsMu1StlqWowUn8E9mhORoIpf9MXo1HWpRiQ oaOd5AEHo4UqSa+j/E/V+fk3s3KwxpP9Gsgh/83xfb5U2D2WyXGFdZiEADwWu7u8 Pee1mTsi5W1o8lCoRI5LIVdIdLfNMUpLlXx1eAKhv9N6IcJYn84yTbGKJSm6OzO7 lR+KpEZcT9fovahhw4IvYyC4kN8IwsnISIeAOl1LDEPnSC8ItRP8Jse7AuYuox4C ki7LEnkCgYEA11TWaXl2ltiQqMP+UnhgjLixC903Yv4pVRPHMNZG6azylMNhPDZr HLdwvO1Lc/3WqVsFssHGYJFtcNMM2gbcNB6k9Y0KwzdDe+g780Nj5oqpmiYC39as 
7cwXl99wQYOOzwVSjaGAZJKaKJKCGF6NhAz7QNh8oH+xaKPyJSxh6m0CgYEAznPO Oia3vGx5hRBa8VLZ6g6JLcL5sp8Ql+DZE2xh0UJ4arTqF8XcKoONGG0K48KPHAQW NF4VRAoKtEZrjP/BqOJtjXV5CPj9NBVWI9x4XLbsh8ZIyk15lZgz5TSFrO5ENIWs YKk8RujLB2S1OgeWbJwBmakb8AOUDmiztBVUCK0CgYEAr8g18IyTXrkT/nFhH/nc 94OeJE1Gda1+GFG4/gkugnwI26BTtE/ISP0HL3OXcOz7W+1OTYsaYqLVcJEZoLKQ +Is7pqio7IwkrvX6Wq/c0crIgWoeVpRtPwKpD/X7McAvyJhTuALrSS7UYeKYCUTG ydG/GkSgGHWlYgLUHbyJglECgYEAsF5IOG9pGYQF0EInnu+rkAN492oQjKLMtyLz 717wtac2XdpN/Z8fNgaKG+rTmb1VKpbnLTeOrUBy4o0iRiMbmx5MfsNzcdHb5Ymw vBQVkwcGS/t9pa3IB58t/kn/RLuL8t6bYzxQbTdkct164Kcov4IK7+2DG2jDLAgQ NPDfiEUCgYEAhlf3Y70hQaGmH8q4sQ1Zzv1KDk/k8vxcTEoqB4rTjFuV7ueqCtHt Oa5VB+JofexDmfs9TueIwTjFsFaCPw6PSqaish7ZTnzOwr8mzHsyvp56c3c5sQsk +ILbxcckY65gmD2ZKru3gW1oOpOpk24n6szu3UXuiWp8+AXg6sTsoz4= -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 2097151 seed: uqWglk0zIPvAxqkiFARTyFE+okq4/QV3A0gEqWckgJY= zfec: required: 2 segmentSize: 131072 total: 3 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:MDMF:wif4gkouxqe6bdypqiqjmkzy5m:lhv5p6hp2ok6iio5rar6uu3n5u37f4k465o5hqxzgsaxly44bowa format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEAuwTMW8VXBzjm++6G+QDQQBS9NRUNiKNN8Ni+TFmSH+mIYt5c uV+zZnZyvSiHdvcQo3VrM7lko5qWORmfmt+pLhO/BbTl8G1u0+RNnEi/++1f3Lji UyL+U5N45+HhQpIIrIPqp9JfbJwxNMFZG20JGs+HQgCJs7APgp4EQgccGlizJw7u 7DfJJb1z52y5t7rESKlFPh0hhYR6/3/nHa1i/kuigSDtE6HZHd3cx+BfVBE9zUqy gkU0sdiDK8OzIIwlFkOf9wdkl27HwfRYSkbqx4ke0yejsdEtnmqwknd4d2uDnWBY TdcXn9+3kGknnt31GVz9AMvjlNHhL+SBq+wBMwIDAQABAoIBAAD3+5UsjLtTyhd5 Dunv/07PFe996DDfPwwR3Arrh85lr+ZwRB/dGmbs/Eq8YEiVTB0LCE89T1uug30x Z0Luu+4kiNznsWYjqblfRCsn/Hc047IrCW6rXMPldNzgH7f7TE2xQGJmvzr2k02P hzfzLnbhV5GThEU1iitLIQ77yHdb7UvvlJ2F0CNpsc6bWN3udjg6iUPeQ9HXQeCH YakYdqZ2iGaISE8Tk+GngrMY8UEDrJe5loUzR6k9IG1NwLbIbbf/vE01aMMIvYqZ VTeJXcxZNet8KEv+i8rL5hXgTKg8cJPJ6ex9HO1+aZ3J9TDS4zFl6Vr4YFAOW1q5 xO3D09UCgYEAzXZw93h/CZ87rBnP0Y5LZWFZ/+iXZ07+Q7N1GM8IWA960EkeEb4/ ImgPg+YD/YoOi4SWq8Ckv54JgpMUr5HLdo2KTmB2TVjEgPCsPOIxO0SbVhkI/kyu FfdkiPfDzFuBmtdNLDRSqy5wX89OAgVN4N/fkO/Nf7rHgnluQoNLynUCgYEA6QT7 TM1vhCM6UGUuXC7NJfSstMkaNAmE2FOdbcbWgWXmc2pAFE2Al8ypq/EJsVWKVDeX G70pDe3iV76mPFOUGhjIthdb3J1eJrP29QBkuCGLd4rCcbJlhr1v/f6DI303zYCZ cXZ1loch5psTFaA1F0TQxiAg16XZ6YXX5sWUmAcCgYBlnrUU0PYULjt3TXTp8nT7 +YBoAAQSRpGfrny1/n/j/hQCPIewwuW7ALjbxcInfkbfXn6fCDLzyxhtCo3qoDN/ uVW0miUo8ESQeXjWzBEJfU9O8Cbwj8BygN+qltCynHenu+Ehged5XwiZepDckv8H v/J1XwXGrPzMXX7ZStMLmQKBgQDdbMJv5PeHFQKgysUXC9Idszc6Q68Gq9T0y9/Z JQ1IwNAP9HMX193OYckJfm67eJGOHZUV4tZUSiy/PIcy5Cjj85Eml2PPbCq/lFuj zM/ouNeSrOTArckUFIeLUILFAoQ4X29wBiUO+TIZtFqaPja0+ct5uaX7xbog0fKr dC0TmQKBgQDI46RUW9S72Rtp9DPSyfqcAR9vpdS2uUKlLqM0h7gyAjGe+QARst3p eeL1WFMMZKbcEgqjJOL549WKSt6P0LsmHBgKsYX2ZeKxC9FOqdthuwWLUwMoJwA2 z+U+YG+nKqIWQJh3wcUEOE6f4/FbumdaTmgmb/vvnJZDJhaBSMvMlw== -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 2097151 seed: uqWglk0zIPvAxqkiFARTyFE+okq4/QV3A0gEqWckgJY= zfec: required: 2 segmentSize: 131072 total: 3 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:CHK:34bzejdzpqinmuzdmqenkidf74:pcecbl4ulygiadgdagonbgqmpb4lomjz4v4vqyssr56reyib4q2q:2:3:2097153 format: kind: chk params: null sample: length: 2097153 seed: BTBX/ampNfLU+ox7xipBGiaSbgC0kcB8Gy7BkJB4oKI= zfec: required: 2 segmentSize: 131072 total: 3 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:SSK:opwonr3pv7h3vcqn2on4ucve2y:necbkc6fuo4ahphse3nyfrcsrgjywctzxnwzt67qxzwewpklmkzq format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEAzDTLbYcIX95OIeuVqrdLTNNVsE4d6/aQkVUAHpl6b7w34/5/ 
jvACFGBSmiJ70J66fDpAHrnTu+WxZQTS2xpi4CIla03wkEiZ+BrlFekODpot8Qtd CqhGD30Xv3KYIG2qo6tHQShGG+9GY9/hswSzfjaxMeIJIjy41XzTPdrCFCVzrLqD y2vpEbDYhsuujSiBMlOO9c2l8R1DAb6s3ci4X9X8ITp3NT7a5KlA6/Bp1obEiqfP ljOYNZUWC/wyBUsVXi1g1iF9wK9EbVcWAgYmEKL8d0wwBNkGGpyMGD4clxPS4QzS NEyh5iJN+sCymNWBkyJj0AWQT3G9a3I3G51ubQIDAQABAoIBAAWbAWaNSV6QVKa7 t80K4QdH2ddQHaQnjYpfwfQVFHZSvVoF12yODBCRIFNY1PtCEC5uzunJAhXrVTZH rp4TGFm8tjg+2Hatd4SHAHjcf+VIuDAgtrofKmUscuVveNuTBxcdEYSpXVtQ8ya0 s5Zdb6vsRmrvIH8PGafKmGXfRmqVHAmT0fmV+aOrk6C/Q3sC5JqsyjVkhHUj+OSI fmgSzFAizPPEKMVBwbBDHGkWYdvPp+szWrIWPareT6JtnCNL9mDFZVjmVSIdz/y0 phHU0D0sGT0lJGdKJTRv+6PVlL/y/6BRZ8b9zVPAHrIpTRY64ILa5Z6OW7PYlhQP P9a/14ECgYEA2ec1RSbVj68ttDqNFwqTFA+1wf5sYtlzEaCMUhTsv15xdHWshJTx ibGyWHBeVsoPI1pxNzTcGJZCrS5J2BvmdngGyYXf2XowaHGFtXkg/Zmwn5GHvC3X aHM71vXU2P6Ye7xvPWQZipK6IWGudAFKvkfvyeWjELlpxgaj/9ZaR40CgYEA7+iL /x5nf/+5dV0NLk9xw25oK22r2WlI2qVyQairiQYSjUgR7U1D7t3BKjKZe1P4vD75 RzSrEHgxLo9A/7S6MKdJJ0fDjt3E/8UVbde/J1QI7icHimeBndsBo6hJBVWi0Xoi VbekLOCBlSY8C7b+6uRBebDl8q3BHHFICk20GmECgYBF9zIokQ6TgykGrKIu2stc 7qpqrrm4h5+l8kn79RILZFTDkyEgtP5VOwRL11DDRz/TFzAxDLz6/AxOtQUq6dJ3 CZUMUfsNRmmSr5jCKzGHnDiVE9JkfseilxWIsQh14FGvsVJ6gNCeqPwwyb+NKfkI 3epFhoF0VkR7PBiehgIY5QKBgQDMWUl+KljAt4MyS+thSfw+GjoS29zoWHzc+NYE xXYvRgPhYcUbW5gEy9Cwb986JIGXXxCYLW2UnrxNy2nzJO7/aE6wbblOZOpbbnVd VcsV5cehi48pvhay7gxMaZihOZtxUNYUK1Nlgmn+ME4vMFWcoIaA8EQ93PDDmF5j oGJLoQKBgCOlFH7EUlugO878go0qGU0E/t948pBGGM214H0iNkxGEgA9YhOfHVWk cNpJQcFULt1ue7yDapBypWD0Xh97bc+nuTV0u5mR6jt+E0qm91T74czMCdVKvAzN CfYOaXmNMWOJxdT/e+VJr9KDeF5VDnUl2EX/sGJXQETZQRZ5RFNf -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 2097153 seed: BTBX/ampNfLU+ox7xipBGiaSbgC0kcB8Gy7BkJB4oKI= zfec: required: 2 segmentSize: 131072 total: 3 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:MDMF:cwkenndny6nlpubc3mddn4jdh4:op3xzbo4xhsmqwgsjfi4oqztbb23grvho7mlmzgzaq2qudpucota format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpQIBAAKCAQEAs6M2GcOwvLQXv7fPJMCmLUSi/ulwirddTqaAIKVQ/sxEovqb 7gx/yhUX6p+K1OaQj/Y/le9oaJR0W0bD42ElSjCopcyDgg482S56w5vUJ1zGAjgD PMGBB5vSpkuEsuuuoH6sDJEW1fLDxraVt55vK9fwAHeGYnvGkTfwi470ArIj/EkA JIYKOsfZ25rJkvaQcJ3iHJlNGZyB/mmFXpl56exkPeHdgAelgmHOgb02uIzoyMZo KK+uzzwkCoLtWQutRIowiYUH1HN8PvRhgEQ7k6HbIYA/jxT+AuqrNCW1TDCqLJjD Q8KTVt5TxLTYb94+3Ndk08czGjhmit1gNcjgpwIDAQABAoIBACNytxfRdnxeW3th Jba+b2xqaXG9FhDBi1+cWpdWmAuXuomgw4lvnP3/OJd7gTVvBCLseHK5ahSNCwMC DWC+yFGCFZ2WJHNTJO3EjsQv6WcVFxvT+suP8crTFHftWhPGj1Crfn8CWIvCmqCJ YjT4Rj7UH0+wRmwDudTpQYYAoSUwtg2XJv0H0CUL0+S+Slu7mL0ryHhYngOK9qiS GZ6Sf+ztYWTW0HTHun9jVzobInL4DxpZJdGJi2+j2WLdBQpUkEnmtIu+8EPHU7+o 7hpqMw1s4jHmPcrFBVaxfXhHH9FhXyhdWXe5BgX+0jbeaxZU/DRV3M1MxTvhYTDI VYKozvUCgYEAw8W3d9m/t+qOLX0nfo04S6kUb0/QaGvxBa206SzED+L4zUqI1j3A oQ1Aub5kRMnzuJoXa6kZa9JSgbaMEXe3sDhxlv/yMOuwDDM78xtWXdYkAkQa2tx2 4O8jkf1+15p9+mKkY4JW1Pnu92kPZwyUqnoeJ/bX/vcSnYJRwkc50SUCgYEA6ubH wm042jqwU4b1FmMHSxa6CuchWbQsScwpkM8fgdocDeorKsySeIYTt5axRQeehYVU 4rs9/vLUzyPOAPWYoCZLhg9b6GxwRKAhbdfS4thJF0DblgbhDvWIwrGY5g62HEf1 yGae9+q4bN0Dsrdx3y9YFzMcYqIKsb3CUJo8PtsCgYEAt6KkoBVuknO//cdh3oFV BxOIiYkScoCdyrfP9ND67/P1cYuyo1O1dtxZlGGU6DmPFd/kjCZIJC1bGzVCWbg0 Y2XulrdqVJ0fu7HrT/SapNaTXFTJ4/XcxM1MTkq8Sj0uYklY7cZ68LeoggbYXc8d PHPkCZSvswfLPFfbnSL2hskCgYEAvRg8lJpCGwMFsKfColvjoiHQcDhxk3nD8UBl 8Ymaznha/ySTzWdTPayJMNAhMfWZOdkEZWTf2l12zK0BB6qtS7aoM2onzWmF0uip IHiN7ki4RfzTB+nPwLANgNVgxUnwdcHD7KgXrnGINzKP6I1eIJFHM53Uat4RB9Y/ F42hk+MCgYEAn31DL0OuzeNEaR/Lb4nOJFJWwCGO6XN/oAujp8bqkuRpkS8WzgWI fkJK7eI3TI0c8k9hF4hpt8y6Nh7aG3FZdXTHMBS9PRHaLgpHaIFCihibSz8a6kAT DIGjDMXH71CluU5SquvYcb1OY0ruKEMlDv4POOnUQiy8T4NPb9K9AnM= 
-----END RSA PRIVATE KEY----- ' mutable: null sample: length: 2097153 seed: BTBX/ampNfLU+ox7xipBGiaSbgC0kcB8Gy7BkJB4oKI= zfec: required: 2 segmentSize: 131072 total: 3 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:CHK:7egadlquaxmh52avu4kcwamo5a:svlhtfwkizf4z7adzqc4f3ijpkhhpoocvemtftgwatzbhxhxsyxa:2:3:4194304 format: kind: chk params: null sample: length: 4194304 seed: 2NVWOXvtwcAFf0J/g0fDC+ypQbnCU7awZoCxcnhpkVM= zfec: required: 2 segmentSize: 131072 total: 3 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:SSK:yfkgi475avdtdiruhn65b72vai:v5psft4tzbi7bdvht75xdp7sxosfnlpr2hrloo2e5zkutvox6kea format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEAltENlm6LjBC6ZIcNJlDZpnYREFWSKsK2G/FxzXRmgB7wGILu gg5bAK2RIv5/qj2iuPc1pvc0XxAEiwzarWW/Slgbi0p55Jnj1SU2IO/Go3iArELV pvPT3pUHSTx3+mRYcJyWgeofvC7yzcMoR9Khb04/KV+/zkgfugivS9i8phjt/LBP hzQ9SGRLR8CDTBXGnXJd7Ym3Fu6pbnlS9s6VJGjGoymqyjJts5Icu10BGNrf8cUi Bkfl855JZJ8cwpyoK9HT8tirHGlhuT+/EFmt0dhsTrUcFZwCqai44L+KWdrTckyA qrlrVG10WEjRba4Z5dDFla57E6dyhRtXTZ6NAwIDAQABAoIBACjevi/mBSsP3XMg pg+cGV9i33zts46i9XbdF1n2EVDnEWmTEc9s1Hx6jLpO/YnE6jP1yjRVCXw5ewGz mg8jY5NiDRTSOfYZPgSk8OY8FDh4j2YfNobnzKKlADR4jorsZosd5CuQpsj4cBQS rvfHvLfNHJC5weDE6tQfRmHnejgIXvYlL0XVZokLzQPpLmvqjA1ueuQp29pUjUQt hxcsYwtpxf+g/LGhRxBl3HPeKID44bdsN8Zz/hzkqE9F24vKFNcuvjMnK8mM6oin tu3fFLrYhXHzQIjQjtK5UNMFxbKZe1Ya48njuEBwGxraYP8WL3MjTqPyyjDPb4Wt tFZLUwECgYEAwzQHBOE9Q95aosur5sXawVMdZggKK0HNx8uoOjgEsX27gL63emSj QcCv5ccPX3fbuYKbUA6W4vQ44z5/0EqJ9sJ44631FigwD3U3+ZZzeBuzGwszgHHP Byj/CoiQfCV2lZdwhwWxyHRn6IKvAuUnfiaVzVHE3EJrZTkx/qD2El0CgYEAxcn8 PZL8VMQuPbT0+z1z3x8NWlZwUwl5y5mqpkji4XaFWoN7AYp9Sa0MARO/J2g918jo kDMGR3tveMX5x9Uqq2WbDU0dQTmErb7skB62qZNcNA2k28/V3e8/ijAwi1EAaIcM 9PaQmZcFnEglKuE3D9m2j+poQ2+KABOtd/uJ5t8CgYEAkoW2ExKi8xOvgu1Qnku7 dUvXEGROhcPCHAuhvfmYhEY1fWEqxgNOjCd/oQF3Z5jHZItF26Tn23moTeL2+7lH r+Kv7W8BPd1yndfF6WHmUKyyF0WkJfDHjr9WGWkC0z0nswfWnnNGzImcCWo2xfyO VWHPJiwPkamFhZiWD2Rw8L0CgYAXPQgZ4+8ptnMIZP5zlmDK0kcrWgSQfQiGV2Op bd7aRqacX95P7AmUYnSKm9tVsfWyKLTKXHRcabBLLFeQlwcQZDu3cFwDkdJ453m7 5R/pBJtMsl2wRdcG4FlCzy6k77twjI2FKoMKyKesGP3k79kcT6QXfJ8LbUt1ftpe wnsNWQKBgAyP1Ehw6s89JJMrVQ4AZwm0xsBBdLQGcFKxuSz1zfujc9GT56nY+sbr 2PbSIwCfNirtErxPvznuciUwoHJVLUiPryQDR9g4+fq7+Y3TkeJXhzJM2GMM+se/ JRDDbe/EfL8E498RrE0BvyzLnPsj1FWHSRUpqNFsbI6Qm+lcS//2 -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 4194304 seed: 2NVWOXvtwcAFf0J/g0fDC+ypQbnCU7awZoCxcnhpkVM= zfec: required: 2 segmentSize: 131072 total: 3 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:MDMF:qv5ksusf4pk762lpi5rn5l5eqq:pbsbizy6dyjzjhjr3acia2hbfqlim7zp3o7ljxlzw4vh6x7smk3q format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEogIBAAKCAQEA2iBFBQhED9S2ED4ZeUbf2wLzriDl4rFDVEecZdabAKqh58zq EPS6fqT0pUAH0zQV3RhUOEXLENcro6ilO6gpcDminsEsm7jgJNCvAfRF1JCTgMQI eLuDE7FvA+ZCdvHQRyok3RIn41qi35qVftx3TzQjsJX7qKDhnrfhuKmFIbMYeQhv 5PZ6AFWWvdwFQ/3fpqC2yRP/dS+7Bub9JbXm1t2qVaFEzFj7mwSTnhP8SMT0I7rE WqZqRfBx6DFzlBshzovCHT/QhEk7d/QyviH20cqPqeQjNAImsyfFKjnxmIBM5F7D eXsfQSbFlkGP7JAqfW990KoBh/9RxmBd/5vYEQIDAQABAoIBAAiyz73PIhO1ilsk dtSYyHWN7RTNGA3Nvt8eCfUftUe2BkXdrJnngIZrYpwybP11rseF4FnsIph11DYv FAPIhXqFud/12ScOnNWrAsejq6M57r/sUWArLiN7aG9x38WpiAJGgnjUcAXHiAY9 vmd0OEfOzvuMR6BmZgjz0UsRa483589VqSk4y1v7v2XjqJ6ubWPlXvvhU05dwMEh UmHHGi08/MLdsDKaKUMj8Z+sq0TLu+hDyew+fYEcFb3lNwwkNgPxiShJOp0LkhGs Lx6nt7f8FBTYU0x4vcrvMPNGz3cap9pJrmHuxhHrxSC56lQE7jYBiixgXBiFKflS 2nKagYcCgYEA5TzUGcMPQdNiCGOGdzSWsmrQNhPvA95MK7ChRI3aCzi8/f230oX4 uJVJ3o6CJ7rX00iplZbASWD2EmKF4Vr2ko34Z107Un+0HdiFrkYPcjl6iG+JL16T 
Lotx458Taa3mbHwGWtH82z7evWZtTG37RqXDQrIocRwJrkNPEjcpuS8CgYEA85da PqKeI3r13pykjZYI49XR4shpMFF7xFdLG3eJMKgNxLL2z4vvuicP5oP1QhG4wKqD Ie4gCxcSe+4wseBGMOiPVBtSITrICkDVVuM/r8W70ae6luVx4c38tll2fF3IKeC5 533+roN/OUL5h4d4UG30/g3AvxupYggaxVjIsr8CgYANQ4fCNdccJ+70LU4Kd7CA gk2p011xC9u8a2vpW4vSOmY1DAkm1Tme9IRhrD07r0PtpbaqQR6/IC0cwzab43eA 41YMJQjZrSnu0Chr/QHHyiuc2VdGtmItv0PHt9yXsMg0Xri/aIcI6IpayyJn2bVA UTcLFOPiJ40n2B0rIKX5YQKBgGMFRrEph+FibapVwOqxb+G2HMD0uRXkOczBs41x 1ToLRrWMDpqmBwiEMomBYOS/sXvYlL/pPetkMKZiWDcmtUHSd9k31fYeIA1S96Z/ cHcyiTwb09TdZqLlCnLSAUFjGigz6z54UFx+pewQFsGKR1VirXHNA2psgzmPk9pf Ug6fAoGAfqx8mQe5FKrT9gvO1TXIwFZopjd4PGhCF/QZx/1YhVh7wkpocnXJQRYB LIwKrkybc6c4pVziJUKWop26kM3Hs4G8tZIWy4D2GQqpzTvrabvU8SKOKcoUssof pMF5GV25DLc99P/xQG66qK2+5kwqU8oeaf8mElO3Um7YAPLpzYM= -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 4194304 seed: 2NVWOXvtwcAFf0J/g0fDC+ypQbnCU7awZoCxcnhpkVM= zfec: required: 2 segmentSize: 131072 total: 3 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:CHK:ej56vl65kawat62wsxuvbcd2wy:tv65cgghaddq2p7dyvimmff24l34wsx6mhzd3dyprfvrdo52qtma:2:3:8388607 format: kind: chk params: null sample: length: 8388607 seed: w6uP8Tcg6K2QR905Rms8iXTlksL6OD1KOWBxTK7wxPI= zfec: required: 2 segmentSize: 131072 total: 3 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:SSK:6h7uw7l7w7kyvwnfbvdez5iqii:doonvzgovuzwr5nzwltigr4s5qnsmopyeh6a47dgi63nn3rdzhaq format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEAsugmXmLZaZ+2aa+sz48xKVllHO5++98qvP9YdGMUa6SdPETB lzIIrtvff8Krl8KjaeRy0TW7MpSw/QO/BK8aDWPdAmLhSn6gayt4Zy/r3pCgM2Ed a0lkTL+d/+J/tjgGf4u3h+SPktv4plhqfU8ZTHi9PMFzkIdz63thUC+RSSrVEP1b KHzIXU4Q2nKVG3TCTQp/yW6ytv5OBqCu6Mci9N9UF6fNecoSKpLCYl2iNYLHp5Z8 1WPwLLdKoCWwDGT1ufkyLb6nroYB6V38IHgb7FKFGXOzcHBPd3STCY/oFiaTvmgn oknsBEnunVa2kmbyOfy26NOZ+qdqmpPGVuoxMwIDAQABAoIBAFAtZwiUvz+tUmgh 0U4Bq7QOuphRL/p75KDnxIIAZ0Xoc4jvfVzfkPGgWxTcLt9n3KlXtrcYn+jGp1z0 oVYdjQzkLMdlffbPMeBljmOcH9ZSNWFhS/hpXzhgBZSIMtj8WbkuadVOcqOLzm7q H+tBmCJj19cTEVH7ylEFrbJsZu2FMipm+Eirn7bALbUkPlNlzmcjUT4Je6/OOny3 rjMEOhQwnfhcxTL6IRDeu7lJq4l79+1+7HlRGazt4QrH9fjAWNJaWZyDJEZ9wmge TQKueupXiqWC8w+IL/3w+8arPYG05iSxCtdb7ehzF7FVAKQ9sw80YahsbClQHNTH XAwZl/0CgYEAwHwVf8AXyOIDgGAozn8IcmICdlXBf3Jr84LcDlAgtNtbx85fZGT6 r5aMwpEByg2RyhDCzx4S2A2+FGhRbvjUFYV5/Nak/5hN1leTVAItnzWGTSkcJuVp 0VC295BhRKBjdmZVBc3zUIwGKanD03NcKJ99nxXGm7bmmJRrMO9bxLcCgYEA7fEX ef+UDJeHRxI4oH5lAEGwydwJ0kIRB0DfuPKdhXFEY4ihW+rppY3A/NvEUZAil08T f8MUJEaIF8MeaOCSvY1M+z0ynykEAkFxkSfTnCsJqMh0p/2aNfrWVDm/60lbsGzd nT0xMwP7RDn7VYCQpzFSlbgOTwJ/LEFiNmTEE2UCgYAo7LjtdnwYG+W+r7M9ZEj5 eNkpK8Z+QGevWI1NBcBOc60p6Djj8YxTNOEspQQKX6Q1oCarPqunABT/5cYaoBEH ml97YG+oYEt8XRZX8Dae+RRa53iy1GgRNuYP8MSdgLRlAhDlsQoggAT3ar7WAFsB 0Bc3cbvOc67HlhbMSrfqNQKBgQClIQ4z4oUyf+6oCiM2bsFVfkFctdIzExqSOBmL VwSu2T6m/OlOyya/eDMYyMPj/u2iqIRVxGK0EibcptLx4fi0h92G9p+tCV/42MYi AMvAs7WOZx9efoeJMr2P0kw4075IICVkvFTqnLbCUKL2YbUB8x7nPMbhWlA7vFyW dzQEaQKBgQCrN7DDrxZapHLMnyL7Pjgm7st0CZxMxbcY/dkmw/7yo4X4tUiTyYoU oQqtcEzYKFrgQ6OEtq8i/9yVmtgE8k70XVJqpUGM9VR97MRcdzEKypGBnUKOamcs Z28avBZ3ygIN5L48UK+vLMayI4m4RmUtMtU0jFYkhIEcuhTWEyGQcA== -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 8388607 seed: w6uP8Tcg6K2QR905Rms8iXTlksL6OD1KOWBxTK7wxPI= zfec: required: 2 segmentSize: 131072 total: 3 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:MDMF:e6pfkqn4wpggq2lnvc2josmkpm:hcfjov4qzb7fkiucisoes4edaymbp7qzlmrn6rqqbt6tsov6ojaa format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEoAIBAAKCAQEA4AffDXMatwajd6NXgtaCi9gCsfH/r4+0zwHKTdwl120zd/yS 
5HZYFhO6TbCAuipADxUbKgaRTf7NAo9NY+T2Wke8UFfds+tNfKKlg8uGIKC4IsHS E0ay3GVl4nCr7y/Y5Rg9ECmIE0ROM8b+iDKVfgD4ztigPQPv8r3l/XAzU1yx6bVf n2Sz3QzCHlkjbmS7UpcEWifUG+8sgO8Ee0zBsOjdFyaOZMiaTUPlNvosXvKQUGoh +aBrEung6ZW9ahWKeBaqpGh5D+T/pqWIzXBX//W4gMS0v7+GAl8h/JyAz4yW4zac ox3br5okjgEffJbClMI8mU3ltxDTWbKNeps4tQIDAQABAoH/cKOEgofKwjtLGdwG kZ2SBZlM5is0PqNLjoAOyakvprMcdsnQNemTzhgdJhcqod0uoUIf+JOd6OJYMV8A QRUCNc/bla5OygLN8txmW7j4tyXOk6so9Igpru+Vk42lLea8Jry/9vKhnMUo1bsT GkLm5uFNE0VFS7Fl810+IS7/esTmSkv+HNNxFUHjBDEx6ntLH4JYvZ3Z6Brfum/N Mq7KBj26v+ShSrTn7uC1cQlxKTyRdudkpRX8qiNqPrL98b/KrEGaCBsyODx0JgA4 Q6mCt6zdniHrRAXhamFdO3f8MEx3VVYKVy0YyFsWjo4DNhWVKYQHil143CoEwXku JVlBAoGBAOdJUfNvYyawktf8LmunH9WAjpqDtCm8/XJomwAD1ozf3P522+yg/Dxv 2bwW4b+9iI/hXWuXJ3RoAVRfSy7TVg7xN6WmnZoNzv4tRGCGAOqg9+VYy57IC8Ya 7HIgcBqbcVvj1TMIZitRDl2t1sSbzdunFJlbHKfq86n26D7RRERxAoGBAPf4E99M NiY3TaaI3UxyY7xFf2GrVlwMENllj8RpbbVByQVJPxR4Hl7dj4aJcd9PFnAfaDNy SIqgbjcbibsNvpY/daFB/vtXUS+XKU3TLWOGHZ+25LKVOZFJhZfpSqcZHjCl/Tlg m9Ou6Bd1MOTcnUWfECIi+8zVpTd3sJQti0qFAoGAQHrpdQPF0cCCf+KXkn26W0yG 9T7omIZO5nmRVPS8+PNkajD66UKMb9EDE/QRJeKSUwKSh+9RGZvxWvNiQ4C5ylqn l/AWmh9laOl32a0iTkdoNTGHOxIsbiONbdfrSQ+zD9o50wtxaHwllCpl6NRDFQzE qmiDWbEgE295miG/dZECgYASV9egfRLEYPLtjtJQBWY7VyjFINeSl5HngwvPi70B 24vzSCfSa9BTVDB501EJI+CVCr26kImtN5DvoqndnHasxqT8+NTT4vGug5AaobSJ 2DH4zp68Vy2bAcVQJ4HOOp1xG9ZPmEXustGYaqLjSy6XJ90ZqVzXGjbOk5wMWhIj wQKBgHgsuqxEqTLQIqn0I4E/VAKdBuYaGngSUB0frVKn1N9O3HtJhG50L5yhWUUf UvHCgaP06ft5YLVUbTkelhgCbG+VzsFAjRFhesiHp+q65gCZeQJN1Q3JReeFjaw1 ECQqk2PgtN/3t0VVrmuyLBcw9RETnN7f2mHayo0uU68AU1pk -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 8388607 seed: w6uP8Tcg6K2QR905Rms8iXTlksL6OD1KOWBxTK7wxPI= zfec: required: 2 segmentSize: 131072 total: 3 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:CHK:ufshkmuewwql2xsyiecbjhh7x4:qoywwygjqrmifowob3mvwkhfskk6geomyw5e3qlgqzui3kymu6nq:2:3:8388609 format: kind: chk params: null sample: length: 8388609 seed: yPi3JHKKbWaEEG5eZOlM6BHJll0Z3UTdBzz4bPQ7wjg= zfec: required: 2 segmentSize: 131072 total: 3 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:SSK:zily66v5xodir4wz5conizmonq:6zksu33nods7hita33ju4wbvlnqyxp6qkl6mmtqyl7bh6gk46isa format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEAttP9lzqc+4EZYh3s6IcBlfOpaL7svgQRLuXBYEwd0GNz4FfN v6L68CgaD7pI6vgUZBOnYm7c3z8zBdWibpH3V0qBufWnfFdtYU+20ed3wWWDoV8L z60tj4GNEHxwggLIclX3IalHySFyOTY4K6lmYptxtFBksULuYqFIZyX724O17Hf/ H9XGx+D6ffpUyR2D+hZpXGy5ly06pN4XJsdNkgmv4YfboUN2Tlm6Ldg8b6ni9P+d BhjdyBqg6E7RFKG7Z1ROT+GzeN0Q61PNKPJ8/5fCYPoZEeHWWfZFNO/c/fAo7nsn 9Ei/HxUju+PZmEU0jcsHXIvL9LQY/2b59S1dnwIDAQABAoIBAEQ90HP4Lsw5nc3f uaP5cIAWGO++BAPQ5NEKdSmKf75ewMvGOkgDf4LQlRm1wK3jt0i7hUjadJrnrhXJ bf2zgg0VBGLy7Hce8vbVmDm1GiAX0hATuAbmbxEXnB3BNQVyIHt81ue7lc3fLBFq yYCSlGLN/pz9PPhlMTGjXbESnnWKi4NVtX/CZUUEymgfiynfdXCf7s6wlYUp0WiN LIHBRNwW4FMCvp9Du4W4BvJufWp9v4yCoIpL2SyGbYE/P1dMg+8ZLVHdisSN4+N6 V5+xVjcx/5IX+D9FR5wXR3LXZx/eMa+dsEvAPmQAJTYUAVKzMNOXoixCSsmyV9qj zRB7MFECgYEA8fAAPiq1W2miLyTR/bKMHLIbdrEfMpGTrSVPTw7PdYkiDbNzpTJH uUwxwhHFerrlPAnxsGbbVZG7iT408a9gBqL3fOCP2095f3wg3Hxu+iWtEXLuaGJw hTOKgN9mw9LtHh9WT9JBeQgHU3V199TYNdd7a435jxf/+77eUN+pYZ0CgYEAwXRy +Q5+02YChU42JAuCnOITnCLifK9U38A/YaaBtMYTpVd32GSefCv1emA1KvnTjddA m1br82oCz1US5YsZsfdmdnB7mA0AGiM4GTHfUBw+N2gol24lCY2+wsi07U4z+3K+ HmGcAP8FLXR7/slX/FwYlcJXz1qxYIh5rWnnhWsCgYEAj18PdcevY3WM4+0o9/O3 7kVp2wOJnlkAr4m9nvcC3/8dDAt9C7dpI5jQn9YSNfHNaK/n5wZ9Eg9jmCgiDdtE x4oJqZoWBfvp3y969c5Toa90CTQXrgov7e+mM0qwRnmXhNNDPdg2bnfgh4fDGdOr MPT6MbmX20F4tAHfEwQIB00CgYEAiAedLNnvfjC1xwzG7zOUxUIHLfwtrCURlkA1 
kTGm9PlvKQ1HPUcLVh8G/uUVncGL66oXSOOnCENb9HRK1FOqXsSrLM9NaQ6DKt3m /XhfIZKqgQVhvZF6w6wDHi5JYrBhxwbY/r3+F4k7F8pXwkHL96y+sNe2LR0Fqu5s OO9GGD8CgYBKoK1W1QcPbScWEehb0BFhDfew1dARf0M9BQDo59n1IP/fJLAj+irm dT0Qq38vskUzH2nlohVbOiLRnKIqx0AJKYCpfJNe3jYkzTDenQ2hONwhK+0I4OWx MHe2ZVX046Ou4SvrFWoDJ1xKCeF7sjw22SEaF41OWyzyarhsK5pDVA== -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 8388609 seed: yPi3JHKKbWaEEG5eZOlM6BHJll0Z3UTdBzz4bPQ7wjg= zfec: required: 2 segmentSize: 131072 total: 3 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:MDMF:nk5g5nxt743nwaoqk46dl2iy5e:jpwpdslpyhgafsbny34auvuyh65h7hvz3fioq64efhijdz6aij5q format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpQIBAAKCAQEA6bFJtYwt7TfdVJpjgXe/PTdvk7MVxPiS8qFSmtFILf3Fvof1 hmCdLayS4QcL5EulLTtotvmFPb4rG/EHEwY4VWEwulw0FXcoi7gPhTYwbyf/MPBU IBI5crgXrkVRBY2dqVKlJnds6EjZ33jgs3sVx8q3eKUmLencGKhalUWGiGbEtB9z YgpZetlWdXObPyU+KSKB6kv3RhHsBTS42WxQho85wjMiT58y/duC5IvDcI+ibD2A Iknql5DnN7TdanjpufGYh3x0nUhOUlK9whs+U0ip4P+YooWcB+ySlgsHy2lSOYvO fgWFWt1Wm94fFyUoRwA5ZUSU22y8lkVpVHVj6wIDAQABAoIBAAgd8fsjU6kMqrG4 lxeKdZMGbOgKwrPPv9LmSuc5Dl0X68QSwYgZrPvxeuc5W8QMjeqEnhN/lo2EIHKH rFY7KL5rx3SBpVwlgmav+Eyz4CTMP9nkTxe6FP1owDHREzUH1opou3ca6NEnTqIA zH/kw5Hn9vgddwuwjBuIespP2udmkWo5UXxD+3irKP7lG46OBYF/QHQLrzklXeR8 fWyUgTxKD43D1RtyyYRhV/7hNTrEgNgmzEMmFVTpqfBMxOu3dmBuC3XfORm87rPu lxh1glQ6YlES5zYyl/tSNa1CjCP54k7z6Z7mIvXKb6LCrZYw3gjCayqbec1n/OMq trB+xwECgYEA+Eyq+Frp9KwiGwmAZwIEhRicm9e92fO/v3vQfR4Mz4u3rFSzTzQM P/LXRx4YqDs04WJHOy0+oO1tiDoaCQbNQLqKy2xZBukGiUX+K7koMLieC48eeZEF TFUhbuTDMQWjIMlFGmWDG/L3yU3HGPazUkJSetf3+Q40XkVhPUvX+GsCgYEA8PCm jwSP8p+PkikseJ2q0aKjS1RUZDqZqF+WeepQpGQSnMeTOEqh34fm6epggBbVHk7w 7+efTuAx+GRtcgF8anGT34WGb27TsfuAzV8jsqb+mnttjcG2f2RGGP5KpJfjGeo8 EU9hZsCfLZQBd519BPTrrnePNvlw8Ok1iQ2pIoECgYEA2KBC9YST4uQeqUoD3Vq0 SM5tK8Xwm+t92fiSr+X8tUInT9Fh0vMM0On0CdbnGjb1bsGIdceGgW5DhntyZXeq sRNOriVsEoxRKIiJNOpIdyFKubj2lIcCgVMwZQhuhyFs7djLUjlIRqUWq2kRD+WE E3tLbGNps79BzxFmwcyestsCgYEAmb1U6klE+NHrsJ3pLIWeq+mVPMnwl4v05EUq JVzoXB0m6zdFr1Of+pwjMftF3DW1g4Nnpg0r0A6qlA6w72AXXWxfqO7wm0Yiep06 0ND2XFbGexhrDVsf8iWvvN72DhSE6tJVxc3bHs+mQlUAoqyxS2pkwIy1q6R69p44 dN1soQECgYEAj7BSQZHlzSfYK6SgZTiJ2qDYxsIkcAjCicijT+5h7FJd2U8pBTaV 1/A6UbqwmoFlCXHuGUIOpI69V9nL4BYrzBL53RMkpx2RSvSwUeHW89wsgZh+EB+v sgq76XdP55PL+2oJNJEsB4vdxQbp40kNOfmo/bEIF/vurNYQklKb9FI= -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 8388609 seed: yPi3JHKKbWaEEG5eZOlM6BHJll0Z3UTdBzz4bPQ7wjg= zfec: required: 2 segmentSize: 131072 total: 3 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:CHK:sxrylthoxskesmlhrfuxnifkdu:kizgaeiazgpjgsffkotumbu2dtxziezw73ybwo5pfzleuckqaiwq:2:3:56 format: kind: chk params: null sample: length: 56 seed: YQ== zfec: required: 2 segmentSize: 131072 total: 3 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:SSK:2tvuk7uigxcngmcxx3nt53a2my:ssk6vy6plxp6lp7odlqhybbjxs3tsxe6j2ehogpf5b3ml7wgkevq format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEAwHgjo5YUy7LGiFX6GhFgnv3iB+yzyB6jjIrCjJe39OTI3v77 DWqgLyKSUApAaSuZ2bMqIVcEvTIrBuWczfEhPm45ZqpvR57UqNnAe2z2CDIlTNS2 Mg7UWNaQigrvrM9wOnNoANXb0Gsm6n1KkY3JTszsAH/P8vmRXvm+rZsjeLwAqkO7 R2eUYme0hZUldOHHZSlmz9FAge6ajvw/lIIVg8gbk56EHBlpJq4BTK48REm/hzVu xQV0JGeak3zl7GWALoJXtkS81NctORsg47EiTkTxGEgXS3PXqDImEKoLrX9Sfl/f kjvpxOsnTwqBTu2fBhUJREoXMbJbioPFwwPIpwIDAQABAoIBACFiVVAxHo9MhZot S5HM9NTvFY8pU+/AvL6KbP9k65ALRPpFAPfNSFaUqQtAE/cKDIgRxxt8VAKbGpJ6 Lk4cZpdFGCjCJEYoexuElZnzBuPaCtU+ShH5t4RnRy/igLsZSg6haOdIMPYAOAJR VCdWEBZefgsCIGg1OK1gJV5IfAkbSXbgUKQrt9FNu5Nbt5rRuuLg2CbB77UAXsiA 
tCynDL4kgx4sgDmU496tU1Wt60flwb2Ka5SwnQwkVDM5LLaubMyKsL9clFow1Oyf TwSsERNPxrYzkWUFhYxQmNUZggzdixiHdKsqYCnUzPJ8s5DV2iKSqQPVW5DR7FFN MC8ZM4kCgYEA+hOBpsfBTt48yBCPAUkeiLwcF0oFrQ2Q4wRZ4ldtaGlD5J16Jk/4 HIx9BPvfsm2Q7EKZC8CfiMEtBVhjZZmJTWbZz5dH4SNZwkaxL/JiaJ3acWylZmj/ 3BUj50Ysh50OCfgosXLUE3vALQQRGfQrYxV8SRo1UegeK7tX2pBZzO8CgYEAxQdM GHlKaUz1OgPgDIj3goXhm/c0qB5Nh5k9LSNsz9kfds1vEZNPBEES7pQAdKa4EVaw 7C+k4YpcWDAz3Nzt43bJu0CXi6wmAKEh9ocwk4f5Cms1s5uG8XpNcf9wcH806xu3 NxJgaMDjB7slDAW/DFVhPGum6E1z3fFYlpI+L8kCgYEAhzGgd++p299dcLMy/Hjx Hu7DKPwFkYax+2jQxwKIzVeLMr7H2IqHEbgJpnYcezOsk211m9ro5F+63RbptXWJ uuSNgCLC4z3fOp5JECizdudPvt4DlRfSqsJrBI71Z+NKQa19ImF3sYjHXg7CyAsu oYRuCn82sC8SkIXZevlq8tUCgYEArxphIo8I9rSSbFDtWbaQYcuiSf5VKeRketJR cEA/gCkysV66CyCj5OAAd0/JZ+KTS7WD3yQooNlaYHXWYb9nG/SCLIynIlaIH58U lAhpv3PkfMHzJABg2VMcaOffgdtLqHclSShnzjE+k6xarGie9dMba5sw5tuO0fyg ApFN+yECgYBG0t1+2RdeRK6z/hCoiX7kr+vWeki5uoJ2dpt1OfrgBv6ti0qtQFXZ rCcE6TzeFy/z54TG2kGwA4CesO5DtaTO6pXCHuMnP6KSm7pCCjVv2p3+7ZjxrRdG t0FDgTvyfy6hC2ecVKfdwmJtnarH5D8f41hUSzAnctzaVGJ88GJBCQ== -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 56 seed: YQ== zfec: required: 2 segmentSize: 131072 total: 3 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:MDMF:zy6rnxe2nwn2v6a2whohnb6qcq:gisq3xdaad4fzcle3plmfosulcioxsahzvc64aaftsjdetlkfvoq format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEAxZUYtjBWRWXbZxD/kkUFjn1ZzD4xpFq2Ti0mp5z5OJYISpRN ssUP1Xb0YnPY9w1QID1/ZG99xvMyTxDAZ3/fZbHpsnkdi4MRmGS6BCpa55VTRSyu VKqt++uTyanxpMVX54pCjfkWsf7Lqw2n7SZ3DUyVd+FL5gka9Z1GwxjUJl0kTaDY to/EQtEImLqmPZ+Rhx+FWatN7dr8dfiJugGaXlkEspkCxXHKbqYKVpWD/6bkAVGm 6+ILTVwT0jFWIiY6ERAJ/74AUEudGKKbgQCvdowrlfboyPL9SXumYPGiWCn2nM5J tLp1px8P1WOWYrSL7SQHiNwLhwdeGt5YKOfeXQIDAQABAoIBABv9JbHDUalFh0nH oOiTwfiAHc0ev1IArqQO5dOnGy/OoxCLhyEspLRQxEhBEGpY1rGmfIoZ+BeLgmQs Y5EVzmvdwtTvLsYBVGgB1s75wARfxRq+vFhOkFRoN/iAjDRS50OrtIdfkn028puB 1Pi1cvZtk5vWjLWisxC5jZlcBkuDtNk7tUeADjzUa09krQUHNoZ0pXb3T/39shrV gbD+oz2H7K/rKUHhj8FGt0DPZUPqQjnIk1nnugIdPqWmwM29JA1jjOKw/DLam5O6 Mx8VwSBc0YeAr7jsx9xBvqHiV6esSmRcydQn5wKMNCXnbAatyfkBxbyPNRIp/R57 yZyFYdECgYEA8hqLCZpfCnx3fSUi86+kx96BfBXm5R4U7wfVii3mTBj+Uex1J0gP 89KaOEDYAkAQkgZ6+0RqXpoBx9rRSH+X5pDjN2l5Hs9CY4ZCqeymt21aw1D+KV3R laqF9fiJz/8YaLunOgu1WosiPrBupIEjO8k3rogbw2tV4Zt8ve5zJRECgYEA0Oxc SJ0oFMveC8NzSXllNJACPUEgTC4qMExOUWWd2LT1tmI9YxUD+5shTsWRjNMzBL9w wZq9TBFz2gPlpFN3HuE3Psx1J3BYPHqFiFvm3V/erZ5mB0Tlx5TEkTI6Z4rZlr2c OOvEEoqwTDO25V+AS4i10XLUp/Loe976zMylNI0CgYBrlqgbCGMcAdwH3Sz/JhsQ Ry07u2/0eb3Ly6t10Jf7UVATkAUwA7IzJHAsd4SG23mBqyeT6f9rMv1/lxpSIYGb kN+ojFKrAmf6Wnvdj7E26n3fNmr8bxjobfNCL8TujeqHAH18Kh/ZsOLzAOzqZgkG VJFOGmZcHaL4s2Rn80NwQQKBgQDN0k0H7Gt0MXPLOv30wHeH1Oef2O0sn75IXqQ5 ZFahC4WV7Cp11lpaIXYq2FCP3/E/GCrJUNx0eC0d9wDhZqjP7ygx4dL4y6Dh1AKB V6iVJsGFYas6NhH5EQKl2EnZf9zkuF+TZBGCAse0Cq6AQhluUHxunyYJXzDR99Y0 tNd8cQKBgGhyn0VLhxicircIRSq1USwAmq1447wQ9JUFlhVlGDnScG0K/8fTKqA+ O5nP8tMyJoTRrTxofYQLl9PwENP3wCXHQNM8jUsM9g5nHJrc60cf9OZooL7o+y8/ Ntkn/f1DLOpYsnIHg+FLPBomF0jX6fZU0uxymF7XJU17ZW6ALVrz -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 56 seed: YQ== zfec: required: 2 segmentSize: 131072 total: 3 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:CHK:ld3thziutpaqv25nbtoqebhtru:ri2obsvzl2etyuv3qnchn2wvw5mh5rjuvbjaqkqbvyprn7xwamoq:2:3:1024 format: kind: chk params: null sample: length: 1024 seed: YQ== zfec: required: 2 segmentSize: 131072 total: 3 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:SSK:dgbwqgau4tc3ndunfr4nvp77xi:vpob3vkn4ogz7vbkdspr2kge5mezvgf2sb6ghpjtboafjih3patq format: kind: ssk params: format: sdmf key: 
'-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEAvE+dyJ0fXNy4h7KR9nekUSjgkbKUektsFAbSPJdyPI5Oidyu CPCCc3p2dtyWu7IvB7psDEqaZpSc/brUxGxtW7AJWybtIK1MPCZ5ZxnOYUDkgaLn gnkukWTB9+ugXSD2oDqK90DKCGQ4PykUynehYJpgS3B86hdS5MzFnrTcchzU/9SL Oii6oykEtrp+u3Gx/Nzx+n62AWlcvwcYlVN1wwHBRBSx1qt4lTUh1crqR/cLJnLu nNFuNC9rh7ucDDcml0HAERddsEwXxgzNLt5yPmGSlTTP0q01mlBHfP4FMpijPQP2 KQUULouYCebDEhWa3A93XU80230BQ8oX0bGmqQIDAQABAoIBAAvCuSr3TScijc/D wkPvUu7Sq7vNuGIu5bAWgPjRyIupo5QOmTvrsWn+4vkna66LQU6tQOQ/oIb5jxh1 m6Ys02OfieYMd1DMIe+7w2dCAFaok9zYzLakVNk1vrt6FsjaLyzwmw84F6YQhEbF jQvcDtMWsR7lBpgkHsQ1Wb5As2fY86walvgkbcM1pJSwan1HLDUGNjTGyXQZn1A6 HBDDpV8aHjoObUwUtmcYZKw7KIs3Ft681x2ykUUPcfVPDqCYFCqM9M54QAMg14QL 0hw5Tl/nEnPuiWRQgiCmsmAGuvYJvHvkA0QzjBo8B7S9wC9VfTaAHX33+IZ9e6NQ 7mXgCrUCgYEA5gLa9HZuKSC/vJuwj5yIp0BrRKyACOl7YYej4+/TjGiMPqqHDU0c MGTi+W11tAnZIwr2xbgg+rBgxpWwrlCbwYb1oayhHQfbsMrGc/RHSl48sjrDzgQG OrbBU3uWIHRRsikislaaCls/LUFfVddEkdoA3PqWE0cg0VRInPe3oqMCgYEA0ZaS MjUpd9Nh+hcZPyLPLh/toNvZYmu8e1HRZKCDl982kYmQL1t2e80S0D3P8NshN10d GWkuW6VJe649+FMshBwMXLlVQsLHrQozwz3BFil7eudbmJ1PU2GZZYsgB3h+sy4+ Qr7KVtU6qGe9+KOZ5eulJruyaGpEKWF9vrnM8kMCgYAtyIG2yWASFa+0pjTV0S2u RPdVGxT9MSRa/HnV5CXyu9i2nJD3R9MFmv9G8M/N/2vWOtd18bm2zKbmwGMDv43R TsDT5p3HPoovPZ2U9Rm/ptRkEahp+IkY5MnEiUQPv7eHRALhBrXwu8rugiWs24WN lpw3YDXBLpZMtH8jp3dJCwKBgQCAg+2db8+/tBRt/9/xQOz9gYJ6kpSXryxiCed1 5p8Kb0rMryeEgncCrtsMafqp3BRgGG6ReFd+xrlqZ4uES6wOTgyeht5rE3jQ+GKJ I8LUThdzY45c5IkRvdUL2OWI7y/xuzdeQhNcb1+KiCKK5famb3pTZ+Cb+h1Vqnwq iU/MdQKBgQDVlggsXCqDvj9BPvdyMy+PpKKnYlixAt6xdrvLfMCImhvkYvJvmM2A WK1SaZqQ50ETTaxMyBF22NOpm133yadWSpBWIA59MBi4PAbVkItAKO7MDbvSXNOr Eqxj2sI5gwHV99+NqMcW1psLcFDb5E/N4SZQmuOKblpaEJTlZ80L3Q== -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 1024 seed: YQ== zfec: required: 2 segmentSize: 131072 total: 3 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:MDMF:6jynferbnzteludtwg2vd7thvu:oek57a2425optdwaggj3zxaffx6i7kzlbk6sbvje2lphuxdkoakq format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEA4110ZTjzqHn9RkYJDatXxPP/GuqSc7RFisqXYr7BnJ7fQZo1 W9Qij0tIMi8wgAkb+r+oeLaTMQqZtnBbnsGoWEk68kHFSglMInkTq/MZDgwPFbKe gaMdGQU1k0IAALu1Du4RPUWPUTsuxxh8u5dPWelT+SH99F7vxv6WZ4pk0N43hfrc Khfha+oTgBK0rVq5DmmZ2Fd0EHw0PCFFR/8BLrmyZz3jvuk1K7StW9hqXWqNILJk RL3FEy0rkYqxZuOXQ49inJE+o24hNYSB01H+IVZRUDB6s4/ZZBRHhrj1Mr75KzpF y4gfd70N1vVi/zOVj8iMMCvEBdXqDUdPYZTSFwIDAQABAoIBAC742jilTPV0CmbP xkQML4xRilUhvBLqXemgFCmC1lYInoAbn0Vy7JblCyvPAvqYpy6lFOWndn5Nvdbq nIsOYDypGGP/QYabqB6BHBbMmNMFm8I0TjnjHHpUUK61FnIQVYYZmfcqHUM/clkQ johk6LBfG4mfQ6uOR4Q5iZInjc92jYi2XAsDMaGWefVsiTFGpUUjlYCZ39QWQv8k JoxsXWLM+qyxYs+A1xyqRa+/mJLpuUV2uQ6H7rDtRlLiHuz8euOibUQDDa/ovXhW nId9SEatxYs4E34JBfc1ibqasboBVtrWhTel1OmF7QnNYZKXBYl3Zf6t3oD64m0s lkCOcfUCgYEA+qkiX25dgXAqyDrivkme/u+PnKXhxiA+2wdrY93j++dhi2mbIhxp 4KwySBQoCkHydawkdceMikS1UKSakRlJl8EuIntv370gT6v+22QxpWb/M9GYkxZL Vju5/8Dm8QQ1W2LmmaGjO0ef/+J3UCTNGncAQ+ZTNQSmy2Mi7uppg80CgYEA6DVJ xH1/neG1mlzg9GX8g2Ckts9WtcbBK8Nifoavo2ZwxUEAptNomdURkSx5vFaupWkK LsoDpg2mEfZzF/QQ8sqvqZGsC4bvajlv7YGJWFwkOwTasSYVQK70wcfy7aPeIG5U LpXbFixMFgsPbbIH3VdUfiLN4bzkS2iPiYomEXMCgYEAyijOyBjC4ToNxx927/GA givDr5s51Aj9qLj7K7gxv2CFk2LA82nnGoTGqMtY360AV1dWsIcYGgwAD+IxpwS2 DeaHxte3CsQF4zvceCT+xV+kQ66vVzGL4SiagmKZ35h9UA8b3Jw4gf7qU/3aLJEB um5vkFOamBAAVdjGu9ni76ECgYBDJJVt8XPjLQ6b0dtiD9NSEbHPAmjqKsxUYSyr tTo4HzjgcIlFs799K7Tmq1uP7+iT/6loGhWwACZS71YcSQBVk/HzMCH1O1Ei//Sz Uk9qc0ounpq1unNOvsga+DvwJv/llMFWrxIoeSqO/Se66k2H3OabimjJqxrACz3l 4UJcrQKBgQDk98mfTuvDdcUPQPK+eYK/RNp0W6jNsLht3s2823K2K2+R5f7a4ug2 SjW9v4cT+IVDR5Nz1XZiYWNuTv679v682o7c2SoalWne6RQzickG2s8h6k5D2qSj 
LyRqPtyr/3ty3pRT5ki10yvLvNbPusfrUMHYvRv8slJt+1NsGo7svQ== -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 1024 seed: YQ== zfec: required: 2 segmentSize: 131072 total: 3 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:CHK:pz3mga5pvqd4bpsz7pbyna2qtq:w3ptt7xwwgywod3u2rpgx7iggvm2kpwl7d2ryr3u7mcaxgqotlaq:2:3:4096 format: kind: chk params: null sample: length: 4096 seed: Yw== zfec: required: 2 segmentSize: 131072 total: 3 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:SSK:gzcs2in25goucrttjikx67zehu:sbnsmodidyov64f53aqpdkunrpx6siqw6v23h7vs375ftre3i5hq format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEAsy9CIECvk/doQ4KfrSYIwRNZxr2JmOvfdTA7fJr91c7znNob 42FTrDmG41G/Kxfx7S3hF6YBbmT+Nu/LDs9aSO3mPoKjiLldKL4rqp3VXLGp8pnH EDq8oYLSu/mRaR0yjtrVqUsj8nhtwxGg4kMbORdNe9J4XtCCxQvMy4h89qiLp8uj 5fLGIFOkfYlaUQbw6pFxT0THVGHyRx5Mzzrdivdjeo8RFJsqJoZdrQyEEAXKkeAZ +njTxR1jrUQAx+RxtZygV5X0urUvSt1cVI7fAhbfID2MfszqHa8JAXu4DxnkBCNY ukf8Y7tt0VmfM4zWb7wmubQ3FzKmp1vZ3QprGwIDAQABAoIBABSlKkpLCa/Tvrig kUNC8ZlFYH+skPEEpE99Si1WMk7zNFBrNPFi4mAilK8WWR9e9+nq8ldmMh4FFuE/ ibbgHzft6SxkovD64ofyOVfELbQraDhijXQKQHefeiZcX+uriIq7HgkOdkrWo1bp bg8DylwumifdHS9XeOm5LVR6GmPU4D9I73OuxyiQZGY9OBAoEHa/5QbnqK5Olqa/ 3xdA0zsrHHQpJk2C86hR7+qshKBSdj8Z8HvsnMjUrHST/JEv4ptCCi7MdvXtlQB/ b79Cs6Zp7AGZH08X5fSnb2BgEB2+c6Sz3gRWBnNcNHXI0mty02G8MX++fTwXBJm3 7bHPrEECgYEA1GkxFGYgHtYL4wyBINP40fwbQRVjp7gQdKP4y6/SCNaIojUTfbGn U+GSDtEUchrFqeCEd7bJwZPvTI5mz4LJVtBiikznqk7YPZc1IYIRl/qR0tLjPaE0 vLPNbe0hUWaJCfuEGy0J4vOKjXTqNud5qen28dqHp7alw4+jbP3GV2ECgYEA1/SN ycHbk0aiT60e7d9ecwz2R9z2ZQLDcIFlPz7Oa89OUqMrae9V2B/yO2Dabc5catGx 5Yg7Z5QGahJ5ers95XWy3ed3v9A7GDZwc41HTqdRbwsaM3b/YDcU5lD02HFaUqeT LjiEn9mJ4JlH+LbCOESORqxAoZjjYjQC5U2+H/sCgYBKjf1/FnVxvVmAwRPVzPEJ 6z45suM+rDmCZ0ddXwIOvhZJMO39cUy1AXi9oJ9XiZQVk0uLpWndeypEKbtmXJaE 1TGxL1slCPWXcKpib3/zYyyp3gGK0TlsfoO9cL0AEEhLa6+rxjwxH6BjFEVdLhQj Eo7txvFUaaR3JsSK7ewfIQKBgAq9K4vb6wpg2dNyfXZAxFaeT2T1dP9C6useVCWX /vXXgkKTwKXs8+zicc5IG7SYLXpWYS1T3/hfoQ4HSykyRHqzpqhoSUktlrK2ilME tIYRxffqqmviwAJN2uk1H2fgAyjXEnea8eVtEPEtTintFK1to1GaYUBn9O6+PWKf whOfAoGBAKSFBWcsn3qk42K7ZFdurqj8FbipiYNZYXU6AdOWxWqIMzZjEi9VBPNI rCun8bXlBFUNtJqLE17n0qzpcr7GJGcy6pqFcE+U1Q3j8uxJEqG/z5ZG4phRxVd9 gWP2U/qIZivdIymYn07e8hlpTEZgJcWjkVJ4AdB/PPqfa/m8t8Xx -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 4096 seed: Yw== zfec: required: 2 segmentSize: 131072 total: 3 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:MDMF:uvvcinwvc2gxig3hpi2os6ajva:yphtwtymxs2t7kslcwgdjmx3xu7wqzknff2m5jte4meilabbmhpa format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEA8mW7LoF4muvoZ0pfWJW3wsY3VWXLF/4RgeJvrBQy8k7ht5Lp 8qM0z9SJ4EgryFgXtG9e9n4AN2dPnbsEpozcxrhYLrQZPG1F9FwcppKPZqqGoBYw D1qeUHTqrOvtG5xHmoTCD2kySrzn2JA4pbAXvVn5EH077LgayDX9jaZGofUsx92d THw9argVEYhVGv2NtAGvMLgyXBSaH6SZe5X5r5YduLE7d/maZ0e6i7tktjLAyZmu eDw5QE6AVfy8/2bf4hCbMACnuhMSu2Mb29zgIn1iOgr9sgxRcnaKRiTl5AD+Dc+q MqekwTQP9KfgV+JM/x2vMxL7IO2YZbdc6f5Q6wIDAQABAoIBAEY45pNAetoWwcs6 poiZRxUsK1eYF9ApkJTaLpPhfijoZUezTgc29NPItPC+t8BglO123kH2msVyLoR5 a418fXEsco+FKVJyLbPvA3XWO6j4eeviwaWRERAp7tqNtrErAytmjnm8dg9kzp3U mjSV4Sq/6AG45iVb6JZb3cqtgwTj2+7JJIzk7MsG1NsL4CH3bQTkcfK1XHX85WD7 vkmN1D5CigOJSxeyltPpMibCpuZmyUbauVYN0X/0hPCWeEy0u5cnyDL6eKqfnF5U OhWnrghTqWcVStXbDwlXp8g0kO3hwu3GIMrcJPjQjCJn7Nfq+c/IiFEuiwdQSPof NdmBYgECgYEA9epQwTVK3JUYgFDmmR+PjYTAR1TNL5KOqt8tqsUXEcZWNYa4Xyfw 6EJdvSRaZCJJd6luO82QsqhQu8t8/pD6vM6qs5gNwnQ27bYHdy23gmP1iXyHTW1i a7nq709lDardbJsriSvk/YMCH24SFRgyozW/3vrZl4Nii9daPlxXDIECgYEA/FZ7 
2jPP7EF4bRy98/7HkpXfgNeV6dntjR6h7Zb4Lija1IIS+7459Ttgni7fKNpF5T5t TKHj3cgnjg1hBCcba4nK7sGvX9noBhcfEbzD8BGDUd35PWY2W4/zR3mkx/ueCtxP vapwWf7vuOEj6+GZlKk3hL3/PrL9ym1MpIs8l2sCgYBRmf5QDpIX3jWyJqZOe2WU TU/Mm7w2pAhJdSNfPmVoVYs32cuGb+eF+rfGUrDX93Svi35zw8PXNPkNR/njM1Kw oleMntE6DHxJpxSVHIt/bhIFHFh9feWh36Cw5oSe42r0Zg0tSG6FHRrwOQMxEsWC 2QwhPtZDa6qgwsZEWTndgQKBgQCs8cU+/uXRoemctudFtGgqAkhF8PwRY8iZQNZw lVkRofShU2kZWv40IncM00klobvn64pTzFz1Yzog9PB6PSdg4/bO/rZo9ls82Vn0 +TA9eHNNh9pMB6LXzGhLo4aZfc2K2gZZEtigBcddKglJoLx3FCc19lZbLagdth2i ZL+pcwKBgQDxoY2CkBAuOcyY29WmyBWUEO13odEzC3uUuvLu4mqxRYXvEIR6OMy7 BgPMOFSgqsA+qUTzahSAcXgNauS9oks9F6uAfDNVVuBNok1eBK2XwaoepxRFL1TN YBqj+On0RnnKTqRBNSFjmtopTDDtZUzu4qzpLzBgDP8Zn+dLSrgoMg== -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 4096 seed: Yw== zfec: required: 2 segmentSize: 131072 total: 3 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:CHK:rsh7ysvsgbwh5g3xuj6z7zky3y:ykg5rhzpzqkvdy53qqyghfbcusibynfylggbhypo3iibsrrzvlkq:2:3:131071 format: kind: chk params: null sample: length: 131071 seed: LCa0a2j/xo/5m0U8HTBBNBNCLXBkg7+g+YpeiGJm564= zfec: required: 2 segmentSize: 131072 total: 3 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:SSK:qidwnh4eshzips6trdeog3tczi:wqxqxiacoq4zlm34kw5y36u6n5qnwusscm54fqu32cwk4v2df5uq format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEAtbn+aJL00xu3hzUW76iOnI1++uKgSaLljxTqKEyuMdLq5ba7 zavatvLxnDiscV7FKTRQOh9c2NEnANA0CDI9MdrCDFM/SQvvU2X2s71F1T6HrgWg 6bII/Tzu3YdbOatvk1nbSKWnDTuBQMqdLKojT+58lYe6/n7kgo7atmxSOuRG8bZN sLBE0Lab2EI7jYyO2fKr3PmXxfCMqFvIVA7AjTfATs0ekFIMO5Xus2w/Sgz+plCU 6hC1S9hGMMLWtWGTOIwwlG5hDknJ7xY44F4o9M5S2fDHkdXChGDSpsd9dQotFMZx Gc7iCNHBf40yYZPKrdZqaTnonDUtsyZsoSPj5wIDAQABAoIBADWxsv3rDfOiaOPG R/Sf9SNEm5Q9iea3/uP75gPqRD3seANPrsXiVUlhFwp1pF4LBm5aSqohwik+Ayw2 WGljjrlATb2ei0BmCly17+LDtfJ/+07r0tO6CvXoHxvNdqLfiKQdFLGuYGGEh3hB ZZdg3fYsHRuBczrm+1WoJ+9mqhVEBM6fx+JI/KDHkDu1aKWEeP2Cl7Eol/ZglY6E NEodDfhGOdAaXKCAYR7IHq7LBqHLCK6moWXrM/CQs6E4Fh5mal5n7jeRzutg7ZPD /tWJTWRPWVd2oBYnKeGkgWJ4ADGddtLKbaumxZ1jPu19fKIoU5Sw2xn10vVk+/KQ YKP7SSECgYEAuR7MpUtt4/jsoJrvf0idHvRVieBTxYVQ5nBfsHghDWtag4X2OkGM dX3QU/q4KMbsHB11xg8dPp9VJEH6wnqYLp+TAjTwetZE05SzcyacfaOZCLZSAhew P1T/uIvcg4OYuJyYvaciIi7q4frBANGIC8u5JQHgbyDZCayeMf/YRPUCgYEA+06K ztAsqIbuMSKztzTzQ6ljGxANMKjmJDHTwqLZ+UnknP+R0ZpGBFK5Gt46EjfNkEgH 6liyE8nM2NMuXGFUkTgCzCAjpnsTR9Wh02bWGH8ZgUHr/Apq5IeN59QJxWkKHlJn jCEgHtMRmOl6kbTJMpMoAkJ6kPi+N+o8dczD2+sCgYEAh5Nb807by1NaEYGHF2QZ 1jrBjrmhAI7TogD4w6gnJMnTv3FT1HR/JukesvJy/0I4V5rnz0bwdxV/6I791IKu g67QnpQg7wWP4JkOF65We9ld0bidNPUeWjOpGQItXI/7QHFHl9YYtIpB8YCQ60WJ aoIoNUc7lIetDF3Eef/S5yUCgYEAwmbSuAOP2Fpwne/zSBEc8cVx1fiHy5GMXolw /4rMxawkvlJxccw+x49ag+9OytMCIM+n19/++ZHM9hn/LhVYvvGuMEvYaCujEZmw EoHlspN3nmbpb1J7uAcofiKn4F9OJYCne14Qo+exIDHU0CwizA3MEFtuxwC03TpE xPe+tzsCgYA2lWPwtLMK2u7mstYsYZs+g2KHlZzKf7mBtZzmmL4IkhvU38wuKMuw xIlmsOIHjkXsfPT0tBKD4mAngPVPN7Z9sgJGCuWqxXoXxQltlBSH3WzQEGvipt/T i7pJItBHrPl1Mw5m2baRRIPQMREtRqwHR8VRlXy4vCVg9Wky1RGCHg== -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 131071 seed: LCa0a2j/xo/5m0U8HTBBNBNCLXBkg7+g+YpeiGJm564= zfec: required: 2 segmentSize: 131072 total: 3 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:MDMF:qxw7uvecwrhqibsdufbzs72voe:6goxtexixkeemwwpf6xu5is2723hkpzvxtoy3d7772h77jn63ewq format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEAyGa9FcNFgZ+FZa375YVsROi+KaCNNfZDjSW4xOhvrAO2ch2Z cY0mQ9iAcDMIE1NtJs95HqLGbBzkRF3+XxLN03aqRaTai+wAao2xZXiLstZJV2tc GIFmoKx1pGlhHpk3sysQMJTIgg49HsGxf6KW9TFLMkeNA5kvE0tSJghQ7G3SSKm5 
HTq5ZFF6Hh/i2zy3G1hzwvB/kzGmQb0Aj33C9IbjbtiI8wz79+TBecEMILwUHg5r NdwP+9+i/ZoMbQ1RW5Vzfvcb7w+/yp0GtvZO/6yUihI6qM5ZZ9q83O575/HY8R6t LKgSw7A81WVL13vwDHZtcOwlGg7ElAITsRRgRwIDAQABAoIBAFTWjajnbIb2IfKM R84AgHfhshOMWQulsC5ScFjH7/K59aR03G9ianchcipNqFcKI/Tgs21hSrAdQROi WRwlqUYi/+2Q30aKCBkT2CxVboqsxgrAtBHZwjk2GW2bgRaDlb1/dxCiBApu/bgG Ft8qVU8C6csk2FFUKcY4xlO8hIm6KpnYoHJXmtxPctJTYmRS0zuvfV95trfooFdk z/0rHorBq0xMEQR4CiJgrNyvdVPXnlljpmjvFOPCBwbGjY5MfmnYfS7dKYdmyKTF 4yndMKCtEwC917ncUo9NZ3dwsaG+uxaK0F8gx3STP0daE1Ct8hSiydCgdNDl97E+ uJJUbzECgYEA5DpKwzgXb4xrTq/03DhzX2X4uSIh9/kxHW17BwmOQ++q+lJtM+63 ND8tp9w6LiUTe9dWPFebBBrQAmsVpTEw9/rLcinFEJexTto2SjDH7dt7PXgt9dxs ki6GoxgRq/AjL8i11gWlz5ydH3/hQnfbQx92Ddcwt9nc4Q9oiSkGWZECgYEA4Mma rhAPxqY2kq7jq82HYLywXknqL52pe/GAXn65PTjVREBllKtgY+woZsXzw8L30jer jsX920uqpMunrgn7wrvrxXbjtlPFp5JqDK1f2uE3X3o5MLMxD2M4PLHgXAkFh+7u NYXJqaz/tiDniiTaf8EFnDkZbFyX15z0k7K08FcCgYEAjv7O9P3iASwz17t7abec 4frcGfL+4YWqdkuwN7qO/pXdxLV8YnuBIiUrj+72LQ9h48gJ6gjhwXKjPcCmcTge /GCQs9jj9f91QniKZ3Wk7q0DzIHOGiufgv/Pr8RW3im5gij5dT1YpHn2IFRZaPH7 2VSO/SEFD0xbjk+/KaEgr2ECgYAcuAUoGes6EQBF60wxJfgW1uSdl0nxPW5q5Gbn K7+U3873gla4ENEm0wQyZTYIm783v18OxaLyQo+RsGdC6AmfTo0H1HGxWLCXATDF X90wRLfjXeUyoKIy+hU0Q/GLMKfhPxh2BBrIr86XwUpzrtOvoMSLugvSeV348Rea SymszwKBgQDeESijXcYtztiM4LJZtW+lFRbuUDk96VivUNAfWgb76uonN00wzigB xQrq4VIkmMv4EAHs4WDzwiR112BAPiY6Sg4YoCaA2VEH0Lj7vlGv92CZ8YNyjNpt +zAhdRunSIgpgw0ovhCe9tko6PX8Ui4zdvd94GxQ8FWmRovm5LOBqQ== -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 131071 seed: LCa0a2j/xo/5m0U8HTBBNBNCLXBkg7+g+YpeiGJm564= zfec: required: 2 segmentSize: 131072 total: 3 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:CHK:7xqejfqfej3u3zp2qymqd2iqeq:po4t2tzkh4d34ku6gdhlocadfwdyweiyckyqz57zdi7ndt4ikj3q:2:3:131073 format: kind: chk params: null sample: length: 131073 seed: /N4rLtula/QIYB+3If6bXDONEO5CnqBPrlURto+/j7k= zfec: required: 2 segmentSize: 131072 total: 3 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:SSK:nofb42x37o3rgbt665fas5kzie:k3s6wi5ohmsqytd7lsbsagknyg3elwphviakrrsazcsb4l5qgyda format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEAtxoNXh81HfkF0TLH3nkvVnHZP3E3SHFzlYTVt/+0hxjqd7H1 ML5/AzYtDjsFamK/GM9PJw1whNyqqt5lFwJvFku+qk9XxGpj1JtHgirNnGdhs6Rp KCQE3Wx7gQAI04Z+tHmqqYdZsEefsC0n35K1d0P5n3zToi/sNeW8bAXONuRvqNxP ocROo60hKIu4NvnnZ4mAxLyNge4DEj9MAOAURqBW7ZoyEjaZ/RJ2XZvXhm3+oKd9 FyYcUeRzsSLTE6H5cDsiVi9if+juc8xhd6yA+0Kd6Nyl6FkxKo24tCGQp5s0dDdp ZmlVWSu9EzXLL2ibYC3lKRWKTpln8NiczWf5LQIDAQABAoIBAAYvKZNrv2oHPqSN sAV6F1i2mK1VYBYgytQae/NufgTwGP848fyW+og7vLLV2H1631RxsA00HYBHSbZi s4xe5yycG1D6RA8cvslwAy7IzlABh+G+5FRYPxfRcaxuOV4XlVD4KQT3ztYu3Rxg sg+Rj3J7R8OUvjsknjhFzaLiYVAmQTDJDuUofboApGD7iZLwvNRBHM2LjLyjzoDU v81XItqMUix/PjeDKc3u42/R/+5P6CV+j5iVqjG7DODKih+jm/lMxzKPfGZM4XZ/ 8DIPmoJDxZtpDutB6C2rEH6UE2oQreWG4vYLCuQkittaTzAaLabMRHfQF3E1HgtE XkoU9SsCgYEAty2tYRxDGiLviTyPfuLTgHBRxayZBVivzdHGph9aZf32oUsTlmLS TiU3IcsMt4x3Pv0Rm1V+Oj3capC3QdGyL8hQ/qmDMC88+tKd1gbxTTliDKULoIQp EvN2nrHKi40YjxaxKU5kVD0bIZu8x1VQnk24vnJRZIQu/vlck9gLd6cCgYEA/+SS uF7U5OA6ZdRJL2KSGkm+WGxaFoJZbY8z0YeqjU0YbChnpV7aSNM1UvytzU48jO+p artSjltckOw2QsQFOnyhgkxPyy0w+ykzyMt8Kacg8ZVkJxFmXf1Z0KsDWFurplMV MzTf8eE0ZvjP+2te2uYr6Vmoa0OdNfoJ/mHCIwsCgYEAjzzBmfFOq25sHsVjdBYM yx+JYejAU4TxHCGQk7BqsNxxcdjSPUOTLhY90UgE7raBPJkJnoywwvxCknYNRwOh sWmTpD+LXS9jIMN3NriBEiDwAfFBcUhHEhGdTSS7vHodnS5iZGlvXMvXnmU4riqR euhNsWaVLOOMGEeH0/gZp3sCgYEAq0X5MjA+/KZcT/XjujSWp8O+BH8ZWUGLy7ny rAbLD+KPOy1cGiK/pcjAQzheuDDqdEahNZAFtMTP0yxXMR70hO4QSB79tXcc9q7g O0B/bX2wniIos8GAq948NF+SUJyi6iNn6Cs2zTW4FkfpJVX7WjZ/I6PgB1NtMUiX 
Uc1q3HsCgYBdqsVcc7xtNsoa6VLWs0UT4ddnesOGNPh2OE+wD7GAZubT8A6Ndktw 8P17MAMpFjhW9OunA9Vnat41iHdqgZtyKUjuW87O+1pRp4l9MvKal7Pf6pfQJ3c0 FyhWqljYymTn/LFep1x8OzMC/YUxclpLu4gUbHu5bfqdd2mZsxW+Eg== -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 131073 seed: /N4rLtula/QIYB+3If6bXDONEO5CnqBPrlURto+/j7k= zfec: required: 2 segmentSize: 131072 total: 3 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:MDMF:h4khks5ufaa6ka5lyr4nz4hetu:g3mjwxhi6jswmwppy3upio73yth3jfx6toguraonmjysc2boat6q format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpQIBAAKCAQEAwSglyEQntPe1VGZwA8UR04afb3YU14LM+yUtPmF86CFqtkYF jERO70XwmKJDI9qPskII5G02KMr3L6L8fXvBAgq6YBdJlqNGpxnS6YlzrPIgxLJE 2dcxoezU+NQTA/3YSCGukmqBqrLyHOtzlpyU0zo9W3crsUbdM9yCfy8N7+98vLiB tGAtjDDMsg7rU+L5nKJRwNSV/eEw85lcmj+6Xj/7JZOLisTI4MAspt3yG3YmapQi xpDif7Hgz6d/zKnkys/BAw2e7/iFJyK9hbPDtobtJkUK8UAs0d4fbcrQZKg9CsM8 VDgp8bR9oBqfhbDHQfNtZV+82M3qewbgL0BWrQIDAQABAoIBACB0QfDlvrQx4KZH Ne/0NzwOxRAhy3uwbweNpg3yrF2Ga9snZbw9J/QdEMFcliJakUVWwg67aNuuypyW 6oyc8/+HVOxbTVKBqZffB2iU3zpCTo4uE9J0TVMTK2+Jlo5XovTvr9jLC3Fmcra4 Ouol8f2RrgiFu/Ij4Xvaw5RiEBntnRJxUlvU1ik40uDOc0usbkqHTMS3drBSDU8a 09317Sta5iS1EDcDDAg2qOwXScy5pzVAhUapN/2HSvckbKJkjL1KuaAq3Jk5y/Pm FaHls3slBPsrfeT/MW61PcSgraz3xk/hw1ajeB+swxqQfkhwgAFe3RxJbOwi9uva gWdoKRMCgYEA3mDRx3GqV7B1oml6tuPoYwONKmXSIDh4HLDX/qxHtBLHIInNp7vc T5ntId4EXvz3DY1PcRqpsMpGLpkMdoeqzE0eS8a5BAK1I15uSZA1sQAjWfu9o72l +wjjaqAuBTm2k8E0XJ1osJyvGG3q0zb9lvBmhblV8d+Xst2iEATfXZsCgYEA3lxP g0lADCRcCrt8/SlSVxuECrORryRCKISjYInOzVdcsEJO6vqHFcdrJ0UJv3oPvR3b 185qXrBJrxCM7frpPTQ0rIjXYgwprTtyVW8nxCLpn1IWNH89MNiHSj9Ag3pTroa2 Gbig19hgl6YutX2b00sCT1hI9Db5Thaxs4mChVcCgYEAg2u+rlrLa9VaP+iMYEei j9mKdNMF8orM2U/d5qFUIuSyD9XA128bjWOPk+NMvAJN0xF/MH2saVGxVlqW1fnp g6HT1L6VmvwqpsNo9EqooHlPax9ufLVYwVoIZHxTljz8XKfi1RUlyLJgfFSBYd/u 0GQ0grT8SNx2H3wCCeuHQh8CgYEAuZoVqIEI29mxlie/AVVvbFQEWCZg0O8T5dwo vtjobE+ih2EhnFN3U/97enDO3SuWXYXBzhV2hgjhyCWpbK8F5ldgLC+gkC+UzgsT uSop6DY4CQssi681NUNXUesP/26o0MGS2E9aui/bGFnXHRh2a9xtVitb4bTNTZf1 xeVes8sCgYEAsILzo7kk+SgV6ZqyubbN52t/q/0xyZoNFRhlNtlJyhbCE7YwUY8B qmbXrbuJeiuDgopMZ2OHUWh4aLMpBxbYGdAlCz0vPS7a8XamQXizQSD6GZK6+AFX tcrIMtrx++XsrD6RkikqaQmgPq320rMJDFETZOzxXMq5Hr9NgKL9Nyo= -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 131073 seed: /N4rLtula/QIYB+3If6bXDONEO5CnqBPrlURto+/j7k= zfec: required: 2 segmentSize: 131072 total: 3 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:CHK:kzhg5zp4hcs75eboack4kqpcdm:g2cwnhpfwxrv3c4lrmpjyj327a274oa5qt4isu4avjixbu7vblrq:2:3:2097151 format: kind: chk params: null sample: length: 2097151 seed: uqWglk0zIPvAxqkiFARTyFE+okq4/QV3A0gEqWckgJY= zfec: required: 2 segmentSize: 131072 total: 3 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:SSK:smbhexz3nnaz27sp4ycnn53qxq:v63l2vvp3z2brgul3vhlbudiybaoxo6fa6qe7n6cqrmbkbtq3pjq format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEAnluBaJX/nthl3VTvSPm6f5LZ7ZoP1pXr8dmUZYn8cyyAmnLR 5TLc61DWPVjD9A5fiBeF8MTgU7h/RZeb02MaWbXtriqarpvvPqOgsb6P5J8viZ6J YocarZ2ZJoBdR5kJ1sNbSf5YAABEZX8KbZWU+vhGyx+rRpjcsM+31ZLxSEg4kmX6 W1YyVBfwtkYLhRwaC9s53ZuJUfQa4H5tynbgrYwh21vxw2C8NQgwPLh9mNkCAJar x++YD4ipkEidwlHGSaDQ/miYN/fXNbwOBVk/VOsPEyVrPLQa/dfsguOZV5ugIOJK fu9bdAQQtstfGeryekVWvvq6fi3bmetXwjiYuQIDAQABAoIBABZ5XBtQtm9/vK01 waP0tTAn0j/zTm4g5tRzEal7dNWPqkzBIOLLXikTVuRr9ZtscshotjyeZEvdckqZ IqdUeEflFu9R4pQHU2PrawHuzpMeuGtqkYrnK7UaGcMqEpL3uDq/jPQqYajWYN6a sgstYHBhzgJD41XomeGKCUgJS677P8X3+g+i0MxHx8fgnVLxURm6QCXeLFzkgqIq yt92rNkTNa9wyDdYcvAcx4b5RLOxXrkkklOVc/oJlteM/X07itbuYDZBQDOYpZPP 
EkTckkBSbcoPy6nGfcHDintuIWxKe3jbWRcRl1uCN9JNx9rn89DQzEAfeFMorROs 2DSDI7UCgYEAz/eEfIZ7EJ8VZ4CV+4oK6s6+UEKuoTW4kN3tk4BoBuUD/kaeAkMv q943rKeDtcOYOS2FxSu8XKX8s7Ad6IJV/l7RMtjRy0ihTsklGUnQYx9ptkBvWYnR M6SwLfOS4etG/+4VomUx/YGAsffZjInQbAqiZKhVeX7qFr7YSiBfkTsCgYEAwu65 vwTOWT9pazDHER8UC7tM6uxOCEGkHZMcGFlDftLaB30dEVL6pCeXW1J3XrFVFe6x IdZdPuTyywtpWfJrlMPGsjHE+R1x3AB1NaLEeVGIjh+Y0Zt8nDNwdK+4u8Kl8B5a p22ySmcPpoOPFv75q1MTyrs/C5VaRuyU8SKYXpsCgYEAkGZbzp8N2jerhAdrnJF4 DRvqVw5F9Ne5RJVj/bP+BzODN05PLmD6O8r7O13A/TdHfgQWyxYYHvh940JZMfU1 wn6RoU2dNhpDLtJJeSqgkALiwtIwvqoL4WDrl6x1g3p6/P+SdATx1gTSmD/xBT03 w50KrvuXBdpSreJrieS6lrsCgYBJSTUeIrFtjlCU0xbUUgnYS0ekvsirg/ougEM8 yDp+8Mi1rg0CmV7P3m6iD8P/Hs5tW3rOzOfroGnDenvWLDTUDjKiheGXAsHuw2FN k+8n6UZcoHZ0v28+znwF8paSSKDYQKE2dyBjppGUubtPGvdEuQwk2Pbf5Pu21HU+ nxIH5wKBgQCJbSa6TDgwwZmqOCfMHkBLXH+up9o30VcREGHu8uZyybq5q3va03LB Ket2oM88ot04aF0wAOIJqo35FB51Jxgi9GNq1LE1U0TlB0o12dz+yjFGWy0A/o1Q aCBedc455WFm1R/BeP5bjaKLM8suSiVUIrHGTTKhtobITgvrmIZOLw== -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 2097151 seed: uqWglk0zIPvAxqkiFARTyFE+okq4/QV3A0gEqWckgJY= zfec: required: 2 segmentSize: 131072 total: 3 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:MDMF:cplunzl5zzvjzx3hurccgxjaya:ew2dgbe6r2ivhbdn5nmueb3tsr7dziypwpxzqmzowwexv73n4aca format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEA5rZ0EoSYGWTYYnRw8LBLb7XKPgyB8kENRSRc8C4zkQjGlxay rBEdxXypt5KoQeBXEyfwoALfuuMA7lP11Tn6TnO00AXvMEuvqwspkdIptBddnkGG 0hfW6liNrH/cVbwOkz3cdxhf7jL3hTTw4DeJdGepV/ey2K1qHvbd4ckgwjwj2hy2 6PWzVVIXJe1m7crNmB6DG374rYlRgh6QnXBP09zOi7eo1nxGVPbu/jfIwpRGC6W5 tZCr+ShQenXofqbZ9po547ErafUaeRsiWMSMyzIT0uLvoKv3RAS93bkfqM1USLR5 7f4Bp2yQcNjlUUnarZ4M+Q+PWTQOZLeX9mLrnQIDAQABAoIBAEdxj58eZVVTx7gx U7oM9cdJla/CQslIgLn9CTStMfXDMHAgLMMg58W8lXfN2AHSXVSGxTpfuXWPjz2+ TT2y3wLFTOQwOkIL5gHDCqPn31cv9yMnKn9Lt5dJRdH5pDr+acsJ2IgeybIjIUgk PUVJnWypHyUpBL6ZcOfWzZ36IQVUcME0PMjykTav39K/uHXWF7Zk1wy8FFdtsh+7 gpq6hsKzZG68vbwySml1V+0utsiNArOTiudzUEY3NHrrjAYgKgFxX2C8tSUzWTEd Z2Yl/AMnfL2/jtQjjWoNGaG3WZdkPIBfrgvF45uy0QJ/Rm3K25E+EyQNkz2CdFi3 vMKui0cCgYEA7qtx0wbCw7wPML86BIp2y2tIo8kmGeKn2NiCsCZl1bW4lkfuRmdN BYTHqTUaB4PaHijsXrLcxuGqjGQ7/OUqwmEz2BOZZT0ehFAkkSeA/418ZoqwbH7K AylmDTtZ9JF0hXDVF7Wwwx3RG4O0OYROie6Y8RXvdKE0DFkcoiywzN8CgYEA93cZ Q9m6+OrbA+KsThqC/vimTGPPWUVATF/yPgwSBT5pwq+JEjIhmmpaVTMs1E7h3/ob mHegt+g51fJwc9M0h6DcjGJ0ek7Qlv/TrQtHCzljqQporuUgqIfaU+82Mxxemwdg JqfwT21KCRBvMNRtQwILRCM/S7SEGzVmzobrGwMCgYEAtkJIfu1XyF+RfhlZ9ePD Sh5Yb3MJXJUgtlDIpDn+ZFAMcP7nL+5s+/zk+AtsIDcJVyTLNJoETQBB3EojIUHk AGJ4U9bLumsNJd+JvStcsErcp/XbOk8sd3Oi0hHz5Pc68zgyEpQWMzpO2GMgOxgV XfHN67Vjkj4UYCWg3xufvCMCgYA7nmDi3NjT0VkUlY6nfnGi1erSqpUwz6NPAyqM UkIhK0k1ky61yIgZ+JdswViCicKXQF1XnTKGPBd6+N6ouPCF4HZiB/JB6S0Nw/KO VRI3nQrqlcxknmUA1UH/SLlJFQOh2+QJTBp0OENG7cOsAvGT3DE0qD0+ku3k1DfB d/W6WwKBgQDS8pkBm+pZ0EbZvpYTy804jUtErk2Dp5M6m+KEFE9NF06ncb8voRnQ sYg4P6NuweJnNlQSUIneNwQ3liuZTdLz3WDt99+DLlcToB6+jL6ppXs1h0dYpkHW WEQ2KDzRt+BEcdPe/sbkF80jhkZRPos5gMmy5rgcDiQ83aA4RYPoNA== -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 2097151 seed: uqWglk0zIPvAxqkiFARTyFE+okq4/QV3A0gEqWckgJY= zfec: required: 2 segmentSize: 131072 total: 3 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:CHK:6sf35smixwvpapf2zw7sllxet4:ks5zd3hkd7ppwobhnymezckszexpychngnkipxfvmlcng5fgjbea:2:3:2097153 format: kind: chk params: null sample: length: 2097153 seed: BTBX/ampNfLU+ox7xipBGiaSbgC0kcB8Gy7BkJB4oKI= zfec: required: 2 segmentSize: 131072 total: 3 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: 
URI:SSK:lbjianktlydwc7qn5oco6sp2xi:mqnj4jq72xxuwrff2f4kyuv5xvu5yzzkhlgxumm44n3uwvxeelsq format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEogIBAAKCAQEAuLx+Rysw67iIOHq3IvRgaYAn1OT6xugUvhXPc5gvPAS599bS rhPVP71CazMQHzCPTx8KcDskNnNb08xoeae5DfNVCs/E7PBtgIy08Xrdv/HEZ4xm ksJBLydt2jGvvFTbCHmNuywWguHHtUig1IrYZVCYMWUABGkiZ/j4UqL377jjY1BI Fb75Fud/3hiASG07BdL8hzDTLqmhdXDwQAeMklhaB4Qw1A+Ubb1it/SoOI2CUzhZ /fqAJcQe3YGbVtJm58VWy4dw/AzcGitya7MxXHXrjitWqnj+CVM0myf3vbvaX+8y YQlv5pATfhJbCmh0vj03oiaSZcj9IS2fooV9UwIDAQABAoIBABCL4YuiVLloR9s3 MpwQ42nPrsGk2MlkFCeKcJBb+y8XBUkrlqc844bX/tD3O+RvRwbBMwAma/Hslzb7 QghTe4HCX8WeIndOeaBf+fz/EkmU8BCORMm0WH5Ou8olVSY7O3sg2A8BvepvKqIU JUOkRAmfFGKoNz4t5IUHicZtDmQMNmrsb/jiEslKqcroE+7R4bQHAQtdLCwPgQSW AHh+Ft1wh8lGMHXi1EIiahVqTF7EifNJ3Aoy/O9LybXEMYwy+fDN9WtCNa7Xl5Ab K8QXgtTLg2kcAWfsBy5aM9vghN19bDV8OPgXYhFqK9AbFz7nGBO9VhLwVHU0NbpD VSm88sECgYEA3a4QFEE+2qn2Z30ar2HMrEAh6/w3KSi2SKMfLvnPl2M0B/YqLaTi YddrdlDcKJGUEx2GKNmJFUqzFX++BD5RE1olNjw6RW6D4RDpCwoxE089WN6gFiCA wdG5RXflHhF4HUfz3lrewelbP6VM1q6Rs2tA0+Zn+KTjPSIGTl9mAdECgYEA1VY6 O3K4p2TETW3n+T7+CcLITnSMp8n0hWOAX5ASetShbq99ACDQ0mzcE0/KWyV+2sNx JxvwBBgtsGsGsdsqGNyRHXQw9LsY4ZSF1Toda+236OwaMDs7WVVIuFutlD4tKTX3 u3UDEsjGWXXohZSE5ajtIjd/IOHtCT5YGaeDEeMCgYArlW5Z3R4TdbkZTbJyauMH trA0qmjZ8cQs8c1OuhTDaeCv9AkE4lcT73uUTn+Khly7iWF4JJTcF8yv3GaqhOoB yQZp7Ft0jS7mkCGRZxaQ+lJQZ6zHzOojsS0g6Fqml76q2xuqSuli7JNhJwm9Z6MD yIF9Z95nN1vqCAd/Xyg6EQKBgG1wsqbUj02wL9PY0evXGNNBDSjSOWXKAJp9FNnx OsmwUrBJbkKmkvmfxrZRdGmVrqHjKST6/AHdtXKPNPwAhnQCkp8dgA/L+1OdsZpV GcrIRFRE6ppbiHKngYqx3TXzP4+ok9GikVUNklNKXWJJcnOuWRf4iEsCG2tmhCOE /4QpAoGAEfdRRaGKv4w0qhxS+RdEaTrDYzEW3xnB56ky+E8UJEy+YgbGlt9Mz1aT Kr8SBUp84Qx6EDa+BA60VnkL51E575uNY1/xICufxwM1WTWKnplLHmExSzzYv8me hm/J9LA/rmqRTZFd1bqk+M7qAO6bDzKD7WNhAt+kbdaFOlkO+9k= -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 2097153 seed: BTBX/ampNfLU+ox7xipBGiaSbgC0kcB8Gy7BkJB4oKI= zfec: required: 2 segmentSize: 131072 total: 3 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:MDMF:khtxspsd2whqf6n3s52i7osu4i:w4z3xljy7asjjjev5zicdzf6xu6xvvr3vkyghu6w7bjitsthx3pq format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEogIBAAKCAQEAtlZxFsekdTXUZFqXRPIXWmfAv4LevgoECO6qfY6ZN4XxMx5J ET5EOkkqBE86YqD2nZukuSzaY1I1PA4fHkwiVbdXw8CCEah7foRaxaiW/a3D6AOO L2JydZls+CYbimMxmbGFQF449dRtEMzQ5wSmPw+bfHulD6zHYLWgV94qSUWR5pry LpVtH1owGid+xQqJncxt1G/WYoIPDfEwHzLhTlwHYCEGLDEHk8UPNC3m0N8Skevc MPCPk+/68L7m79SBN806eeH1jkuMIOMYMPX8+5PzT1Z2wE7IgvjzXQ5bNL8fAk+t h2wvcClECFXTup9qWyFgu430AOX0eu8ITV1B/QIDAQABAoIBAB4YAT05wQ5pTn8r pnjGHg2ZPyo8jse9vnG89l2XqfkMfcUqk/OpG7ik967TZrb9iwZzOEopuXeYC1o4 mHE3LpmIE4+m17DTZmJ4tMSXsSf4RHOoFpEChhKbumzwWS5LddXAg1Ye1vbX0xJp Q3dFgKy6xjZS7+i44wU2pNqru73w2ct6ELmNp+2hZIaQQmOwRwXQeQFLi+Yyt2In g8tGnKaGTGHkNCxWIyMLc5KFgxPpxtFu7grJdkfsGQ40ykMPHQjSepAa7tDAvW1w rQX9MVkqvKFQUZCcSi2LUwWXZWnaMId5FPvWHSdy8SS3KWyD55Pg5K0MA1Jvp+iG I9LlFdECgYEA/Xt8lCn5mscVw3mF9s7c59hyIT3NMj6JSWKNbYVRlpcic1M+aOjJ 88TEZxukiZd+J12sd41J2krsvVWkfANMwzdIqnaxG+1/l9pqfFgo9skikirUd8RD SJSoUDqKzB5yRkD1eyk8vEBxioUbeXi+Km22A0wJ+BcY6/J9uufvhZUCgYEAuCYP Xn0RRbSZPr+8N6WlPiDdB/hJULTlsbV/hf5TzBRMeqOD6DerBASeikBwJN+cbOKr 69XBWy+DhO7CgV1GBuxaTo7RTl0nToDnYrWjOy4fHapNOGRQ07TEbdx1vKo4S1N2 FwipDdQiQw2For8xFwj4F0ZLR5pxtDaNPtb/4MkCgYBOS51QWqLJpyLWzSuO75iW WGnwUJmYIm7fZvyOTrbD0A0JGDZXy0fN7wJHYudwxIVn/WwvRUoBjlEPrmtvDsng JqxgUucj3DkkG4f2vnhwufHeujIEiG/L9HcEyQBkSic8AgaRM0yaTUGE6tZwr9X1 XwvwesU9h0zgXHdviwKV/QKBgFoLUkyTv1RkYOLMAo77Une0vh/diowKSJ7C7x5o JDWQX21Ac4mjXt5SG+viYnPFW8nqdMKW/TtHWnov/bAgGdPc0rPDJhm5dzTt1zbv 
NmgDv3dUBPpkIxnCNKK7wF6GpYw/vWi59WArsK00+XmBH9Hxss4+syTKIntKiXqG ywvxAoGAILJ2oWur4lXGipG+Mk9dceCSUNzF1+8eHBL6wcC+iNeEJx9vj6T7Sgv9 MlQYizRaU7AmTOa3jFqbDv/tO23apagwjQtIJVMj8DWQbp5p+HM5dU7kFwGQRscW iMVPpVMHqS4F4L7j3oQ07Td7xsLob0mM0FxVlgK1pVUTkcpQoRE= -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 2097153 seed: BTBX/ampNfLU+ox7xipBGiaSbgC0kcB8Gy7BkJB4oKI= zfec: required: 2 segmentSize: 131072 total: 3 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:CHK:yjow3adqfodddphsctxenji5ya:cgqbbbawmgeeacol6umzjofyxzqeegjnl2s3utqjchh7vabkmeja:2:3:4194304 format: kind: chk params: null sample: length: 4194304 seed: 2NVWOXvtwcAFf0J/g0fDC+ypQbnCU7awZoCxcnhpkVM= zfec: required: 2 segmentSize: 131072 total: 3 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:SSK:ymk5gokxdb3hzszs2nuhn6sqvu:kigmqblfazd6wpwi52p2t4lzb563naw7hrm5r2dtfmxheedra2qq format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEArVHLgrQL5qjt1rxV30gfK7+AFv0fYYTcddOFF1g+03o751vD PTkhlPNGkK5w1gveH5nTq1FnZyH3qLQtWyxvY1pnDARDs2xuTbIQbbojeLjzgk5P 96nBujrI+TatjbDI4vuW+3tRQ/WJiE8dEV49r0F147kBgNC3al01TsFQZP7DR6Yw Cqkp2WYbfEj6NtrsVJIHykzDifzZ22pcm3SWFYk3N1VeWIBw1OSRh/0GtnoqiWdo yztfATT1d14uubxHIXL44hz+M8EnOZ0hwevJoFHwnUWzWP1LjrHxPhUwteTg/MCg nzJcVHWDVnxpq8G4+WTlQrstvTIqB0L37pA8aQIDAQABAoIBAANBotXlKsa+Acn5 ETv4D5iI0+VFWDjtgB7j8dTgdD27nDMv8YPNoP3lcZFNAGhlIgVB+fd2uL9NT2k8 tBAfI5tK8DNfmSNeiYFYM8rRUSf21vcUdZgs+QDWEz9AxxNUcPx4HFFunbZDKhRa OCztXQdVRDEadsFu1SNgdkdGhot/M0vQrl/0He9DomctxnvnnBKXgZo1PLHpsmdx s9pWv3rHT/k62FQ/d3TDfmSxc43K3OZRfyER5D70eOlpomJJdJURUNOTmTBpPlLk F8mzaB85ojihNtCcQjQvEskktQhk+Ejbik6YAXv9+jSf4ncchymG6LYX+S/G7yL/ ScmmJgkCgYEAyaMVMCAT+lLom01LjJoMPxXFFZV6CVTZGuDwxhkt5URZjXWLIL51 ZdhdBhoBd8hGm0Lkb9yZ8kXwpt7FhZLjt1up0yKMRhCw7r+zbhb5o4AZ4+JwzGcw DSyp+DzinJgun0Xup0eUIo2ov4TaPYfz+OUxjj9yUtnWI7KOlziupOcCgYEA3Aw/ C7RJZJm1AtsZFI6mfI3A2ObA7+KW2/yIJhfXtRLbpt9H6CIxotj8+mlw52C9U241 VLVQWHVC8t1eWeMaN6afCLu8FNxJLBzS1GvFjjlbXrplBric1yuSX82aogZJltoz TvicIWICOYGkBl8O1j51rmbMQwfs88W3Zwermi8CgYEAwcEq9/6rE8ydZbZFlYrl n60Mn+vtw4+7uz9RPhot5vPh1bOQiFtbtgzNfrJ4nKBfcIw7tF3XtF2OnNrOFMeM d8HmE1NMVXtueUzOX0hGg9zxg/AwkcnJ+67ieP4Qh4cYrcXmSOnYJ8fV0osXpy6/ uniKQPUopwJZ6h2HNTqrXxsCgYACsVusJv6m7oKakFfUOpKq/4kWnmxKAznZY1O/ M5d+LcbmWeElZBW7anBeGCA7lKF8feLFMJrVGkpBcpgO/Yp6l91mW/XHQ5LZqVij JNZ8EROfKyTFWkkBERVverKjvPP1lqH+G2i9t9dTINUDBvLFiGokQjnJsDUkHo1K A3wEHQKBgF/7DAw9FaDTbdBS+FhWErCYHHZF1ZQxUmcb38VuSCvf8fkmnEpN0hOV niDZyim8rB+ggGf1symleU3LqU6et1oaP22ORbiAG9uliHaKsDU5xC5YHY/LGI4/ xQe/x0dlLCDV+ZIdmNdXeG9AiUntkxkCzS+xLZDXuQxDgWn2YN+2 -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 4194304 seed: 2NVWOXvtwcAFf0J/g0fDC+ypQbnCU7awZoCxcnhpkVM= zfec: required: 2 segmentSize: 131072 total: 3 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:MDMF:6rqx6odplnpm3q6mlfvyymo3ku:3t3xbyqbuz6ykrfnjmdccldcobpsx3giwhz4nsyeeiswb5tlf6lq format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEogIBAAKCAQEAqzmFksZy6jN3ReJ5UycelXqDDZkZZ2F2S/wp1LNMo6Tepcw7 g8X9/kNE0+5RTY61VFHSibeYwyV+LCJMAhIH4eDKz9puD1zSCgRF62/qofsqgu0p v4zvAoccEOfp1zIXIJ5YqvgsOLR3JLxlCQmbXOrhasa7OE6qzpedghx9FocQyPMH erzwWXknLxnoF8guH6EAsnBCPOP+Y8CSCgmnWgCE0XgF6cBFKGh/cWicM9lqZS6W 4sPRYYqXDzyYArYqHCSJ8iqxzx2/O1rIgo8Di0xZ8IC41V/Har/y4oBZjMDkaHo4 ov/gCsVr80Tu0maK5/NaUT2XGZXqra4FG7fqNwIDAQABAoIBAAjmuaHyvSCdwlKY vnPrMbTVpKB9WAu+zlaO6mHLXG2ZcZWu810bWuPv/VEDL6jXhWe3xTkxmThz54ZF 1iu5Yj2E4SZDFbuouKaaqEPgEpOPKhuaVrRFkFtSSMw8MjTkvr0MXlGtCyd7gkIf pST+IdyHvWY+pJb9x/Vrfl24O2yDTT6TfyhyMXqUIgCo+1Ntn25V4lFrn92AJPRU 
lfrBLf0dCMSiOJHfdyllN2UQkD/juqos6QBlRPi8BVAq97a79EvYMBD92Z7T1Hkk 8BKggbHmmoMlvJfPWdSrncMujLnPcEia/9fIxUSc5rJHR6o/l3hsLEwg0SLxS/Lq 9YlkMnECgYEAzBhyN9KtPRiNFhkU36SKCfYgkTm55A5RNi/vnRYuRBnZ2ReORy4G /16yBxTy6RpxbddYC6XN9Ame5NJX2sv55LUWGVWheJDWpCrTT4dGHbkb2enKFPZ5 9d2UPior+xO3F/sP0uNC8LDKrWgB7Ji86Ed8xitO/LovvTUfRtBMI68CgYEA1sUJ IlBsIJk9eWUkhHl7sObBFNScuVEPx6QIr9+1KTiPbI8VnHZxSVR3hONpzdCZAsw8 WDILeii71C/MU+QIKN1dV81Oy1aqCq+kwrnes5jpKWjAbYD6SJIYqcPNbPKZ++mG UM0kUqgv2Dpp3Co9YWecBOjpBdcQTSnkvmsmW/kCgYA2V8QBzRzHicP3QFJogf0n Tdu6D27JpG5HSVg5sXA8Pc3dmgIOPdkrIeGxNQjAvIO7RX1yDIHcGruuHbu6zFkL ZpQtxrkpyxb7u1Nsd45Z17Hswe1Gy6IJrygLrVrsjYFQ5059TnnCcLBmn6zzfG/A QVidw2ZSsJiJfp2HU2sSjQKBgC5J508TAEsCXCKG7xjySft1sJW5wVGbrAf+TbUC RTxuKVNff2vqhz4jy2LD1PD8DY5x0Gu91YVttBXme2Z1VmDgXRbodBwVQK7u7lbd 0qboxRAcuKShUNBFVLV6MxNRMmj+CuntXO/HuhAjft9p5zLQLutL+7U7hhLrfZag 53KZAoGACJa7CP/07PoJg90wbxAqLxxQhxTNB226nSFhhQI04CqTMJGJm7i0okhR qjTG/lfoWNB5K7y0nLhS91cOaLKw2SM7Z9Ws/fMdC7WtNTsbHyjqXPFyrwMsnjKe eih8ic/0hKnIkV1SOjU+rgi4fVeFISzZ2qtjuc+CYRbXPKayMhI= -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 4194304 seed: 2NVWOXvtwcAFf0J/g0fDC+ypQbnCU7awZoCxcnhpkVM= zfec: required: 2 segmentSize: 131072 total: 3 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:CHK:dnmjqjku5vyrkeaulae4bt6juu:d47airwaqedeji2omacwsp6yfqp5mpurj776gmzvq43c6dovtdca:2:3:8388607 format: kind: chk params: null sample: length: 8388607 seed: w6uP8Tcg6K2QR905Rms8iXTlksL6OD1KOWBxTK7wxPI= zfec: required: 2 segmentSize: 131072 total: 3 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:SSK:cqstfgln3ty443es7iodypwzky:bbaqquydugrdq7yuoxb44oathv6x342yp6o2j3eofdqac6rycdfa format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEAy8EGzJwnWVqWP6vX67QnnsvA1zli+zGdsr3x7TQOjyQDo0cb 2q8WgMqjZxIr+74v11UVbv2T/1rncbdQo3EVmkGM7ZdHIL+kbQMRGVQyZMIHIqpf FNA0mGKkLPUUVm7KL9NhC1fs7FZSs8n18Xm7SBpZj5DgihoFvJ1I9ulhUhLrme9l EEeYUl+MFZ8g959r2dzXoszOllains88+DzAM+X0dg/muYVwQVmSl80GIy+DNyjz Y05f+vmcje1LhdwGxndRJ4QHfgO0O7NdFLeD130ZnuQMo8Et+PHQRvWbJyMDTfTg zWqJVxELEVR0ylZThQA+Ubs6xlU50kliJXSICwIDAQABAoIBAAPKeEPqS9kfvwKO 7JotZJH8cdQPSfDT1X6ehhfHC4D3nJG3IE+LSRHls0XsUqErwrFw8HsmK1C+8jtt Zl9Zb/DKHhh1jhSP5uaYU1hzS+oP/239BCkw+SmJ/EqnLKpcWVVWdiHDOwc9UUCY a1kDeMvjTEidhhqhAqZQZCvOinRVGUcIm5jYGDfeB7WS762NEwA6r5Dlm9S+HkYr iAoR0bOkQuGDDL9W/7itJLizj8lK3Xuy8XUI1yHVfKCRDqpYES7mCAYWsGj+EPxh 3OwVyRvQvjtdyDpshSHDLuP8ggIEocQzkW21XpIbWsuojBLak/I2EHYd2lSB6nit kxdNwwECgYEA72Q78fCx0yIJ4vhcqUkWUxdnw5hLQB7J3+29OUVVUT6byabMZLPU SJ6Yy8hAy0OR5GR1giplB74KWDceX3wH7OULwuaEj2ImiIqXplkVAFe7mh28Rwh5 jv5phhggBfvjZLub/LGkoGkBFu3E1ZBa5gwy7L10gE27HWRYgfheIUsCgYEA2ePY IPAPmT9CS0eIv4XJgXYZ3NdzjAOHFpyJOPhbg6WEJNayTrZiRTys40GzmSwKffPI nVEQOHbAR/eOjvIpBT4adgOuvkycylIIQ7DZRShOtwxrdg/a9bLxaHNDvPL8wqXx QMdYZZxWTsPSTXiwzBBiYerZmaSj19xdielxvEECgYEAj//dRzWf4f7xr4PySSpb sXO8yR1M9q8OhBK/5jlcjth4YZ5iCJlbsqskAkDdKOfmVFpRjRDvYO7hzhqpvIoh QlCs+Hotdwp1X2Duw/OF/ITJpnUIkjn41RkYZL8SVEcmi6uGs0QwYQWI0EAKTOTe qM7huyJjd+JKEe4Qh23dQW0CgYEAkKMepT62HBRR/YbOz9QPn1C2elLK8PamhewD az3yAcGtpoaedoG7Whqc6X6DqfoCPPnHAib9jX3Gxf8fMuStNj2zcwOey9QvgF5T /hs3HyFSn1AvRX/g6ZiPh7Z8EMF75/of29B4bXsKD98Niz/CnLODm1w6djNET9aI gTPlvAECgYBSUMCKSAZvVLMQghSrmVUFas74O6/9eOz2/T3YyG0hgr2RehxZ5iBx c8GPRbhAvd48ba8DS/hqTeFRX7+H8RT+hroM+dv1NT4blEuGJDClo+ioC4cJZTI8 thRcwEKwbjeyXjg0JZ/z6oAH77Fhg44r8nC0zDwonRkQegf+iJVaNQ== -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 8388607 seed: w6uP8Tcg6K2QR905Rms8iXTlksL6OD1KOWBxTK7wxPI= zfec: required: 2 segmentSize: 131072 total: 3 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: 
URI:MDMF:nyipqmlsbqj5k2vi2jbi7m2ymy:dsdawym74j7fgry32phhntgqsyc2k7wgf6hbs737dsrrriu6px3a format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEAuzWxE9RCah2cgZfL7Ki09SgQ6djyb/xW4AuE3a7LjthXWmFp TL5LBTkGD6G1KYBw4BKgrj4xl8qpMheIvLP1rV2wCcfx8z+jr77YLrh4VoPINNND BX2+GlwSSYU8R6pWFnh7BR/RSMNQ9sXtTDRxnJ6xshIOAukOvREFRZGaO/sjLlL9 rg8R3oPvlSXtn7fdAXnxwoNnMeX9XEHoxM67MJ3kmBWb74kdVYcD3CimBqUQqkDs ZFWmlhVGhtvB38NDlBH3G8KG1ebN9icONcrRTYB8y50FieochydTSBqD7jx2I8ca LMl98BfCVsGav3yeBXetiWC/6FdyvWOjass/cwIDAQABAoIBAAzSVhDmGjBbW9My WsiYG2CpAFOLxLrvvOF2WIC4Tn+3iHALuOMFK20tpSEf8aDoh5KJJBEa+FmNiz3/ h6Fo79wSTRK2a3c99g980iCNCMzgFK+tgmsXXBRBFw2K/wBnhaLfWImWzsYdfmeQ UbrE4r0Xz1LDUstXO+euCT7lBHu0DuLrAFaA0Js8aklPm+eeOXyU+j/jqCOZVWRh dODy0KpHS2PcTCNzimKnfzC3DNsCedkIhMvqgIoXUkr/xqQ0UmZIVO06yPCfarZ2 wbXFip6A3gsux6O5899NKpJ+C3Mr/h6LbNb2p12YGe5xJ78kFR5IkSlkirDzNpJh ENybGnECgYEAxQdb6ELI0VyeCGIcJRP1ujtN4+FaslKssvzKU8yglb68ubi1zrLM nSGtQG+VexfaqLspch8JpVsY/Mp56d0Els8J1fMjvgbwuoojSKpY3s0Xu65rf7Zk /Mw8Ro9lEtCPPnWMYfWNJ6xYQPAzU4pn900BtznycuY6EHkClE2R7CMCgYEA8z38 LHX1ry4kNlJxFt68gw9DlzKyBAzLyNXD/lir/JpKeOpmVQ0S3E1orCugkRtfchXp UVogQwO6E9HhXYBdn9+19Y1cITeoD9XmuymsYq+pxWrT4ctUjaZiLHsTfsq5F7Xl 4xy3Bzqf2YuQ+f560GRaRmOjVpcuJpqFqVrELHECgYEAjHa8nQ7PoAKJX6yiKATc 0FHrK6TDRhIOsOPrUma1rUv3u+flJWDu4q7ZlvB1/vV4m4Yi/AsIk2womj+3PnSl Cua7Ol5GgvjrsfE9Sla3WM+aNeEZHkloIZlw91TPV+R72qlu1X97jGcf29vim5I2 oGWz7W5QXH2ps4ixwAy1FUUCgYEA8tlE5sLSipa/srh2jgXNIfBgZBlKH78Cyj2a E1tGQslsZvJnPqzx0p86TQK1qYoxrb5wljcsFJwo8FbP8UESuGZqzYDXpZZipYTC esRtho2pKx+v8TPG9DFUvOIYIbOWPjTuEuR9W6tNIq40DVPkHCDE/JfH4NDJU+Nc ZSvoxhECgYA1yJCfzoM9MAv4yDu8bLfqS8hUopUHrH+IREbSLverA+zQmh5+5hcF W/jOQdK/ow2wmCGSBo3TQla0Y6Zm0mA97QQOyo/N/U2aWWnV4FHa91uhI05vljAP /SQ8XCPJD4OYbGIi6lbdwVzSpZVAvPjiQ3nWEAiU5X9tRzBbHv9Gvg== -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 8388607 seed: w6uP8Tcg6K2QR905Rms8iXTlksL6OD1KOWBxTK7wxPI= zfec: required: 2 segmentSize: 131072 total: 3 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:CHK:bfjedwcwjehjzpwjsgwkfk7rja:jpwea2sgz4hfohqab642yj4cmrh64w6nfo7lv3mtacfzicurqkva:2:3:8388609 format: kind: chk params: null sample: length: 8388609 seed: yPi3JHKKbWaEEG5eZOlM6BHJll0Z3UTdBzz4bPQ7wjg= zfec: required: 2 segmentSize: 131072 total: 3 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:SSK:cq5nwf6ie35evzc5v2wkaucyti:us5gy5xdddvyhdg5ysritrzcphfquycmpemoviqbfou7heowq35q format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEogIBAAKCAQEAt8fnhPTCcY+kGOiWih61BQKwwcz3MysYGYWkvIvoH9BqiT/3 MoSzJsx+qcEatHIHAuxqH8r0nAODgQ4Bl+4xGlU/+Px+l1KmR2qDl8ThwLFQDhYK kUtlb1kLgED7NrLLrplw1ElI+wdHwActeOfpDAZlxbZGohyq0L4n5ovS5DxjNEaR aFA36mDDYmnSkqSIdEGepVD5iBx5F34JckySpuLVBTEytSLKCp1S4I/uEG19KigT 5oeDlgp/xXlkhdF3wM4z8zJjCC9Gla1KATJa/5Js5oX9xMHi4ZNpfZENSR5zDw4w c/GLMxc5m/zNK6+lmBiUoEixt7kNaDxnf/XkXQIDAQABAoIBABdw9tmpW4zfGLv5 nMwAxy+crH2HvocnCcOlnYHUKZc3PwODJm1p6iz6e/R8lkKqYbUQgSfNfB5TP2iM aA0gO2Cju41vbVkxWFa3IhJPcUkiBLdLPe1S690EQ1iIUVKkgyDh3veg4l9sig0X DUiE8h+PyFbr3T4LwIjwHEhGkO9+AZujL8/2ikzcqFOwv7AoITQYVM6c1amvkoom Ag/p42iyL8ywof1oblSxBUO8kDTFxhbazaVQARwWQHIiX1AGdg5za6sq5OF0QPRd ib/hoUNJeRehVFUMoNSYCNdYNHYLHboTFnemNZvPLSBGJ+A8iBJAoPPgO9LSEyyU diFMkGkCgYEAv8uKz28ps1feQfDHF920ONHxIK1JnvOTMJzCWUOcXZv/9hVlt0JB 7fLB/AJt5GkdbuHx6dLkju6sZGPxgENtgyq07bf448x3ouJ3Xt2RlfFHbo7AFK0e cVUkfskr/ZpfCMi0tg8GYvXF68MJj/tWx8/4yMz/zSWNFAlzao6LH9UCgYEA9U2P bBjKVEudYXf5AMOJeEIwnVxnmVhUFL1i8Fnulm06HvQxCzFEadvtvwO+1J/JjGYH L5vMGjhLvR8YkoCFCv+puFI4zH8cVCvrF09o5+D0E5Xrt8pKB9Uvv2HKbXC674Zi 
SAwpoaaavJCsvHnvAJ+/OuS+KBbZV0ye14rPfmkCgYBMLV3u0eowL2A5tJZ/JjGk t84b+nfZSElX74tJxQ7gJ0vcw9bomMpy5g6iN5zKMe3c0qUxB/B7zNRv8zpChYWD qXy/Rmj2oYmLCoP7C+n9Mh37DXvBOplyzix2pxRv39aLOJx+Cy2wNInuAENWCrAH INVhe/rF0npcUPykgAVGGQKBgDUy5ejWk4Kmh3Is96aPwY+AI1TtRlZ+TnXVANEJ X/HlrFYsNTqtK54doTjs0gUAxlAZjHNpwWDqVpqkVMro7nGNMryTsFfBNV6Xy7tZ cHHhWm2o9N7+EwIR3PIPfjwv14q8xTHE2X6CSEqewad6djfXbTyTgR3mnqoNJuGt 7AQJAoGAYtfnWnH7kqQbesu1b0adJZZsIZhestP6CVxk1EmsPjO9RUBjl90f9arb H5MnbPNIkOKgLLojpyholHKlBRAeO+B7gh6zF+PuBeNp9oADegovCZOhOlcG7jn7 VOaPPT/jxE2ah9k5mdZJQNt1tCLbAB9Mw1Emb5Ku1lhewvWqWog= -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 8388609 seed: yPi3JHKKbWaEEG5eZOlM6BHJll0Z3UTdBzz4bPQ7wjg= zfec: required: 2 segmentSize: 131072 total: 3 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:MDMF:eo7strdhhosajkhzp7x5lqukue:fmqoufvqqrgeg5nxxzvvypblc2c7r7tqx2xn5l4gzw7uyuitqhyq format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEA52AqcL8qR3Pk9UWi2qJWey4+VnwOLBN0MA/3tN/VxWszA+D6 kCV5Gr1eepPAHYELRdepXeyGEg6x80gFlJ8r+wrVGf6RA3FffB4NmbA3ZLjqStQ/ LYptnXoJT27VqboqLXgXUZemGuuW/jN+NfMJhmi6PDpKLhCH8smVer/w2hsVT6Y0 ltVxkmvQMLA7DHqrgaqR2CADWYloyx1R1n7uSf93TK7Y8R5U3FihsXimfQ2o/DnM o5nB0u9QddlplGFeDksVbYSR5TSp79OrLi6MTF+2o2A8PoM9gXy+0lhl09At+V19 +r/VzrjkRXzNSz7HrSguoU2Ys7aY2fbwhlyPCwIDAQABAoIBAB5t+tbuMADKtgt9 7OT6jGDR8aKNLo35waz1L1DrTqGSr4MXP8zdE3gtUfN8DWcJSt1qIh0MrOCgwc2V pMo+7tlVkODve8yV81wPAHSR2jnVygEtDeFkPyCtZUkRnL8gO/N+mPktoSERkpT8 WmEnsBKBy9snmAxGjKKlLr1rPfa+cMDADzUC3vFwO1f+3RRM7PrMT23GsBuV8cbK uiTXWMMjNNbndNVVDFG9T8TLIAC6BKzzkXSWfT6oON1nkn4UZW7WVAuIikoBNb9Q +nkYMkwJ2BmCaMnKjMshBU3MuZFWrzF+T6LbhpeKRfm3DGpDh42peU+qQi/rFI18 ZxMUhs0CgYEA729qc5i5/6ENSPqThf0RMsV6FO1g8ygn/2ZZMy6Sz3Jxp8pv9t8a z2YcmxBiLLYj4mVSEk9zobL64Y28W0eFP3xd9MqVe/l+HuRFOrR/iJaz9T9HrFyR cYeekAL8z0VmQ6kA0Qd89ILC9h2UNN+RszYP7m0XuLI/PJ5CvvmJTLUCgYEA92IC OUzkdF/GA4dyor1T1z4nSKKg1LFzY9Zg0gtkYLR6Sud+TFAkc1hvbv3J3gxBTF6n V9/UoVc4TMf5Qk67/Lu+KdVGZxVBVuilUg3brvXG4ON5dBncpuTH1GecRsMTkqP0 v0BIG+Z4djzJr/go4VLpsJ+94ulEQplc9tJDhL8CgYBkFuslD38RQT3QeA8bP8Lk unBiNykD/JFbzmkTYDC2z1x7i8BqLrGCaWkj2SFxF2LAzSIVzWjE+5CsoRdQAQHO nCqaneUHQjBasYnPFI0LiBQKPT20661RDCRYhycvbg9l0UwqFTtC6zacs5i00ZCS ndLjFG+KIdkVegLk2mNu0QKBgB7vB34oykxvCXC5iDEnYYuBvyHLDDdsdRRf4z2A pS2eg8hICDf8sYIm5dBINezpNWUaVOydFZaTNHwNaXLMK5+fzlimzaXoN4Jplvqa twS6wQKwDyjgbwIDi6VYy2bhz9m/XMRpglrSx+9pDINPkbUTTBuE7haouptlWAWZ J047AoGBAK7a1AGzFE18nbWjTKeTQI1GWVb1A+8nUQMkyntaRBbTbKp0w9ECngQD 9HMeoTvl58e2VAjkQq+cLxMRgAb1bvNN2BCcO15RtUq1XrVbSIxYPwoZgIbKSwc7 LIRZ508gOcjxs64Ajkn8r6mmk+nYyjhuoRUFplDW2pc+A46X9Xq2 -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 8388609 seed: yPi3JHKKbWaEEG5eZOlM6BHJll0Z3UTdBzz4bPQ7wjg= zfec: required: 2 segmentSize: 131072 total: 3 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:CHK:hah7mxwfpqemm7icdh3hwsa5fa:6epvxt2uxh42obpnfn4wkrplqml7voh7aqpnqnapu7ffcyn2hk3q:3:10:56 format: kind: chk params: null sample: length: 56 seed: YQ== zfec: required: 3 segmentSize: 131072 total: 10 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:SSK:zphopulx653rikxed247gbiryu:on4kfty6f4pskitmrm6zbbqufh7fcnqoklpozwm4mlhxnmc6guoa format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEA14mWsmxQm2A5nZu2dQ7rdAaZgb9ohE1EcCIjycMMU3wH36UL DhhwebvfmqkQEaiwomYzisZZER3IECSwqbLegy6tAoqrj+aQLszjcSWVi25FzzgN IGMtej2PgXfoJO7G2rYLvo44rCYUmBFU2+jKcV+1UF7r3wCTzhxzp1o2aQFBPBg9 moMvlX6nPshjoXUTVWjlomiKefoO1pT2FR8cKSfgKSeuyNGXR7RzrdP7Tj2hNhf6 AsLbBY8RVsqD/Xec/pOm0RtlHdgXmbcSON0hL9TczYH7YLU+4E7tGR5gXFUj2zy9 
zwEk7vJlMgYGs5OdoZhNcr2T3BNnOLnuc6DOQwIDAQABAoIBAANtReqZC529JZfL +UWlqPiOx8hU6DM3sJUbkKG1poHCQCPL/imA+HK+VzoPxrbpKVOFER111q/bkUBH PQ8Jes49cM5x3Wi8fq/L+arlUn5LFjcvGqGg0r43IS0foynfgkN7dk/cnlwB+jSr ybK3YgjHjbWss0khn5aV9iHn69K2467H0QNG3Ppufx3Gshxjv2p5MdWX18oVy4F5 tZEj+o4nTegU2NPX0BNuroP4rVUke0pX7T2M8tYWaZsA+tl3KqCZafO3A6lpr8r+ imVvLpOFjnTJ1K7Ruhs6A8kVpd2VJsNXBE0l/r+I53gm3HWNkEK3pZLGZK1I0MD9 v2QJx7ECgYEA5f7B9OgcEhDXvsvm5pOwoaPDxopOWaHt5Os3olT4FBfCrMf/C6Gw 5KEI/lc8WcKdTX+suBBkD/+M/6ua2VxHCnatWpXBRo1WHEgoGFGlvFcQ+OGPlm2d w2LPtFov7YYBDNhW0B6ETS3Pe9vGr2spr0V6j8fhP3BirvmKwap2gPECgYEA7+hZ 4DboBwIWI4iQPJqr19b/5B9t6c4HRpfLWizjurltDENpPkHXTjpWvcnxYEpgyROB 7PSavd1yMYmmf1EY68MNRkmnITMRDIPQyc7mop/O3xwyS8BPZ6+8nnOfn0bLmdNT kkkVEM4cEZFSwUUFoqQHaHtDsgwzRBcGXHmTAnMCgYEAtbWbA6VGWDeaXJG4Mb/J s0sxZ/DpigNXcp8r60L6ZNWI5v1z0XrDyT45XskJU1lg8lPG3/2DMOiUO4MW6lfv gKLWv1TFyLntqJaRpvUK3kxjil6bFRwxoqa0tybx6tUOi1l47SDPIjLpVFAFH56o 5mMcO/CNU5O1Q8zABdZpneECgYEA2mTeTGovVxHjLX3IMCNthBNI51ZlLI5NuUm9 6N0sgnMCfkNvryko4yHgjO0lOs76xJFpmVgi9ex9Y/M3Cne9BAKQNwgdiO9/+bCV hOFAu5JXNGvqrWLn5i/ouSXwjYJZHjNuxKCa+K1oh+WPPDmlI6XGyKpNueu5T6bW N6DE31kCgYB2l7HnVZoOtjgsu1S4PIok8o5yiJhImn4b7fXfWbT5k0gnBqpsL/0C uMk4uSMDC1zWRcQJSAZt/ulYvXSttth411Ljx4rzOsh8990/Br7xSngbVqUaj7t8 F9DHEhPnutBBMj4ky9BvqY6FSHRylmDnbx6ByalFkoKzHr2kIWzkNw== -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 56 seed: YQ== zfec: required: 3 segmentSize: 131072 total: 10 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:MDMF:mnsqs7br4xbc6ghtpfl5ewzjzm:jlens2myp4td3bcjogvs5gl5cbvnxl3fycfwqyi5qqxp6ddk7etq format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEAi5KYNdtP7AeY7JsnuEUiz/VridJtZnGytDRMoVNgpVUZ7+uR H9RCE0FiwFBBk4hLw6sXCXMHyBNDlE5omYzuX7El9+/Nfwc6oW5APwb79cVcMYMU mMXSsTeyQ29kvLt8WaoOGWdF1SurdDajd3AQndcbNQmYy4WMupLvGE4ZCaogUGYF qxFqGwHLJvIigHDxERWC0PKQ6Z8PlxeQLC6GLVJDZCJRwNHfGXxkGN1Gue+JDh6b xmUi8/JftJwrU2ayamn64xnwnfodzT6bH6wcxn29pjy4BdZeKA+z6WA7jJ7JHLp7 nTEfjaPy48Ski45+O+if5ThvjFSah3YXU+PPDQIDAQABAoIBACzflFVYbgEuTiHg HmyVucQPnSQCBg9WRcS/PdXuVxfA3SZwX8fSd+316zh2dSboPqeppa3xkFJosyUG 8oVPtMIKU/E7ZZ/OJLELH9fDuJVDf0kh4ijeDUfR5tvcgBBX3Pp8/Kx5Mg//ys+B 05uOaaE9q+8o5zmj9eN0Yy+2yED9OntDYiBg0aYuB3FpbKEkXPkt502NHoIM4wO0 retY0PPZsMfkIxYy5LC8buJhnXDApBWYXdQYbs9tcXAfUM/Vwbl2ds0Amb/PTnoA gf3PNPGR0B+jNR5kjbGUJlGIdxRKKHbAb1y99qTAU3XreaCS+1C6BnTAYI3HbtNL 1v5e7xcCgYEAvXV3a1I5/PJ2X/vgLsDb5rhitMNyvxhepObYvMBpl9/GS1zmkxrK aBgC/4YaLEHaIH1gJO9wBplPHtuDZRgr9Eb6X81qBqzhpWE3hWBwi/+cF5blbHlB IQem6zOraUMMJiIKUCbShgqht5DLgHupEett9N54A8+OxaWHh4MH4bMCgYEAvJfI SL2QJmFg8RtsRb1e/CxgvXNjNNRHNrYCEeBJuSuZShYUz7JRnaJVpaMafHId/PV1 nZj/OG8FlRZMORYsDZOU1EktwdrfhXX/Jrp5QjIGw3yws5kNcl1rvmR55OKuJxDQ YJEHLx1EgXYWKnhv1WzD/T1tYU4TvwprMJP1rD8CgYEAk3FmdYwxesxLGZnQtzH6 MQ1QK/NrSpKxnU3WYNaxlrNdA+uRuewAl5AQTUHU/pplIiHQgA4jNc98Bry4/iUY l+vhEEuxdu52URledxs9m4ZauPUDKS8YY5cr7SFyBeJbAxY8xnHgJtcBUfWKmjwi sMJy+T1lUznll6Wh2vE7YgcCgYBqr6p7i9EKBThj7NF5OkGLgkdPpQDQF+4ZQyk0 l57dA4753Df1rriA5h5xTy1ijOPt/6WDe9OVRyjvR+fiu2o8W+prlOIvsfOUekXW 0NJb4hT1bYpAbyquMa8Ly6cxFhLSwq4+koxv2KyyV+z+JZeOMrNEhQVlcFe3UNuG ZY0q3QKBgHPo5ViFrazcRe8YvW3e+8EVGhOiI6iaBHwWtv/G2h3Sb2ePjf1FPdnv dW199YvKZ5BU0wUjGvnGT/EIY/V6KK98n0SodFm+yI/tI8uREVjyLkA1BwWspuZe SYapkc7QNeQdyIq+Y9ZV/ks7Kq6XgjWXlivXE6URhBCr9xcn4jKb -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 56 seed: YQ== zfec: required: 3 segmentSize: 131072 total: 10 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:CHK:dx7tvyr2fc4u7lxjc6kehq2svq:tiy4qh2g6lqejxcaym3rr7ymkdkinn4qised6kgxloj7sptsqu4a:3:10:1024 format: kind: chk params: null 
sample: length: 1024 seed: YQ== zfec: required: 3 segmentSize: 131072 total: 10 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:SSK:fo2s7nih4drxob5yzbnmv3v5lu:cbyn47mjceyddva5v3fwwfhmezre57nsrm5q37pxqyg6lt76vmsq format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEogIBAAKCAQEAnZs6x26D5CRQj7bukVe0NoSa86YeTbow12aYJ50262ASxppU 2i0am5SA+XcTLinpy0bVuSN0b9kXqkSnAPWZ4fwbFCsgXsLvNwTiGBo+Bkgwlarl 8VVMz+FJLYpGxviWNlctkZ3MFEkHUs5SRnXLAE6fz2qhzdXWW/y2D3h34pqG4ETF 1KbU/X7OSmLzgo5hw6OsyxZYZYp/oyesie/y21TeE+6LyClb3O4mI80+vco6A9vp xncn1MJHDLpfY6z1sLOfC7NQ4N7J2BUvabJ6HSJVEOL4LbfoBJwo+/tKDQV48MfU C9NPhbV//G6T2JEg1sHcMNQuOSwQVe+hEmY8WwIDAQABAoIBAAlAGuSEjzCfXZn6 8vrVdh6lDLIPuIzBEb6FFQduMxvb2SLXk4UZlJ3uyt1rzTAcKs6+tCrqhPWYNBQ8 4r0HZCY6kPjhyWhPlv7GRlAVjsuLmJVHISdXnFkc5xnJdMRODJeP6TPvERMTJ18r /F+u6IVSGA+7p1ePeKG09yl4uO5YtHVHEynjrRbfzk+2c/6ePkWSeCpVDndCQ6eU me64LjZEPMFMRlGc6GhfqcT+0GPvyWIBocHZd23oAL8cJiMdXykWhDlCX6ZkkcHr N29MILURnGBYFOuozrYPe8zrZTGUhWpWGjZQLSYleyBM5BjCX0WR6cFBrhcdFLW7 Fgt8ORECgYEAu16bOPlKglvktH4ro4Ren1JpCUCW7SE8bV7hOG26MIWs9UACMYPK GdbK2Zkj9rVD77IQ5zWNNmmobe5jTqNpyOxMETsV6uRiGvhfzt1AqQOd2YR8vUKd ujdHaaU6owcWFOVDDZrBsgftMLcHmxFQDCAtH+ohfidA/OkVRbgs+OcCgYEA11XE wB1JfWDo/dIPUwCHUmE1AkJBEo87mp16F8e37nBXmQ1usD3h498ACRbgrNbdSqM0 gJ0PIKLysM781PqE7gGAn6WeudlLZ6/b2YHh31HDD2THio0+MBVKR4Rdv1rHxe9B nYz1A6gpxbYpFt1MrZRLRKGqa/F1J/bqfx6rbm0CgYBiUhS14urcWQg8RnDzz0Qv 6ni/qCsKqAQjiEQ67iljyOGnmD0Oao+k23d6k8excBEEOLZx/UHqqar+dLebzlh2 XLjV2eF4bvukF21/Cc8iYYl1WPZ0Af7udo98un14iwFlWaDEBM9bcplelMzi7ETK +B91vdBxeHu7uzu0aB8BRQKBgGUqsIMpv0seaphFRlnSl8EGVmc3RWc4z+H2NlRR yoJFWYJYozY9/JCYRmX+z5OkZtcYEiSSpXbJ14dl17cf86/2GL3oi8f45MpT/tAT i1DmEuR6jpzzetIQTpOHBpxORCkkHQmuHbaYHPf8exV45vtt/mbCJVUNXeNmyAjt GdGJAoGAL2huWld1IJOColg2ye15MQ1f9Pyu+9aaghglEtGrvnd3VgcH2cdVbsHj 5Sb2tSgT4m1N0XjT33nYCyTnwLS0txPqDU71KlTLcCfH3iPO50ppWbU6qTi2yUFj +MmT2Wy5/VgrsU1Z9hw7+FbXNeRnPNns4l43PE1TlZ3uQaBSF0U= -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 1024 seed: YQ== zfec: required: 3 segmentSize: 131072 total: 10 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:MDMF:6nqjbopsod7vfscqp45ndtfhna:ef7jrzptougwgmcofxtu3jrjgvfoib23tms67m5ssugrmp4ddqua format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEAuyDM4HLs7yvKQRNJvNj7dNcszbLrH7da2Qzj+OF58jDB2Wqq 9ZQQcLowoltdfRZmPIADAYy16w05CGa1YeFI62ukt7KdZZ0sbGipQxaQShZzeDjk 6sSFyN/iQ+v9ZX0GwSSLCwULZ3JRbXPYO5fRSa1WAd+OuLQ7vC6zyPglC/IyKEXG i1tBPe5GKboT/1iKUjB7CRybFtERRQh0wZ6L5ChnLjpGJHEkz/Z2WZcSwsSvdcb2 gRTkquIk8kPMU4BFZofuFEuqVZVe27sOeklsPgeCiFedifX+S49UMuil5zVjr+VL kG2/ZI2IRU3JPA62RSlNrvtSaM9Ltb6TBnpZawIDAQABAoIBAByL/0VEXbSKOSNz +6fwGsLzKoDCM+VVlZSOx150mKO5CsbjY2uYaBOntntQ4Ie7JDiZjMaD6U1k6ded bOae4Clt9VBtfV5+5kPZiZd++InHjVurTkd/AyvRn7/IihJyoCup1VOxNCasBWkN pvtHCKSqk/gwMHfT+FhCcFHkgn5PnsfPbZRDm7jCIA2gaqmSISL4gHjJginqKiSX HtE4E+GO5NEkzIiNQl3kf+vcEBOTYgj3YeAKZhqO9XDGLDbZ1XHmWV8y9n2RWsjk ySZuMuvOYUQrp8kmnAKe1oRGrmRLexLwHM++raXYUCHEauGOirRd+e/yFHlWTA1/ tuqjfXkCgYEAxV/pHECWeSJaZSyY4ygO2DhYlPPw1YXZWyZnp4ptodyRj0SPHoqE ucDjjnQOIvgXRB2ohTrEuf0CTkJcnWtrgaWXbzHxMbLX5xp6IXeZyyU7Ly+JEzY/ DIZTrdbmL5vfq3pfTHIt3ea1bHG/Q6CX4ehjnV5NmJrbBpq4ibWHbIMCgYEA8rXC Kx4ASPmvIMQIyRrMR1g3BURiRBBdJ6O7WTFDBfUiJjnzqSo2OViZbepXMxV0h4xe gMoRaZAxEWc5G8UkH+tL2WpneEA2w9o9EjFX3aUz5e3IKVIys1AIIdof6r6hoFV/ /oQMherBDS6mxmyS6GnzPvp8UmuCz3DD9HSpmvkCgYBotxPkC1hJ+DHhT6HlkpEd ofdNP4bcoeDJfTytJMI5h94qFoOf/nmgW3ffUi9V2i3t05Ze6OkKi/M3NfoRArbM 19/Z/LMsXOgzElcNfni30I7v39ZnvPYCXRn0Nvl09MvcHFaHJmSzP/2tBUQmSwOJ tVN0YF3mwvHFNT0GwqqQpQKBgQCr5Lx6qwnKpUM58nzCaT9KPBjjmxX5XJmNLHHQ 
boooWv9vkVWXdnTm0m//n5tYa5aXNXvsvK/uUpfd2nxgxZObI5sZhTl4ugnPVe4w x0+Sg6Eo8+nyEewkgMbxqrk2GQMBOeynhkAUTDmjq2mkWFsHTZpf/Sk1ej0vy46M wF8qiQKBgA78rBWa8R/AB02hKh3CrIyoMwl9WwGAWRvJUuxtg8tXehMQIX8c7Wtl TYNLuIgitz7X4kPIxhUkFGH5kX9t2hdFaK+y5cTLayZ4Grf4poLVLFQm8rm/vApZ 3wP28XLiz49qZzx1LYE/n9k5V/bpQqSib/RtP0lz6Y7mN9RpRZKd -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 1024 seed: YQ== zfec: required: 3 segmentSize: 131072 total: 10 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:CHK:yo75evk4cte3b7rdw72zxvl5ye:ex6h7ff7nclucjtsqwgwu33qgmb67t4ezbrki4zbgurwn2ct6bbq:3:10:4096 format: kind: chk params: null sample: length: 4096 seed: Yw== zfec: required: 3 segmentSize: 131072 total: 10 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:SSK:x2ypcho2ngz2yydnvdfg7zmuam:rnym6enrwa3tqqrhmgyo2wqdhmxfjn7wpzjrkzjtqibazembi62q format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEAwfmkPrdspB9pzUy2t5MhIH04ydpX+d0D3pHDWyQO5bcyu0eL prmWkEvPlcFW+ze0WTsp7bBnvl/ZNm/4ATxhq41cB96l6uROwL2jVOC0sJYRHZFZ 8BplNO6frfwsvGCVmml9cLj587VP1k6tk4eHglg1pK1rVNg5o/FzkmieGGDW/ynM 5FfsrCeWbFh33pr4NqrHHnR+VDADFwhYzHbMlWLOS6zujSoV1Cbum1/BIBxPijIc MxdsqJKWyCWA0mSbMXsV7a4altzIWax8fEbx8oxCiBPTyNFhQNW5sB0NcwQhpMf2 mHEuuTlMVw+RVYNBtk2vrZS9pOi2SdfFInghGwIDAQABAoIBAB/RcYHWIwHYTfRz /nl7WWY+aYJ23efUZg5Bbmr0rbrnA5vZHw0Qs/lkbh8nBpERG0oTHIwVhoGhsFGF KV3D3VOUzav+zMw3JLyUqYZCkRvG6fTQgxERtgM/lz16DY6IRfynttYMNE1SiE+O OrEQp1ztV5NKh74e9R7cJt81E6XKHPYAR1T78RqRiz8eTrfkECI9Ad73043NwRXQ 273Ph9X0EXKZh9RQYNuLXlvaTfMTU22JpPf42kL6/GlZSY7Ex9ldaQya1V1TG8zr qmcTX94hg+TMXmt6xx/imxt/DA3JOqTpIrA14acmI757uUX6FmGHI/0ATQBL1Cr6 8oY43bECgYEA1lVDh5cr7aGRIcomfrOHQeUQgP4EaVpOfF8VigcTk3GqOxfSKTND BqDUiOO14ETjs/qV72poFXdruRNZDEBT4w9MwZV/J9K0eUtNTnh55X5BDLIG90nK yAfcdh2PvVeDBeA4hoLaEsumgBy3bWr2WfY1O79sPXvoRWJT10+tnesCgYEA5685 XDtNGlz8tSUJpBXmaAAoJQzbvRjOtlH65BujTFBWiW5XXH3kURUlCOYShyM2Age1 S01e22WnpgSlE+4DbrV0MVB3dBYLif13B3+iCOPZ77m2uO4kqB870K5wINBMWuf8 /mv69nPhCS8Mqz99JOGNl2vLtwQzrkfNhldmTZECgYBwTlUIIyodZd9KOUZadW+W E5TGQlPFcFBX0urSXErho1lzhVPVysqAGp3C7K5MSUyW7eLKhJLtTJnhbEXoqXxL KaUqek8aasmuFMr5Jx+YJMOpB0+nG79peNUH/w1mRQied5KmyMHDv3oK/wEOEFHt aZkTKYZp4Rcf5BnSZCmw6QKBgCXXKLc3uFAl/+BWPEzghtFVtTjX8Mvh0WFV4nR/ TxyXwoqPyxUAOtpDadkaOsx3o8qRF7tE18ldwRQMjinDJixe1qt3SQtczmWrUFWZ Mw3gqSfOXVm3C6Wp9EsRMp8pZk8ytM+ZM1QteQPW+2q84+OyMz4YDR3HQemlMJxQ ihUBAoGBAJnLqJG0ckNRnuaD0SM93Stvx5xOu3g2uzy0zJYjzjIQfu2gNNx/KnuF iOOJID+Y5HgvqxXjIuRzxyH51rYeojVcouSIxpkbShDkURDSS6l+pfEtRcH1G1GM CR9cLUiNJqUgtyLxrFljyKKdfhpQ1o7o7DBhJ2C5xwr8WEko+L5m -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 4096 seed: Yw== zfec: required: 3 segmentSize: 131072 total: 10 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:MDMF:thtsw4tgtfmq4m3ajpgxuodvum:64bbbja2hsofykzx2imjzvtys33xhinh3kuyg274bkyuknwtobha format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEogIBAAKCAQEA5PUmN2ICIhg9Oj1fJgL8KS15MQuycqxSPnRGryj70RgoY/x6 8MguJ8Ofz8JvXBVEOzMLT4kYihyce6fVPTzNv54/ln4Iw334UDEVmsY97hNf8/tl EuksNVofSTmUOQxPrJVeM0ezNGFJTVEPTPPjhPjFlA+ANox3h3x/W5GI1zXJopiI llMmpo9Q+UhcP5R250bchMJ+wxK79bwm1HH/1tfICLCeXnoX3JT+ndeptbfks2j9 /aQEz1x6YLLkLJFWVZBrCTwbQ1GMmKHe/7fwis3yUknn4MtzwzeN2a+KbNPCa44W /77Ono+sGlZ+JfdOmAr+eGgO2Dn1sMUhZWDqQwIDAQABAoIBAAsQabNdchrxruvE kXeFx2e6AdRD63CtMSBBgDTwtxKIp1MFnW9LTSewxWVF0RnTEUQHGHHUfzIVZd53 4s8dxBeRbyM3nfbMfJZreM7M66s4lnd025KJYBCH9WEVfjsvhB4j7bRur5NFbERn OWUPmBwR5YJdKWX5bcFHW+Qx6Tn1EQoYnOS+e4gYbf94tK7l0J1tqJ9h2LOo6yMe PQctpdwRKAOVneENboh1pdxIHYXwziayEjuAo8QaPkF2LxroJASgRG3o2aDg7BUo 
EXFfvhxBzFNe9zyCVrBp7PfH740R2CyT5vbf7W4d1vhN1uqhRoFLkVWW12DEAvD0 5No/hNECgYEA+0wtYGe7c7p1rFJHw88ngj1t9gWRY4H1JcNZ9o9/7ZOFQRHG3d+s HdQOmNJ7wJlhIjp/NQQT5TiffFSWAaPlD1oUJ63N8UupyEGlQsONnGoA66vOkXVh +Pb3MhD1kH5HDG5uMp/BEIJMVDJnf/O+8McEEbvNj0Jxf3ovM0oGohsCgYEA6T30 QGc5/ZDXmDZDbq4kaBL9WiqaGj5RLMluYfLDlsW0pxOO24U/A/wP3qVzNNjlqoFN TApi/x1TUFwaQJ3pjiKBRTO36+i8yW0Kyd7JnMws7ai4uNYlkD0gUUynmaiYvEJ5 QA7sgBuDhK0aI+hSoGCU5gPNv+ErWE6v+VTLmvkCgYArS22J7XU7NAWwAaEBmEAL TUATodPxm+M7dVObig+VQ9QyaLilYzLJFM7K/4B4pzQ37HIcFS7EUCQSDJSnhbAi G/fa+jO//bQrnzu0q/JK32x3Letx3hJaDVp7UrasBUWCW8g6ipF9oaU64FA6mCju XKtTztJUezMIrmlRYdCQvwKBgCoZQZ3iQ+hNnWxe1vsCOZYDX3FH4Tq9Zr9zuBW7 0KvFEZ9ae12KBl68v0yLhmjSgVmuLvp7oXS0oVYO2boyBnbeKYEJHbhZ8MFWiiz3 pmJDxBQ9cOID3RHUxqGF+XZVpQPN5761MuDIlot7Bw3WIBvMcvO1Wgy5Iq60vTR+ pqVZAoGAbZurdaGhKu9DyJroKTRRv4Sk81vsI4hNgZuyE8FW35vfJf3XaWVIVo5P aOtH2mudq2vws0C16/W5fq+vwLa1/xY2X7Ni4ubRF915BHo2zTwR3K5dUZKG8QxP hHOaZsh6CgxL2cLCiUGIWNpKkcOz69GnZZoY0KJS0KazKTDGdMk= -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 4096 seed: Yw== zfec: required: 3 segmentSize: 131072 total: 10 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:CHK:4gokef54smahrbfr4kq3jhc4zq:owpwwfp5gof2vhly5u6jdnbsfuwwwhqkazpsbeg3nldxv5pse2iq:3:10:131071 format: kind: chk params: null sample: length: 131071 seed: LCa0a2j/xo/5m0U8HTBBNBNCLXBkg7+g+YpeiGJm564= zfec: required: 3 segmentSize: 131072 total: 10 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:SSK:eb4hwxibn33xlwshgqgpridd7e:7ca4kdczkgf5dowmqrazebqydxt2cmb2biui53d3etgy3rlcwkiq format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEAxxM8zLQ6lSfHQ5U876H/kkiJ4+xxuvH8HNaT0EhF144PQyLi pYDMOgv9h9nN8Da9w4wviZLj5kM3kVhPF1ElPq3JNEW80o3J3w/5hTX/B0OpU7t9 SXNcKGOftT9tYx0+38LzH6bbqQnYAB4KFfLyus1truXeJMzIu/3pLoINNT0My3ZZ 3Fjr/+Pwcgxq59o4atIJlzLILQo3UwpofAwBhD1X0aNiHc/BYxjMdAY0uJABvwxk 7JfC2XckIVdTCtbOz4b6m/HoGU71/vdg6gRTmXy4m/18b+yuEE74omovDXa1Exkg 1DroYe9s9zQUzLZABn86xUZwR/c+vt+qK8ab9wIDAQABAoIBAExNj9bIV9H2wrYh PA9/cMWBfzS42mi0upTVHCfPo9F4lln9w5B7Gww+r0kETx585ORQVaIuBqMp7WEM z5fY1uU82CtsdXDgvtj8Nv/7j8oZgYviB6YBDPhAIyVl78f3HDPI9cYSfww+BSga W3RJQAcgmSNZ4PkK8v+3VUqpt2VJWUHOHp4CETT3aL7JZpyjt4IlUb6gROey4XAg KubV/WTjrHFbS9RyFiFD+ZpD9eXnGand1yUemMDy5N9gAM6MAdYHjNFaO/fq9vGK L6Rr/Xf1vmDMJ849kCEG4rOpLwMkol9v6L/eO+xssuvPRHHW/BjCbFTtkPAD99vS tW80Co0CgYEA1KavoQqX+ApzTahmLON85F2RkZVMAFmNa9PYzbXrr3pmrFJg0ac6 V+vPYBTlaJ10bDLYEsD+rpYqiHUwXsm99yHBxfhRHdvQFe5CfjLVQqh5p96PMta3 Eraiy80cS9287VTaVV4IVJarbn3BootOyK/VGij5YNR3COrrr2kDgiUCgYEA76gV AIOEuGPteM19cUYRzJh/Q3TlDDhc7jK3n/N5Nivtfris33AorN9lEtlQ3i/Gk/aY pNUNFS9K0PW/8u3M6yoGDeh+w1wLb1RHUTamZvOJiTSknb5vNZWTqFx/d16WvhDX wP4QF5YKVVRdOGnrm2XdWlsbwE4IlMWna4NSVOsCgYEAvQx/AOVhGzN3NGfshiWr x33jxxB6Y6k5j83jZWZA5F0l4DbQOjK4LKfIUbviAzJP6Uz+SRXolR+NKok8elhS GN2a3jwXKTtc79JErNrWOw96MCItHl5CnVFew15StKOprTiNbe1N7J2SRIVqWu4M GWAwTLR2l33rYTMwWl46rz0CgYBX7oX2MEtMFG4XOt5h52G0feeD6qn3t95xD27M Y1sAA1Iagsv7F331H+pH5jCDtWfY9ku/fuRT94wt611IVvQu/LZH+Bw6tdUEPhoE tFaNw6GdFBGqRysqr/0DcxzZwXzxs+BV0WI6JTUZZeDmSAbId7Gl63PdNUR0wajS C9bzjQKBgQCxJZxGJT0MUrX8BzB/tRcdPWpFhnQtDENMdeunArN27KbL04dSmrZZ 1V+H8GY1bBooSyULm+H2fouuavnHN4VHp6DI2JJ9wcxNsY8l8NOrrwh9y3sexWFJ 1I626BJlwuFT3Vd/0IUpJjPwQSVj1On2cAMTfsd3y0B56H2GiAXcjA== -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 131071 seed: LCa0a2j/xo/5m0U8HTBBNBNCLXBkg7+g+YpeiGJm564= zfec: required: 3 segmentSize: 131072 total: 10 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:MDMF:5spgmqz7se34bzntgo3wgnqb34:zbzy5bsn6kkh6dz7leo4x3cfoiuhnotjp3o3n32nj43rkpoee6ea format: kind: ssk 
params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEAwi+iQ/lFevdNoN9SEG0Ie3i85CHwJAp+ZcVxyybkesM2bDzq 1QQIfD/LZ3Wk3BLxJNviF3HPVAEBjA+EK/vI1hsw912YAmClkt3/YQOIVHZ9DKHk iNejKGC8ViL8YHABszBlnPshVB1k/oXwBO0izlwP+9hUGXbHjzuksYkq0FLlGAJD bNPvAunUeC6NrSsA0lgPDoVdMop1dktN0tOhLxEauDOQu3YpVHLHhPVRpTQ8c2S/ C4JsvoQzT8BWuilfCqjkua2Ul0oq6mTSV5nVE5GFxcxghV0+5M5csQtiFm0wmxTp ZQFGGdw8bGMbF7Ghn+k9wfPnJroSGODuTGyUqwIDAQABAoIBAEH5wiVbG2awgGEA jxbCnMeqmW7fMwJjyFcWkteFgspM6gAzYEv4f1OLrzWbDGSzUNgHlxUFF36AiwCF ww/Yj39jJKte0sc4A/lW0K4q75ZW3Zy9onJ15VrSJxsS7vFrDMDPWC7SShwUkpxB cG+UDCfVsp6L/OLb7uh0yLuDEZdOgnkDP4PD5v6bhccxm4VJK6LmU1+ozeFVoyR8 Azp/kCPwBjfs0qZ+pfyR+0uQlgcOqW/JcOTeGk0E0BKHxYws+eEZS/FpPEeKYUNs tgLZAcuqqhniijsWpvkhcItD8wn+Bl/6ieVMDdo4qx1weXjlTx/CVKZOcsBNVULK PTDUQuUCgYEAyVVq3ac/Z30nvDJbrvg396VqY6SDQF2QSVbV995ZKRofxJwpeWET QNoIBgoVIVxbk8bOs1ojAIFyPkqtIuunkTlsaVRfdopx/choA6mzRHed4YQOV4za Uq7xwmmOCS/vJA2xNl+BXnDd+K2dERtLl8iW+zzBbZ4wJN2vvvpKn10CgYEA9ulk PDQzTpa6SiOO1VxlhN81X7HFY+k/FJe6Ro9tVnnOCGJPJcum+4yicBATKmhrwLQk H+mmqxj2VBu7WLCgNpEEaQPmVLivE5I74wo7zAM9952r5Cw+cf+xtfNNYL324o8j VBp3FEV8U41uy1nsmCyf4LUMBCyFVTca25NCK6cCgYAowDpGLQD/YGy3gfXev20M mhWjn3vVflqjDYl3hzDCyf/eGsGmSMjN2pO/LTFDtF7w1U+nK7pj8s993j2XEN20 3kucMjC0XKdf971d6G5ZkGCLceA5RlA2ZiSW9iiCoYok4QSafdBAnlW/bNyaxsyR J0+wAIciOd+CxsA4xo5uHQKBgASjR6W12UzdmewwlMs/LAz94FPG1A1XYT7yxqXy pbwdF5iiuBfepmlNL/Po6WM/iN6aw57x1ZabJm1YBAHbd3bu7GVIlHf87BTzBzrx g0QGv5A6HvNvPVEI236ubkKl7tA8ng5DXP89euNa4bziGIaXN/2RiQM/DtYV7eQ3 9OM7AoGBAKE1BocDrOsN55olO9cnw4nQeBzXvlstK9rZ5AYa3FKnNs/sY2euzlBl m402fQ91RfSF3pBbUX5cXVdbNt/g16bN7bJ3tI4A4XSo71Pr6+nmiaAt2WDH2IWy X5cw5+CXCLok9331H0W8Im3yiuTU73WPxGWJ7voDpoxpBGj9X6it -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 131071 seed: LCa0a2j/xo/5m0U8HTBBNBNCLXBkg7+g+YpeiGJm564= zfec: required: 3 segmentSize: 131072 total: 10 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:CHK:7vfgl5cv4nlzqx35z4uthjv36y:nnueftbzxfz6u5yjxwwofaxzzft7xss5wzfh66rrcwv2zwrm63sa:3:10:131073 format: kind: chk params: null sample: length: 131073 seed: /N4rLtula/QIYB+3If6bXDONEO5CnqBPrlURto+/j7k= zfec: required: 3 segmentSize: 131072 total: 10 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:SSK:f5xydhnzscumihbschybuyocvi:y3wz7t7pyj7id3xyzdut5xwsvpmzxxbotdy7b2azois7fxfuylna format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEogIBAAKCAQEAofW4kcitiw1sFYCYY0wIJXe0zMw35+vVTFDpF3kH6dTKcemq oHylBFmDa/qGwg79GA9TfpUVJ16EgPKEU3mBdls1MyWrqHDfy8JsQhHPS9fUD+td cLJFKypM89gtEXCt16zWe+36pL13rffT73hkoeaqGI8vE+xZazd4ZnFWd0bTGpl+ ZYmKMSbGNY6MAQfWPCOeQ4EK+ReIPm1dKuFAC2H3g6+BGNCArnLSSCF6QgofRvXx xy/taEULnHHRf8G9ELRIKpT7MQRediBwRU4C6dizNTmVKigOEKIQ5uxOCjS9gjX4 ng2ng3pErVaOB9jkjROOPrtF6uCOQY1cT0zTMwIDAQABAoIBAELu5o7VNSd07hi6 0v+igfFeFenXcjlWRQnrnFE3kzYnW10VeQ8nRBlWlxIucLfNcvqZBuQW362sCa2y zE4lNoQ/8G4JYPZVY5/1Y0Ew1A9fjIPhvPWgryZGLpRN4F5HR4kNJH0GHmIr7USH 2d4rTsd8KQrKTeX5dQDy5T7NEzNqeBtsb0Imo+5hsZdCI4NMZJ1wYnx6OE8uYMOu L5MMl3uIkHQwWiRuilods5bW5a5kBmzeE5X0hsGi+czYVXcD6NvS1ColRi8Hpw59 ZKeGJdaX5eCOGFpUIkM70e3wIboCPN0msyEsWPNfgeuhAeJJ/l7BZK1avr2+mhVs E5LyzAkCgYEAuFIs0imReqm3/f8ahvlun7VPNVqeOjr0Qp/O5LpDv4V5x2RLG6fR MBUUM4Lf1Rknn7EHYfYZzl0FheNooQcIMO+wwZ5P7dsTDPMOpcjH4r+88xxa4q75 fiHOmfmlcXn36GKCo6km+PjmA9t72Yd/ylRFkx9KMJtNiIpI3UNABrsCgYEA4PFp iuCwLtL2MesiMSWjNFhJ3766g7ckQDBeKEavXt0ZrteEXAUXuywqpGrrhiLG4bT1 LbXfkoVfuvUk3JBSmTVLuCRtUGdNeAleB1Q/HC7WO1++yKHm1yFveeDN7+fJQ+y+ pdoYgZ58NtjBQEEdshtaz2fKwxCyb/WyI2uMaekCgYAju4KG55oVXouVyPu6iOaC PaLyY/PitAUgWVzBiL6ThWu7VN0eqmTqXlvBNLDx3eOJmMcmnZAZKn1knFZvSS60 
VfM9RdSW9u51hzUivI7LjYIy2x9fbK5fXmxv+y6wlgWSXm6XDbbJc28b9lPHMvZ6 IeYvBFTcoW7hdnVzt5LU+wKBgGCMHZHHKLegQp1gX9eaYPdZobOQKHvaQovudqtw 01qzKY/a3uukH/BtX4wcfCShjp1XzxgkhOZdqp4TFBQ7OciakHpj4Ctve1e3JY2d wky1aawoRznUC8Fwj2lPbPS4lrE5zwZemsAfpw7fb+rFSBqnFQ4KbYPWCdB2M1Ry l9zpAoGAKRl7T81Ji0P1T66Mi0i0vm+l32QZuSd5pUyBZtsycFcuVz1F+lDIWhMA fuai8L6uzFNd/ErekSIQXL02buGHqp19QpbxRlzNYY4OB4OMz6pZkWtP6H97hXxm lGTDX1qzm6DfQpJjw39B3neYZC7cwAImcDyvr1kb/tlOmePvv5U= -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 131073 seed: /N4rLtula/QIYB+3If6bXDONEO5CnqBPrlURto+/j7k= zfec: required: 3 segmentSize: 131072 total: 10 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:MDMF:2hqvwmzuxctume2eereg7xj6vm:qucnebkcgyqkipwxlxmt4k5nagvufpmhahuvfvmncuihp2llobzq format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEApvVF2hYDR6x/aIKoeIrLgvEerWkVwxeTUYP1rTP3faTPXZaJ RyDqHMkwSVlkTIdhorRROoxWgrD8GhI9Uc443ac1iFj7MKE61S2xnKNplhnh1sQQ OmjkCn0z9cIqCdaqDCtLwKnLKEKqbm30JoCi/nGc2Sh6FGvJ5Wa2gViqI8bbBlIy RzO1NcoJjEVmxlAqLgH79XDp2n1TOJvm0V8WsOiSNQREcMcbLTcl+cwTbpsKkE2d gmkzej9Vf/PMxHrGxGqYOUPsYwX2gNNiEWx6pMN+INUgJXuMOlwdgvEDxiCz3Noq WsJpTsJKmr3rPrPvKuhlDx7EsZQ9taxTyB2OXwIDAQABAoIBAFI/9t+PhKIkqsez xodL6SJi4vgPEvd/f8Xiun9PYJd3P+kdJhfycSMpQi6AaVcCQulC59luFZhg1HGL lsXcUEtx+n9nRqgYZcFrt1oxbuzRZ17ETDJaRi2crKJfuxIJvNAt7C3H+BunbArn BCaLrMCo+9pHhIzW5SmsRjDGm1rv6l7gMMb6cJ0n+K1Vc3dy1yTDLqXcpEuC2OpY bdekHv3S2mCwDD2jSpKBhX/fpWrKEoS34G9IL78tYSpUAxAJqPaHJGf6a/GGJlVO Wol5ZnfVduocG0RYfB/Kv5Mihwt/d7XuSDivefP0myr7w+m3XsOm00/BUy2eVKmP yrUDLcECgYEAyGsJwXMkNYw/hIRU3f4gekJrwHkWvsJsDkxluyWfjLn4XQGiylI7 /pix1qj9LSFi5TDLIMzZ7UlJgENNbcYy8jFx9AepSMHFhUvARWncOG7Uj1uHLOA0 ySD6V8dCnqaCUImf8chKiWr+joIdoFBx9xS3PirHCRQZaKjYkGABa08CgYEA1UKx uf04DS9E60ZXQf/Yqw4PZOpFgPce63j9mkggebnmrOcNMub+4BUdW91FinqT+IkW Qt1sI9TYtucBg2tZF5LkIRWnvR1TeSgY3qZsFh1qv4Q2zVmTs+ONUv/e96tnvyY0 s3tdeznWvWpqakE/62hnjdLIVcQRHmlLDAwJp/ECgYASPEL/+gUCZkdlPFEofbXg yehZ8+qQ4snIJ0VeWNcCi+1AMSTpub/Bs40C1g9rKs1/wwfIbTsq7u8kH3uNEGqU RNF0fbn2Z8McFL9i0XX7IIJwpMhQ2fmTj0+X6wZxvv6+azdFXY8Cn9yXhNlDO+6S p6zgmC3R8qU5M5u4zzNx2wKBgE5ggp0OWUlPNA8b/Pm+o8zKEBJQn1a0e+KixuGq 3HSgRA0LpagtiUKlv/KBMgug3T0cdNgCNLo+gZ9G6yF3lHi7fahDIzC31HPUrr81 fsfp68+TMejqoQQd/1SfwTxY/Hod+oR0NHkTWr6mm5GNhYZpCpXu/721n20D2ZcF 3Y6hAoGBAIcx/SZKMhXCGSx0/i6hS7DqXw2rlMsO0/jFPq8jyWpjES/BV31cbkqX i5QiyVspb2k5cE/NveUrrsMP3HHQpY9gJUr+GineG+aWr8Bruh2nCrar7ppimhYR 6ek5FVvZPBvVVGrhIhX6XQ7RNnRSr0UkF9grNkPn6QQNWcj8f145 -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 131073 seed: /N4rLtula/QIYB+3If6bXDONEO5CnqBPrlURto+/j7k= zfec: required: 3 segmentSize: 131072 total: 10 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:CHK:cy7fmjsldeakhfd4psbmghmqyi:6a6uvyai4jkzz6hj5yjugm6uie5etvymcudgiwwjh47apz636zwa:3:10:2097151 format: kind: chk params: null sample: length: 2097151 seed: uqWglk0zIPvAxqkiFARTyFE+okq4/QV3A0gEqWckgJY= zfec: required: 3 segmentSize: 131072 total: 10 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:SSK:zxil2jxwwoe6le5k73zleft6oy:gamewch4gcjlcb3xb6jzqqa6a6xumhsagoeakdf4ranxvqrq3hiq format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEA1ga6vTinyDMFCVRuC+g+QEjCE4RqjfDLT2Jnq6F0Ld09to0h wIk55Pl7BCUanPbvA2C4Iy6uH6Zd8ixtrzfcI0neGUkfwn2k8PJ7nYO1DJ4E7IgB COly2AABlm4C943YRx5774qz5cKgYa+1+OtYZv/inbD8f97uHnfD8pql99TNKEZA y2yE1sZ93KSdTHidOXcj+EFNwIpBtnw1OSwx8rQl5L0zUcYRjd2J/709MRmYLaXJ F//mZQdyETFGuD4VOfvhcZiUueNnDxkCjM2YBNU6BJXOWeJsJ8ypgubzzuhOR+pj ERMo7tBnqZQYYcYe4Q4zhWF1IevGmwtWXAU4pQIDAQABAoIBAAWYad9AZ9w9iKca 
Si8WOQ5pU/kDpwHH+SniNjiiQSiqqI5NuUOHjlOQFU29XbuJOpICrfSSp+tu6Blt OFpN/aBG1r9EV364Q8OVIioOZ+8bS+nNJLt/IKSSAJ0x0zvjvTxD5LADKTnbcT0+ 14m0pNeog1v35RDkLTUT/lhftoOBxEUm/6XQOpfhp3RcUypuf2po7mbpFHvcVpZe DCFhp8/DWho+Oq6yBMsPavP0yqQEgFulbigvMUhq1a3xqnOO6T6qq/A0Ae5u8y4Y ItK6gzqhXkrtkyvawAbCwZfre45NXaVFcHFXT6DqHRKQE0GtAs0sI3HuRm78e6Cx 1ef3a68CgYEA4krJaRWuujRee0XgMPv+lmFYLC/bs+uiMITl0zkt6HCNj23eNdmO GP6J96V6tknKzdCoDkicf4zZqscoCCqm9WU+u/Bfra0pWY9mXVYSnyo46m98cX3r dIqcxq7uet8aMzZXn0z03PgT+ytOS2GMucpMBb/WnklS6hF8lfFCTTcCgYEA8h+2 dgVlyjDmXUf79WVI7J1Of8NEIGYUERaZGAzwX5oTRPSQc7iN2pRwZWBZbR2LlrJ8 23jV/Gx57C9V3eblhYRjR4wYTKkFak98YKyZnckzMCaFMWaiBYz0aLADsVn3CB2A yte9NA5L8Co3OlWo7OPGR+MwhGptYCUPiBtUtwMCgYA/n3tFWl1H6RVvX1QLMa6A pVnfAo2o5mUxcwwS+Q6ZPZvvaZqCVWqISHiN8i6wNcsZVsMJUQz/J6DDTT9KHIPY luCugoTEFd18Wr8TGvIdYgeikjnQxvB+UcKGcgSG81cwcuTr2v01a2Jiyeg3dXPV gLUjIK68zizLtqLqnWxgvQKBgCiWXFXIbdnI/LTiXkAyrFjNvdz49LChq/d5XEyF zr2X7GcAwD5Fz7G2dGjqD9OUwlOOtBNuXCCmZoHLJY+/JvaMzL+volsncjrx/B2Q kWe71JLbwjQXyk035biu2M+gDyMTHwXhyFuzkdM+oGds+JZNUG24jeeEl7UoQURF oJvVAoGBAJvblfRFyShYaeIj/mtQhBgdJ1rHLmKcQNlgERnH4M0uwFOxLqSkgZWa uRCOGQRlIW1+lNMqYFNSeq+1Qp3DT3Y5E/9Rvp8aPqNjN4oCCFmaNNHBgUkgqDS9 EPG2GwgfWxGicfJ+ntis05EMbNxSY5MdoO3kJzRxcoZvlzrDEK+P -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 2097151 seed: uqWglk0zIPvAxqkiFARTyFE+okq4/QV3A0gEqWckgJY= zfec: required: 3 segmentSize: 131072 total: 10 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:MDMF:drsoyrobruplipvl2zet24hvhi:zvyy5uqu52rzcae6erus2ytl3dugnsf7n7im725vxutz3imfi6qq format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpQIBAAKCAQEAr4dAM7CUJMARGjCtRYqUue6rdSHxf7rn7zqNJ0k/5DMs3dO2 cSdEuzPmPclqEXoE/XUTDszfZxkNIzKpcL084WTuzcecchxPAfuD95ZlWdRIK/2f Q4OTeIycpBycdEEfG2jbt/zNbUoBfRpQTTq1T8DiK2An12CEAljdGaLgBHgn9/2F 7xMYEDIcjtY9gID8Kkqd4PQmh06renaK5Q6QtwmVoztj/U2O2bdhAtUmejt7bGNX 3b2xewZNBtM2e2iJLZ2TgK7b1XIx07/Fw05RHH7bnTpRwIeAR4e5ECGegUKVucaB BM84CvbVpOasZqcssIcflEsI4yGUgozHlIgk0wIDAQABAoIBACfNZpySBPXUa8xh j1j+lL0Yxt53xPhu3JsdztZCwO8xP5JJqMw92FMO8L3AB4JRBgKnYpvvjxUk1BrQ KSX8c2q05YXaJrqlerD7ZLBm9TKKdZcsGspHctBaKkb4ie2+upwPigtNkxOePXot 1lm831JnbaHiWwZ2x1h06CYhDeVVLZuU5Yyp33l7X/hUlE2YaxOtL7ODZKUs2qvo 643v2x9FzZB6g44n8HWrJzxx6rtZt8qj/G7h3YVvQkT6D1LjVRD/IBVQ5+Un99k4 IxWuMZ5u2jKitpLlxWMuMKYQvw+BWpp7s9nTp/NkPKNfknptWIgsMY5yxwDnPA4g TkSfa+ECgYEA3MUmD0djOeR/YiXNt9ETkSHjL6ADPhdTEtH7BeRoRrAQ7FJXOiJB RiaIJBasJrBz8npqyfyFfYBJSe9lJDz2zSGqitmJ6YyWnnEv4gr7XDERGhkfK9Lv F9hCMQLS6KdoO7jBG9thNBpmEheSlOvaT+LQynQ8bUOdNMXk2LPuR7ECgYEAy4nl CRp1YdenVXO4qttKVXJXWh8OJ82dyNgaX1krHsCnph/P7AlB81VKZ85Arqo690YB Lu5BNJgsWcmUIt5CMNXWl4DDqneT6gwagRpEmiBSlEV5ByJagkrLKKX6Mis5Cuu9 x5nbZ/09pX+3UVfD1i4DEChErGgzCDicNmKLWcMCgYEAlcCpk32iIjgL7GCmTcTl 1/G7sKeC65BYypBjDVklHqX5pMQp5QYtbs9eU9SJS+kvjVBatc60IjBuBlf8LHuq EfV/QJZVhXXXCXzPtS4r2RpzdleKHGkFxA/uvl4jAKvl+XTWkPXb1sL9b9JLnPbr bHr3lA0KnDdcINsH47MRs9ECgYEAtbY1OQxbEW/jX2HB0x+V3HUJUVb6X0StghqU aN2Fpp3ezmwGR7b4HxLdK5Gyo30syYfBFLH2msrkhYB2dS6yL0EppPZ7ORwqfMAz hWD7MBJ9RwxDAcCEx1+YwoBzvwhhk8NlGebdP5iRycgc1E0jdHp9l5YrwTQBo2xO 4irWN9sCgYEAgFSvJKzRr2lpiAAjFs2iDEv01MDAcGRmMTBephSGBhNA4UjZZom/ W5gbQnHOhk9AY2qHi+z8Dzg1e1gU6pbVpNui3lSbng8sFbqDmpbhGyxXtve2eZyH mODJTFY/6VgevSdbgXo31ci60X7rYeGj7njmVGlGF8M9gn7jatE6ASg= -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 2097151 seed: uqWglk0zIPvAxqkiFARTyFE+okq4/QV3A0gEqWckgJY= zfec: required: 3 segmentSize: 131072 total: 10 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:CHK:ptsofqwylmkvzmuvrw5n34j3ma:ky2fs7xrlke64w6kfmhzsuilzxbhfrwwzkxih4rykpbxrr3bxhiq:3:10:2097153 format: 
kind: chk params: null sample: length: 2097153 seed: BTBX/ampNfLU+ox7xipBGiaSbgC0kcB8Gy7BkJB4oKI= zfec: required: 3 segmentSize: 131072 total: 10 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:SSK:c6l2mn3jlibs5zzoy2bpamqhru:migiw43ipkn5n2poogkq5n427lfhkbtrlwplue6mjh2lxorkkxtq format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEogIBAAKCAQEAsud691onrSR8mmRebEYMCb9vV9fBoPjt9ARGVUHAd4jSapGF bEa/lSUnSK/8pmEMmflmbDQGw7pG5bY33K23wPqG8LP5z5TqK9junZaBIJPoJV6S jaNJ8DVh+9AQ9f8bdt3dgAkMdRLEKCqKj0KHBBCphvoJ4qROy43ljaMcS5+MoEi9 q0cUKawtXGvwvitSfoLhYqlOij95W0hwD0kCW0tE1u7iaAwcQC0vZ4b0uEUNPq+Z UbbIQtl3ptlWjl+lUubhE5VFCnK77Keinw84HD5Yjom6uTsddorr4vhuu8iCNdP9 A1SVt8Aot4oKt6UeNUcAM0PDK0M+3Y+XHkMceQIDAQABAoIBABD8qz+Yyxsk04L6 ZD/OH97+ExWpahx9fmSU0lPOkDaZYndVdXB8QD0qX7JGaYwnu2FUXb4I65qCkbBG jsPQp9m2QAFTaXUlG94JdVC3xW+BM8H2mp5BwqfA/dqB3VZqQGKXOuypD0p/e2of 7fOf2r+PUHV9QNqJBOVNhh4efnWMmj/Z8dJSHnUVIz7iF5m0UFcS7ewKRRPu7rST hE2tQv9ezvua66F6iYWN4Lfw0rAOyUA7CFm0VKDoEB3PcyRgD1G38XBpk2EHwKJv mKu0pf+AfzeNvpfoRh+z+40hdlgI1OVfIm8RJuhaA3D58Nlsf1iLS7mmbkEJjcna kKBnkeECgYEAyXN0Zmar93SfLx97ZcPcsRI3OTQSeSBzS+W/imv3NXOOmVawyeqm mFvScTJKo0f4hMWdAZxReepNywrrn5PzTlnQndnJv1n8oqhUtgLLyi0N2YJR8wxm YCGxn1t1z9DVBSGTAlx/2Am2DrhQjeXYsI+uGlkRXScQJil+9ki4cBECgYEA41kW Yg5gWoqsXSKrJDWvYNQcJCLbSOIUEBsXS60bp2F0rpxLFgPs93A+I9HCSH7qL+Dt dgQWgAry6Z0bzZdsXNLrXkqHIFaOXTisBxodvMWRH6dbJLyFVt1QYw8LF2UBIRAD 5SiKN8/rxRCuyxpf7CNvzyKlrMmn6JrfhNmwTekCgYAOwrjqr+c10IPBbisaf8lx 6AXH2TrpSSlpjEIGoHaSog72yVVW1iyyyTeYN7kkUaeyAtDIR23o4vQkRn6RSMPx H7+bcVPJA4zxVigu1fGctMRpBZV/m478yDs9k/QD8CdLovQkniZ36+49EeBFJWxF M6HsKE6PZsdWJIA7B4UMIQKBgGIItGIsGNhyG8k9fdbrX2i9jjT24uAWvNgFFpKH XvlaSNpSgv5HSxOXzvPbK4/fSlTDBSJyuNEV55FdMfQBa7TLLrtGH+aN7G2+Vk/p rxELkHy5yc+Zi1XdsSBGCF5aK5Z6NXPHe3J9sgkUHItwIBTPYxNKuW48tq7Suber tx6hAoGAMpcvnPUjv1LLf10MWHoXxYlPvk1kL9INWQIH+dlOdp/3IKglf2YRN5Id bhCrwyIEEcp5iyNxgxrKYDJwEJblJV4M3vFEIkoXOPFNni1mazfBb1tnuWCty7Me gXy3wcxH5qIGhmdiJdubUyZ2Spz37spBASoV82HzeeW+x/pVtVw= -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 2097153 seed: BTBX/ampNfLU+ox7xipBGiaSbgC0kcB8Gy7BkJB4oKI= zfec: required: 3 segmentSize: 131072 total: 10 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:MDMF:immegpvupjky66rdfjm4d6wef4:6j6gmacjaguy375eqqxrigvknrb23lngyzhdv47ynty2iihpqmiq format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEogIBAAKCAQEApP6Z4uiEAWfE/DO2Q0GeO8AlR5WgZpEd+/hF3epJmGYyTvRQ Gx2ZG6szk/btuLkI9fStqs3paQFW8aJG5I4HJjM0w2N86KIDgtexHL/zqf8ogCV5 w8LWGUPwyiokbUXFdo/td7pbzbW1AYNOLohpWtMkhalxtTIWm739TTPDGemWGilT 5rHOT3NbNmPjGPG645CYadJNShR2F+B0AWNZnxANcBD8IgMWvIl8IrlMkH1mUKux TRf4LneD1X438tQ2hCsLPcgxXJePyzww8pXujXVrv6Y5LsVoaxEy+qGeLMIcz6qT EgilvtiIO5GVO3mDR1TboVAk1sCc9DMPtAzV8wIDAQABAoIBAAT44zxSU4ATV31e NZTrSlB0pur0WGQe5W9tePWKFPOxyLxWYn+esbmCvEguPdW+RcXbvMwT7n/KmYso n8hNe2usSV/GBMKh90cfJug95KLv3JGYD4ZVvcv/HyeIg5aDbsL27WoZRKD7Y9wK z/VZCQvCpywcAiA6xTGmVRbZg3ypX/FdANVoh9ajiulwolhBss+f7OKrqJ9pnGF4 cqYBWju0fmZM4KE5nXE6ab3ea9oan7nXWIz+TTWxFJkLvg5/Kabffspokxc0j3nM DuMKJFTKpJqazitMoFsxGfaketjwwNZbgwleFOFrxYxtP257rPHF/UFqflIpnh/S 7ZF/zYECgYEAtiLFm2E4Naav/4agcOB+HHnFE9xT8ObIlEM+jTKwbYP6Ib9Xecj9 PnxLXmlC4KTgnkHtCPGYdUD4GeoOhDTI0VACgQv+HzY70DDbtN6UNak46AYcKnX/ LPFJhOWj7/EQ1md/BzzmHIudW1k9o8LZ1KIF/uy5pUi0i2R9yP5Y8UECgYEA5+g7 H8Z/RIODQR3esb+j1CntJyYdoEy2U8GZuFg503hyleiOGG9JKSeBAQq2nqnVUWUQ AiPo/vmk91GpoQvJycjQT+C6tgSzzUvjLs/SnzxAe0RwbAjn+m4OCy66WBxgSopY NtE7IbKxZJhXVOy/5vjr9UfeeLrGKoVU6uIdRjMCgYATy7W0jJ2CX0qTuDsp6Yxr ZeTAotrQvRSh4KkkyZSZYpXGIzjLuMelifbbHQ+ywNjU+o9bwH50iAovLtxDDEWj 
UlHjWr1VAR0BJL5Ma0CqkGjp9vgKuWZxqQv3kMn/ozDUTM1mqPzNr3L74bgsW1o3 nSCPs4T97OgKmnJ9bP+XwQKBgC5DY9gY7zapzbtlzBFFm9ctbgQLVImwBAd9bb8a yp5nPuSs+fvh54RwPwoIKxpH4yhTsvfaVhbXkpNMFTztbxn0F6p3uIerNHtWEkI6 b1gY2vw8UPkcZbrNzbtpXP9K2eLE6og1AUjdrwnUYkes2zOmoNvTtIv9Jp9A7gnV heWFAoGAAd+pZlqlxP+/6TR7rwPRRScufTKaRGJxsEcr+f60gEkHYbIL0yH7Aati gKsp4umaY4bP+hGfDYYNyiibguR0LAguGZSfx1DDaJUxIgSYoTHoWQWYibcyySUN uaRuiTWmWRsaAvWPEte9UmZ7i3kQMziCnPqh7feGbNKrc1+uf8E= -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 2097153 seed: BTBX/ampNfLU+ox7xipBGiaSbgC0kcB8Gy7BkJB4oKI= zfec: required: 3 segmentSize: 131072 total: 10 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:CHK:72amudbwylfpsdjqfzjifywpey:lebiq43z33o6fznzhkr6hppzil25ngracqgvm3s54v4h2nchphsa:3:10:4194304 format: kind: chk params: null sample: length: 4194304 seed: 2NVWOXvtwcAFf0J/g0fDC+ypQbnCU7awZoCxcnhpkVM= zfec: required: 3 segmentSize: 131072 total: 10 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:SSK:52f3eczj3vq6r3llloe3nxoybi:mpy66ok4hjcwr4ir6gizd6inhojk2bubjtrfkavenk5wzw7e35va format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEogIBAAKCAQEA2vTVaerg1p/nTfX0peth4kWZGnYMPgemUQUDhqXjwa5r6A1f NhAwXyp9Jd+cIDdr6JefXW2gW0trkfRNaHmMCPV3ZsNBFd/nQjaB3QM8mR+4H3QD kLNVvBQuuL8a61GXfTstdKcQKe7GaZ+Sm1H73d63SSfe0KJx8BtsAuQtM4ZVRjxf 0+k1EgFb1wkv4kShOcrVKWRduoOIu9XBGqqU4d9/QWNC3PMsYZCnLI2lEwrQAojn SP3ZCQgB+Mo4sxwDHIzOOWDW9OYSVTCTIQiSl6Z/vUmLIGrG8WHp9aHT19E660ie NwCniPEKK/O79XTEo2e4qKatjGCwdSj5QTE5dwIDAQABAoIBAAKKccmLXLWQ4HXC o2ajfxzJkvfAI+86Vn89MCfJWAXA2Oa19QNjF7SbAR3F5QFosztdOw+x/HjivKpS a+2I74uREaQjIue2k+/sQwCGD5d1S0UuKvZsZlPK5inlqdHOPhRJcgMXBzR9XVcP b3uW7XXLJlRWfprsL6dKIiw8apvc7zF1yUS1OAopfoOcc+HJ3GNMUlqp0kqtLlX4 HwXo6sVmNo4eOjJfllyTcUN4TVgzc2plSg6E6qBxnfiAxFwyyDlDuEFkTaNULPm/ UKOiniDWb5zf2sBm55dC+R15eUHf8rHKcH/wqmrPnTKn8nxzLSeC9qNWPDtGlZV3 j9JmEkECgYEA/XfgM71+9nQ8zZqJ32+ascITSDWvAgAtsZ9A+ze8G9+lJOB0MinD IvRxLN5nq68MjGrc+xZKY0NWfOAh41gbgLLvWYENKJ6AEQiDnGSDOSFBxe2Xc1gf BGaQMUYXWjbI8BoXG+dJcOjo1LxhxW9UWSpVgebNvnAUpgJz5E5jRjcCgYEA3SS1 zgELO/afzf2PeW83QbfFzkLsaTGCGgxwFXyeduHq7Qik5xfo7kX4/RykmyLB+D25 kmKCmdZxQQ773IUzMnMPOhxIj/9Gw+RkzACZoFYOZjmS6b9Sqb6jJBXX/PZL7f9v A0cTftaN2BnZ87KBf/GKrHkuE3Y8moEiDLbsBsECgYAX6MTnXIqraM+LfXZf80Ee X3Y+K4I0qBunU6RnjhxabMBBOEL9sF7N300FtH0G/t4qKLJrpPCjaGiyItpPfbIq c7aMNNYu7LSb5rezeu+95ds0dnMA2GEkoyAa5ceyJNTTgUKIyUpuMio0VwjJ/PRx 7MJgHItv2Va5SiXwdUx8BQKBgHIPHx4zd6Hj4CSUpU2SyUNCD+oEpn7TJDFfPOg4 MFtMxqifDr6KnH9Y48VY4qWJVdY9r9sKqCXEbwGJQupIYVGh+raUI/DxT4R15m85 2ALUn/SluVqKbY5TXz2bbp1wQ1Vrq8xa+nkvHFXbb4i8BwMAh+/RSKyNDVD7TZ6V MkOBAoGAcqeFeUk5BFlRDEUB/34tiUR4ZwGzdwMgQ1RQ5uSVsgSHpp0ArFZCgq6W C8KtdvCh+hmwzF6QXrjcOrcHpRdOM1c1k90HOnO+d6CoaCDsyR0HSc6fIFYl8CS1 Mz+6Ugx5sCeSa2DEQeUSYwlQppTEUrDyjz8y97RR555E5lof6S8= -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 4194304 seed: 2NVWOXvtwcAFf0J/g0fDC+ypQbnCU7awZoCxcnhpkVM= zfec: required: 3 segmentSize: 131072 total: 10 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:MDMF:araofmg4r5w2byo6viyya3oso4:6wbfrlkpzp2w22keeo5frbjirditr53g5oc3ku35swuwoipu6fdq format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEApE3alS1Y+qPUVkfT9Jou7Pm67Dm9hikzQfEbQS+qX7pvdPae H9QwenTW6cfhMmo6vkbc8wqrG6eguctXWgQSG+rQroBu4dV8UD9lx27kYVdUcVK0 hKtU0eyDpqMiTGma1ILQVSM+/e3QZPRK+HmIlwvsOYCJSFGhMIPLekgYyVuxEFe6 7olys2mhVNrCpaXoxubhXKE7dOvBuWBWs5A9Vws9u/11hGpXS96xzJldN2bDMyLS p5qygPyIFKNhoUEahGUD261Dd1hu6g3CallXEXIKl8CEQyPbFMWgJv1x23omjk17 UTtpd4QZsS6SZwVsSZLtg1/R2hDhIN8Fd45XJQIDAQABAoIBAAEQcgiCVS+2bPMs 
Hu7YKtKlIXVTQGuEi8zzC1qmPOPG2N74k/ifzrqUVCoKfeZuMrg1zEuUt5wDv3JE o2m1WgqtQDHJKi6zS81XQ8kBamBJCQZ84ydy1qdPcWDccKXvDy4uNLxAcLGDX1Sw EmY+n0hfLuYGc50wzir6x5AgtGxldX0GOCb8MY6mY/8mS/OSRlVGrG1KdQ5WqN3Y Min+oTPRhnErxHWvVikUdlZ63vtoGrRUWg7pdrXqnCkwxPH1ddyBkzPIIg0A07YB 8oJux1n3u3hO56VelHX0/yzFNkviH+gvybPWBJJu1krdLcRgqm0QsJ5NV/x40/QY osBcUpcCgYEA1Mzgj7GJ9JfFENkUxvHXzzSK45//DEOiW+rA2KziucHTZkLs4Kl6 EqcrXj+Yk0DbTypOW6uVKdKNPWl6XEkKmMvFxZQrL0LDEwAcqJzZlizII/D7fkE8 KMjExMg2K1lXCt50nqCW8PsyaSVpy0xNrZa7x1Iey9HksRDpBfczgAMCgYEAxain m+M8Eyij33XeC3YoA4eADXAdTSrTEuIvUUjWZ0Us1FbdizwniqYiVucI9AKNzGxN JRxFr6az/oBiiBJQi/5Qav4Iqa7G+4YKHhi1QTJogZ0yNJRQIO/CqXTbvUEN027m f3EwIpQx5JbxU0hKGyEhhEZiUdPReMbvwf2/R7cCgYB0BdBaCB6DcUxMx08AuVNE 8gzX1qAke6vGGdRTTs+/H+K22r50L3MTQHnwxRPXFYF9RD+802xchSPk2+GO93QD ovaNpx90gR4C+gimFf68VmY40mcMi1zVj8FY2SBPukIu9uL2qfAiK5NsqK1p3oxr nMd9AVUxI5tgvyuNyR4XKQKBgE1m/jvNgHkAMSwQvCNA4ep/5WVdwhu16XI7oMvz +gH21NdSLO+ZXuKsrEXbs2XamiyzPIKLz745ScMgA3XFtkUcEeHUGRBZoRJeKxge FNyzILmhFUgBzF8ZhOFXIbW7A+8IPrspV/AymFcrxNUYOezlzHpAFcB1clIZlUoi VAWBAoGBAM/HP7FbfXWQzWY85grp8fVi7FkYSaLdpgZeVUWCXj8YfkTyk+8gDabZ wf/rdHzjVNUvFEf287nj4M8mi4XtB5ywgFUvehXA+PnHJ9+55GeCOQOF5ulma0z6 MwfB7DKFKTfl7bHtGfTYJ14xVg/PtWFBJE7pnOcLFMimPfNWWCMT -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 4194304 seed: 2NVWOXvtwcAFf0J/g0fDC+ypQbnCU7awZoCxcnhpkVM= zfec: required: 3 segmentSize: 131072 total: 10 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:CHK:tyt4hfvn44igztqh5zeiusggcm:ex3gzzn7byvhajithwbo7c3p7qwqxmnactaxxsxsxowkafyjrf7q:3:10:8388607 format: kind: chk params: null sample: length: 8388607 seed: w6uP8Tcg6K2QR905Rms8iXTlksL6OD1KOWBxTK7wxPI= zfec: required: 3 segmentSize: 131072 total: 10 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:SSK:xsmblf4mfbwrri47s4psav35va:amlpbidxmdffbpreslrugvw55nhhwqjyaks2fri52ehshxngffwq format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEAuSBNunMdTApYjZptHL8f8fM+QhDWIE3B29TZFEhL3zjRgFty +KJDwPMViLL70ELTThuoSUscHyHAvFivKUVYvbGRg5WcgE9r4V1V/3pbrCvEfMxv RwNES+aYzSK1etulmzdBk0K+aXRQxe2Lm07/o2iQ3n4RjuQbYbaQzfZS9isEk+28 iMLAQ/AOIiZEiY1jGIZMR5k7xutoceApwzFo4e91+Axmh2809Y1uYx5SOk0/bv7Q XqxxXejURpevB4HzAyQiFKQcKz4CEsU6+SKr4dovvXdEHodaFUZzSl64sN6DqKuT i9T5c39KQutV4nyD134tdZy++2oV2RasLSphdQIDAQABAoIBAFMZo5qW8uc/26lQ 0Trutl3LFT7dxOjCTsup23oFy/0bSbvHETB30lcqJxfyVCQT4zt0IdIow6pb4eMK IjKx/NhF/a5l+dcFD8Wduq1QVRdPnEdzE156om05yYyH0JQiRdALeUWr18KJonp+ m8TvLMTC+wjM6X/NeFcf9xdlQ69ZMVEEf3Q7mzYc1L1e6SpaSVV2iTZODn6oGiYx ZPc4djYLwZC5+uC9gDx7pSzusAol74/0hg4xJAl4ak1omlVoDfviuloX4AEQDEIl O8tREPUjUOBIYBMpgDyKapqv08eGMD3Dch7pTYFKHYX6UJF8rjih2aA6a1E+Ch3Q YBijUT8CgYEA/gowWim67ErYzMXCqjmy2NBpMn/LUU+nWGw3inDZ17PLKdshXxXe dNuhfF365DLhH1Lded/nr7dLDd+n+IjMD/FNDG7xXJXPMZAlCwTZK9t817C4MNRI dXKliwACfjKt9fBiszvy0TP8Z9lXWtTw6SEvGNj8ftYY4BFKocpF6/cCgYEAuo38 7BZQ6qp7G5epVyFQ1WiGgs1SzTdxyxdHpVM1Yt9Ya97/oVLaTeerKXOAty8Qh9a4 5pBRnkJBqFRZhvSwXdRVIY2b7k0NsEfbVvk+fnhDSAP2wIVDFGQ6vpthgspeCEZm XkrQry9ZwG0IvmopuOw8EHtmvRVb5tdqzhyaSvMCgYEAw/ocPxI+T5eWFLLjb+q/ HB/7Z6fKs9mdIcuqNTTF+W+MZafU4MPAL6pXs9fUe0L9BOsqTKD83UOrtPI7ZLIz qoDejZ7wuBoiEvw+d3ewCfNzJfoAvjqmA2UEbGz/f1edeEOQAPFYayeNqpeymjH4 AAFHkgWjFD4aRpFQX+vpcRUCgYB3FqTaYPSOmP21g39Ka64aTXtwjHnLHxW5O8c0 toVh9ImRcu1komtRSA5vi5gjWBwJWvz10jMH/+vB9PahvBnKC/28SZW87dtLKNPQ FZPbUBJDKqSeCXPk1Ibbnn0E7QJR7f7zOnc1HdkBiZkHVOYFcmh1bREMq1HbbrBz mra0GQKBgAYljdd9NGkfDCfPNOhwVTXZz1Ro7kU+4wId/wckMPd95MyWvfU8E/Lc o24QaZSDb2VrRVJ1dpgNcSr8e4zVZ+4+urhQNsFItvhRJTVnZq/YFITB3EJCrZXe BI7jHDLwaimv6siVxbcJfwPw7DJaTbCAB3BD/u5pa5NSSVEf769i -----END RSA PRIVATE KEY----- 
' mutable: null sample: length: 8388607 seed: w6uP8Tcg6K2QR905Rms8iXTlksL6OD1KOWBxTK7wxPI= zfec: required: 3 segmentSize: 131072 total: 10 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:MDMF:zqldyhgk4msrc4wk2mbek4tytu:nc7rmvy5fhgxbszjyshadrvfn62rqh65euocyb7ltfx3qmzc6coq format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEApLgLaCjEUy8x/xNFZuVGYvJnPKzk2OTDtvDaddCmrzTSn2O6 gAX2K3cG4V+NF/zk7A60ZoYjAqgziqk6ybvR5f1zxTk+zmWplhVahGox9iMi7Vp3 atPcu1RNBSCOj1usI7u9/+fkxJmML6EgNtz7p4W3ZbmA8udv/l5U37S3tEb13kFE mMKrokeTKbW9T/0YDkq1ukYziY5ye3eYbDqduRUu+bqmmAL5JPdb4GQATlDzHtvL 5ouc3if5YErOWJCv5ZbM/jPATNqPbpUQ5FBA7kZWIepcN9OG7wK+0aaVUKS/k5Wg +tTjDshkFAZ2x0MDnVz5L8qiC3+wIUbj4VKw4QIDAQABAoIBADlNaXJ31CyYG42A F8G++yiK6Y07HHWzx75JtcYMqyACgU8/s268JDJkuvkGc6Ansz/HscyE14MiHqQb UT9C3rdi37Z5vrawuTlj/lRYWT8mZA0sTqTURVLJ9e1VsSKAIrdfpa5z7qrSO+mJ 5RoQ8F8L7owt54UZLGXSTTZxuQK2qb/g0DvuWAUXe+WU9F2rv4GVLzvsuQf0AbRw 6UdIfIweIa3GU0u44FEoitXGLq+3O0ZAq/9UoNRmTUSAgAAS45OsKWGhYjJ4qto1 aVig4iNylqD6r+En6hbklz64h29dUiHdwOUc50duIarMnaDy2geCFHtkWAlkWJjD SBQHEK0CgYEAwIdu9AvSV4FKSxDefLbU/jTU/NkrrirRzezylnFc/xsjTYIVZOWb rH3R2myLRowBN/WgZUtAUwH2f4HwmJwcmMG+13RWYI/+Nquid48bGkNTUkhbb78+ LybV3mdH2pfRkbYlHpuZCZpf0lZWlPbH2WgtWxbaFH1UL+LT6AWcKIMCgYEA2wWQ ujP1CLDD9b769bJdDYoLMAkciAjtg2qMLLmTAfjD221tcEt95u7Xq1qdpaCaro6j Rf+0QFaG6BzgKTiZA4o0wznGJ74WDtKkaIK243MrOW/4+qSrRP+romKrdrB2XphI t8mr7bURtDxjEAwZcWSO3pyHdcPWkXejOTgIW8sCgYEAlfGknxsJ4a7HDrl/nb/D GIxLCPWWSFn+9pNAx5xYojIfh4D1apRMbsW7B5Mb0YC+fjeliN5XpY5UzS+FE0Ya G3phSGnJ0AC1Kxz3NohUwlqG7QF/fQODybNEQ6dKVduBkys5s6HZSZpaYHVvjyq3 sQGquVON2wFU7MqK4RxlZ0MCgYBy2YJBAgnV2suHS/RRbox5ExA2yjBZ7USPCwoi UdWSzR33LHSc1BlbRZd2VXghaAx0yHs5s5KTwkvP34R0WSdzwb9VODB+mqD6eN6Z pyG8N6JM5jiLRlpBPkiESHVdMb+Abx6CsZAkgDSebKQNwCp/WZnJhg9KY71aXoAK +yT27QKBgQCxw9dLlBiQLQuqDt5TQPkYzNSY3j2seEUqYMF8QSTvWxS8jJN5ofCX A/yQRxNyunl9AJYIM8GE+4CBDszhSu8CFeoE8cDhHSIMMFOR9dSEL7yXlaykQ195 C2iuwWcu1i9I2hWDtNz2d8hLL4XgEZxNGRQNFhC8VXEwbGL1dizZAg== -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 8388607 seed: w6uP8Tcg6K2QR905Rms8iXTlksL6OD1KOWBxTK7wxPI= zfec: required: 3 segmentSize: 131072 total: 10 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:CHK:xymheose4rdlspgydkzr4nqkre:z3pfrvpq5fdpkoybhdxppwbzrt6ejf26xh6emzlce2sgquljginq:3:10:8388609 format: kind: chk params: null sample: length: 8388609 seed: yPi3JHKKbWaEEG5eZOlM6BHJll0Z3UTdBzz4bPQ7wjg= zfec: required: 3 segmentSize: 131072 total: 10 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:SSK:ezd6kflfw6qfpryquupyjxvg34:dbpmolc6skjs3f27go37huyn2zganyrgpcdijrqlhegvfcwdmvyq format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEAoNZxep7/aicgkAheO7wqXkoQY3FFMdTRxZNUrOEQ3Z5AAe14 ill4A8FRMZkT8xgpHzBw6bNDWO4vVQb6G0baM+6yZrzG23MVWyyKkiTjnnUdWU2X 7WxGgnsZF/evK2cLFXDU0oe/CV6JzqYMJbyeIgwdL3+OmqJfjvtldT6a7ZphaOhh i7IJSJFlb2sbdcHWZPCyfPHZFW+6sfdTSKKcn/DfR40lG+TYzVnYQfLQ15D7ZmNP EORKNzHk9zOYZnPXwJS8IyufjaEFUJLAguqVZGPHX212GpGW+jzc73gSC8oDDBH9 lzD1Rc9aeb59hGOL/LtyNsBArAqruG0h6RTH6QIDAQABAoIBAAvAl6Km3xFt4fx8 TAxv26WvokJt6qkxPJHECfYm7PFQqKsrY7kyP+mAVPM7lQBYldqkUr/U3DkxkE5V d+2J0BRm3uzQYvRylI8oskhq/yHbO2WE3LLZzE4o+gStEcTpXt82cyqeBijEWmv5 6J3SSjjBK1nG54/niPV866U1SaNOXKPP4JF48+BKkNaaKXeYlA84cAttD+xmO7Tq W0PoxFny6hiGuDXYKsVlBnCujD18eNx8aOv4T9o27A4LJztklpUF+BuzqVtfhXhd CZ8Y8Tay55PKIi2qv/DLYPT0fjMC6cbNoT3NJdTl4LEB5NWk7KS3Z6x61ArPHdwA Em/km6ECgYEAyyrcRHYRBg2keBW/5BpIYLJEj4WJ2VKbB4jicc+wk3s+uGM9lwMt hMPHt5r3NihmsGABjqcARrLLE0Jx2qmyjWrJX2rfLKtY1ZFdyQ5hKKHg+ubpMpEo 
DwKOVZQXxOEfJDqUUrI8fM15+UUwgkBgh+dGqUb3IGdim41jIxoxiBkCgYEAyqmh w5pP6R2vzbvfTtauSCzhBQzdOoAzHY/aNq9FY/isAHVQVLf51obO/FRWIJHOGTQn l/OuYKQFZzR4wWIm+763shxmGxOGHQQMPs1JwJoksl9hPJx18O5gUZYhQIluI6zB prOXgyVeLqcm6Ios/gkT81J4XLxlNEKC1BwleFECgYB1GdknpJ2fTZG0nWSjBvsc sOOPjbqshk5RA3bxfnIaL3kxMhI3zl8YHPgqPamrj5HQqyV6oYspNLiT+0JAdHsz w48Z7jGAP6rOPiE+V4lssBFKzHkw6jWaoTCE5vzkP5WBfjoriAwRKyXYpSaWjKCW 9JjnzL138d8GJXI0s05FUQKBgQCFF1ebnFiEUDGnG40wOj4kOgzggy06AP0QmesF ZJ9eYu2aM3C44kVZxBhkj4IsS3SdCqpB2Q8Yej7uIwB2h13wj7QVbR8FAxJdNc5Q 5AJeURxuY8L4yguOWQ26JqzZtCc3mHloX6LNxpmOa8lah3u6rP2EGxHeXP7djhxa 7c0RsQKBgAn4Gd+IRyRU0Wh07FVh/+LuxC9Fc/GbIjxNbdodeuQzuOlU7auUtIId wVAXoiatL+sWpqK1h/+4YGb3xEOd8QV8NPO2mImXr6fWJ5VdZ0aX8DVbcDUCvra7 w7HwiFiwVLIGdyNEk6OuiaaYGyJyJEax8i3GKtEeLOCagh+//eyO -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 8388609 seed: yPi3JHKKbWaEEG5eZOlM6BHJll0Z3UTdBzz4bPQ7wjg= zfec: required: 3 segmentSize: 131072 total: 10 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:MDMF:hnueoftricmvuv4q37m3b5mzee:hl2efj3ydjv2ydjk5ilxb3kwehm7qi624mefyi3ksmh7o5me2nua format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEAnBWZCUifbysHRyel1LzvRgjn4gw6sQo8ifcbgqhvHx0GyU7B rAOUajUAzvCGVkUtbQPe2031Sx1LSrXkra+1D2yGXlQ/pyLJixUTwAkF+V+sbF0Q bR1TGztpzxQZV+W2SDi2KFCGxR4b7dN+0yZwXCPbZDYhpWfpo9gHBQAlI9VPEWto 8Q8Lv5iMRUFSgo0redIGWfY0lIH4+2/FOlOG30Hl4Q0GwzqXiSv1rGjx7fc9jDhN vxf6tdJs57MVbUf+l0ZZ+eh2aST3MzfCZUO6r/DIYzzauEYyLG6ImRImXnjAhZ/D i5N7lz4R8KE0A8VBh6Zy+SsjtXjvZeO81q54TQIDAQABAoIBAATnndc/l7j91Gei lztek4idyNxJhOz2yiJjaD3Rw/9+4ULMAtnVrqEsGLd9XJn96FibuEdScBHunKTU n0LA+dLUrB/vNvqDepYFsR5F+lDU4OgdKuIO5kNoZCG3JeO6oCDYCAtaGnNmcpC6 5mpFXomwBm8j+f3cDH4S+1zSHW3+iQJiYkD5slH5nyAIWCYkGBj3z6gzOhCOuzVr 4/WJdXrrxv7zENUwz2PUYe8aEhtyn0NVayCnS6oxdONwUowySPtLoh8G9EW8NQb+ dbBehYK92PL/XH7zIachSdgcRvoXXzKSErhFXdVh6NM3wYZEFVh7yBGWcxN/453q JAWTvikCgYEAxk9VdT03f5jM95yHhBMb7+E4GWtt1iB2hfeS1hH6xkPtmBUR1B1+ +nPc2aU0kv3VGgzIElUWOrZndf2eLNbNzslfECnVg/8EwUqZhFJjlS9GvpmE4BE6 /z6kSt9locLP1GvPllhbKm2yi1uQCbPrqQRL/LscJrubV0awDkCdx1kCgYEAyX2d wCc/HDuvoVcHX0Pk7tl4RklShX3mlHHvWLQMAuiN26edH92p48vscw4s8DBB9K5U 1mqA1civ2f6zZtcIvqoerW9vxwSpqVrDwq3HBTOscuip4aYbIOnMe1pLabjmsrLn tP5rkgJeUx0cbSfCBpQjxUlw1BsnM1qepU8AThUCgYEAvSjVekRPWO22rYXomenk Xxc0fMLFfVdv1u/FZ161F0OaMdP/MpaEFYBJLG3yTTfEetmwShRRZOWyoJCvvVOT 8uiQPgm5efPaZEm1T8uK47W5xHsJjPXCkc/9xNF8zyTVO2kvFNjo9Pq4MUfAiBDP /GN12/farXOMhF6P9rhaB9kCgYAZLtELlhwmLDOMR7NNLdAsJhQJPNrKgmzSOtc+ T+p8ZpJsVKunsu2r4e3gh3IIZw+nRC6oSdFmZtnLtjC39sJKCjshVB81UZje6NA0 wcFxHf88sWWiJT+Ywn/jHur0AL8csI2TKoVJT3B4lNfbsK9oYRWDb+VhLS+eFIJl iNUx7QKBgFRzMgeWnmVHkdMt4WqqaNh6GEwXM86Yeaw3f9WLLKOtoR8QoVQukEWC DplFDDniCfBfD+a/NoC+a5azXI53NnL1iN8t/G7f81xWgZC1y4yeDBqNSyhS3vpo L9QHJeP8AZ2cEsSxKmei5+zHuvpN4HuvndyAXuWh1V6TYSh50gVR -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 8388609 seed: yPi3JHKKbWaEEG5eZOlM6BHJll0Z3UTdBzz4bPQ7wjg= zfec: required: 3 segmentSize: 131072 total: 10 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:CHK:xt3owduddxqodfhp3c2fu6yzr4:bvk5d2igrtlo64kbpyypajyi6bjzrnvl2blcavxhguiupjthelra:3:10:56 format: kind: chk params: null sample: length: 56 seed: YQ== zfec: required: 3 segmentSize: 131072 total: 10 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:SSK:zdubpnm3ft7fapqny3dzeypowq:o753jes342uzajcwt6awhn4rmrqn3cszsth7kfd76zd4ow3yc6ya format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpQIBAAKCAQEAu0SG8FEKO2y70r7ymRrbI43J051onWTZGOZIMga5iRApsZxe VGIotwXn3+3SkSCChu7Z8OnTx0iRJuAtIi5uRaOzfzFfDqsOulG/Gglvx5kRDVff 
54RXY5b9kejnLu/uqBdanMU/xUmaOXumHgYUdERxKdrAZhEyJ5F/AhINA+kgwexU +pGBtTFiamb5S2gqfxZ9MyR2LZfjbAvcA/PRCGcs5w+j2Kn1rSBmIU2c8uED/bpz lEhu7PTB5GgQubsl+1PYwk38kSqDW6UQiydUKhCVwqXOV4fbkzDVdmNarmPToZJB yfY1R2XNRhLShnHm0WPJaVAq/I1kToKUUx0btwIDAQABAoIBABsmvmHNfixLgZf2 s3nbWPZ4slCKPAbF/mwLx1/pdbEXtNPZlhup97lBk/L1qlf8XLBvpQ22+Uuli9YV HrYcAUT7jSTd5ahcyM/e1lRSFfDckopauU359CmuVKl5GTvG8dVRPYQJXUufdkrr UJR90S1iVv34h3jE+X6fK8kDEPwF6vcDrvIp6Y/coKzIiyWR4Wjc9CXYonLfF4fX 0XsHV9cVNQ3RU9UGNn9hnH88R3f4YCBqJqh77qusUHW+RXy/XoHc9Fz3Lj7kGcJ7 CoYBNwLKU95fdi9TRIQsnJiEoQzIb0H26NDJFjYDPqLCoj1C4u5SF1pT4WaazZvu RtjHaGECgYEAv2mybf3RZ8HA9J6n9mOHOdU/5/q4GwxEj0tsIVAvf9WdvbXgDeLm xuXw0j0N9gfIBnCcrEZ/FhR2uJmQfN7bpA5IDYrp3aht/amavkgDv98WcG9RYJMZ aso5RxI7aAJuJn7oN62fKkcCUS4S2bMrTUzYkrfcaqZLxBRABLctIFkCgYEA+nTE VG24gt40Ogehs17u7GL4xYhB406LyZ82rMcI8PN/4TxUV/wQyaZeMKtheMmWGuV6 RHU2l2o4alW084ZAyYD4cY56nIJnC7AUNXXZrYKaRP11LMNCvtK+T7HE3jY9BE7Y FrpGXbyeMz5G2wfV7KCW6kZ13C58IzLF9iy3Go8CgYEAmq3vYrMZ5Z7NLuCHGrST MkkBu5T/8duYC7QHTWRe/g7ByeyPgqk5lMF8OmjcP1VKbunRseXGDTG8PrDZ8g6l r41a7Ja1JkpVmAbW5a2MWiENIQ7T1BcLEyEX6DbzirlsCe/D+Dp1xNRdKvzwfrwq 4eyXlvi9RfHciDdVBHqCHQECgYEAurwasZRI8JH5wJZ2EoWif+7e6nBIJ9ElWkNy AWo4mWYDn4xammsenSqEqabt+p/aYd1cxvPZqxUQUP/r9XHQliypkAkaE90KNWWn +6ANl1d77BpJpgFDn4EDUeoKDV/FKJQcev2Rf0wla7FwJNh3wICPZMb6Exs5hQjT HlOChbMCgYEAoWGCOHmr3XDxNKEju0R0JVuOod3VkhkNwL2l7P343QOOO8lfkSeQ F++6HxzYkR/50JuYS19bUNS5KPYrJXqsG7HT7St88os8ORcaHtmmORf4UlMoJkwl H+Yi5PY0QpBrszST31bgfqdoHKh7i0BW9ux50XO+51GUPVwXQpayX74= -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 56 seed: YQ== zfec: required: 3 segmentSize: 131072 total: 10 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:MDMF:xps57pmsgffe3kshaz7ynqjity:zexdamdswofh6v2t2wc5estpazeirbynhst4q3k3ffrq3w7xau3a format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEogIBAAKCAQEAnDNgEwMYgFqRwwZJv5J3wuv4qByLdSfvHKQrLKXUjr2QMwFF 9g8ohHmrV1toiqIRQDiqxr71WxCUMJk/84bBdkAoVGIM3jAfl/q1jsL15JtFPeQz mQoa8hkWQ3GnxL3MAnzEi8knIpmtJkW/npG1L39CTYt33kfUbm7+Z8VKNjHdEQ7W qsRxfmxz4CCkHQAbtL6dZdVbJwEi6Ea80/jyY6ipr4+JqIhtGpksvpKhfhpPnetL AN/aX41amGlOE0EEmdjXi2JhI28+fYplXgNR+sBqsi0bVmQZsRb5dwVpfbNjZftj 8WKEKB/vKvm1UlmOTxCCY/bCW3QAYeSStV40HQIDAQABAoIBAADFFA0j78P6Lku6 xTRHgYWZaiFR+rH6H2iRupC+xHxrnMFTmUesLXPxsZF9ptdAEzuwy86s9EKdo01W BAWsPVna2RgJX6zcqdsy5iAs/88/oKi8bjCr5xQYYY61ibEjilTczo8tz56RCVRt 9ZLPfwgb5XTCYjXbPsXIkEJsq3/23vg7ji2Pyq6GmhrSB+mxe18u8ggwEwL+sMZu 7VFAXoEkJ9Qa6AI3Vwx9/QuGs8W3SkEA2MiEjYWrVZUYSYTdyQ7W5b4qnFPBj3GF gDzsDbNEDNijyW3CJp/SEfqSZm4/3wmxIIA3f6Iz8jZc1Xmo6LdwMwWOm8W1ltDE 3J1CGNUCgYEAthlbkmf55xW/cw/0OIDcMkaiQb4ld5jdLrVz5ZiSUQjpbeO8lIBL epQzSeTNanJ6p/iI0xZQ9kSL/9kLBIiVMqYhCFsnzqXcd0TX59WQkqhh/Fup/nCk L/Hl/eZvDg14QYhBoZBsuWgfgD4LIT2+eIPFtQxv22MJoBWybFy3PYsCgYEA25di GV6OHMhaE4AENe8MFUlmsFq5Pdp2kDySDDFYjriKJ8Z8ejY6XBHcWIkg6qCTykld 4dTkdPg++bbA3zc1yYf2wN8bRVISOUlioafrhGsi5kL9HUIFWxtfHFgDutMQGxRI ROFbXAbfm9t1cZvUpPlT8skoU0zvHKU4L8MV2fcCgYBrAWWlF8Jq/4Wb6KEbXuWG CampNkIwEDzRCMGNBmXchn8dGvkizm0MH/AvmOr4hUL8V3iXigKTZF5cPr9Rr6z0 sVix31b6AM8XqvWwfvfQpm/F6ltvb+ObZOtAkttph5LF93qRpRuuq7fvFQZXR0AY 814HcMJ+SalLT9SkBquK1wKBgBykLiNo9dhDOZx5ghMWztin7kDqVGcA6538iIAW n5pd74comGvITuxbWAYkPKrdrukfkKM4BWRMTMp9T8LNjLJwjXqynvf3sHDQZZD4 OfvXjYHDEwiR5+juNQWZZUMk7GDb0GFLk4L5Uokdor/it2WdL5nnKt9SlY2C70Ur iNoZAoGABeXyJCB8iv0+FoQG+UERAN1q/z6ycM1CKEOoaXlgWbX7bV6C0CNK9NFN IcXQOwahrj0N7cq430R8tmgugKp37R7mFW8RB1B/nx/MFG4fSLduuNZ6/a/vquMP k88UQFA2FX9bV+T+NUIiu4BQwsNsO/vVENX/cSAZWOV3mTtP7Tk= -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 56 seed: YQ== zfec: required: 3 segmentSize: 131072 
total: 10 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:CHK:x55owxzhsezfoayaxe7jpwnove:vawdgtqpxyntgy5i2po2twgelrynkfcjgwm7publnlbdp7hpqmfa:3:10:1024 format: kind: chk params: null sample: length: 1024 seed: YQ== zfec: required: 3 segmentSize: 131072 total: 10 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:SSK:j64uavihuouuyskf24dua7pc2u:obcjb66mh6msejksq2b52dhz6vffzxuvxjnoxvc5eq6bp42zadrq format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEA5i4eDHhy6sVcQaAK1wSU0kjdcM65SS2aB2Rl+SI1Y2o2JYCA MdXYdmxkBJXkrrZQ3OEOu/KTms8ovYlQnN0Ayt+9GuUoD8x2kkGrry/bIxwEmTqu kSqmPbiloRytbyfxffo5k4+APjm99AfnvHeOaXlxSy56dzkQJaomI6gtrQqlLLhT /lSPlackJd7Al7umpBQkX5NWBkqyEGT1xV+9Lz7ttfaaOeM8WMG0PJBHy6LxqpFI /V13kwFNkHw5FXaYzsrjfSBmCOBVPQT+ZmAd3CRNs5oxtSldKa3br07LgNRvoj4M 2A05bCiNoMm3NO56zAltjXBtrd/l3ySxdbWHyQIDAQABAoIBABGTdWtt2haHrfD7 48AB40xAUJpfyqGmGAQW/DtfC5UVA9/utTs48T+vrJ52BKF7neaTz9B1qCQyy9FX Nh7YOEqFdZbjZyD3s5kc5xtoK9M9PTOnGbPPfiSp4AnSmwKpGeVM8U8NbtUxiwni faU0Ot1ebtJ3ENZgNtWtbZ6c5an4CEcxTjug8Q2WDqNmLqsGyXRTpgXZNPIFLDSj rwAw/gfJ6/9vmsxil4mnr/fbnSL9/i0XCj4y9vvc9AcwX+l7v5MsNLAh65169Gpe SfkNcngcbVWDSIlSAjAJW9lJCKZtvwCIHUYPhX1lrcQaEPLzcCYnlX7hg5ogEx+o p49JTwUCgYEA6nbV7BXTwuxAUNmog9i5Mz72vydZSyWPDk3r0j/uM1IdXKAntuyw lNi07UcpjjoDXSO0+YtIHCpr5HmE7gV7FDutNdG53mqkXImfkSiywRM+EPF+fOmH ajEsueZyi3erqJRZrsa7b7HEAQQyZS54x1I6viBo2m6nFsppqV6hMN0CgYEA+1KM BOXsb67vB+oXDc/oBEBq5HV2TXjXQK3ZmZjOfcoNW6jTpXDpJXXiQoMWfHT7W/GG DmNNJYytgsNUfsaxTLCyE+oCsqvgbqay7JckKNham8JLR36GT7cV7TN9RMHDTKKi 73iaKLrbetVOgmFLP7Qwy7edhAvdUMbA+uudrd0CgYEAxofraeWtkr7DUvKKu2GW qCrnekLSXEwoTv9x8GzLwM8GJ7lBB6ZxewfoY4Y/TLwYvxQOGMN0Qs004Jh5E6a2 ahKB/zFgBlIcbHLoF1zzx4MIqgYiiZigXi3XZm4Yjbm+M5eyPMjwS4qlogqwtXZd NMGPFhCRWGwbtbOdNpn3OU0CgYEAqFg72FAXFxxrmraQKL1aIfbwYwXXb7+BGB8b wgocTyAX4Izu8EP4uBIFtB3Q4x8M/CKFdH/JvlxEIXIr2BvJyaAWOMaodfwxgo0B Dv+SxhVeZDU6bbJvz1fJRTEXOQY9hsjuMVBsmtnHiLj3NNhtKkfN47ejuD6mSaRI wgsvfLUCgYAe7EGyWS3kXBXEkwRicBjwPi4kwPfxHTXz64vjFKj+NbimV2TZ2O3o EoJk40dOQWZI6GYDJJnNJ807MTi5OwC4rjgxc48mCb44ZBIoY0BUwNh8clpiFCJd 8u3/e4LmEYJe5AFRT/Nuz2xvRpnZqQSQAvrLB60U+6Hmu4ToRMv1DA== -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 1024 seed: YQ== zfec: required: 3 segmentSize: 131072 total: 10 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:MDMF:n3rxzmvfupy7vucfjfe4pkdgga:o6s2fbq5fryvlt2vbno35pqtqj4l6pjfsf4d4es3nz5cubjukw5q format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEAxJKzLV9c2HRBtvz9Sh6ZhLopsZmlSaN5VmFY4IYF/t8KMFfR hgY66UseCV+wFwjjiY8ala1w6daStW2aP4W6Y2TUvgevlnHTzWqfP/Rekeyyj8Gw S/GE7wieXiahP9OwHENyStN8dbWkmvnIrYqBiPyozVjpavYMRuhGPjKpHMNXSkr1 yYpeaQ4hMxosd044ktj6ObE/ajBBiM4l3uhQzwsPg6rjWpc/unGPmXjiwOKB5iO8 YZrUUvCMchBg9oOHyf8ovjuWB4NU2yV1yhNVt+xbN89hJlqzilhCJwHMqf4sIbUa ScPV6AycqUz8pjCqPWOvTYHms4jrDkuXbPiVswIDAQABAoIBAAyyJuMi0Ppkg9Ut ZWbcCvFH0137Dzs3Z1t4hj3IY1iaIjWL0wslvkq+V3wUN/uqmDuUkZ2iMyr2C1vT t7u92gEF1aZJOYJa60YpOB6VHIrTOLxQnERDaAWiPKex7E85hmQgWsTkAFFbW6EQ yfDtJ/YwqsoTWIUzpuQNzJZ6PZbqqNlJXNt6nnCqecng7H7Pt7tNQzQ+WA/WHjnt A8R0e5+fUFUO7M/V7PZe40FdtatNGOv5MIRzZfkVvim/0BScd5wnyhRQU8evph9q wopO/r5wQDlsYIBFr3AuAB+8gzlPC884wdK30+qPUHAkUIvzwYLbDYLVnz96q8Vh St0WxD0CgYEA/nDTsY6A6Bk/c6fj+Aj/i0RP5F1bewED7Xr1el5ZhBFrdAvVSPxf AGM7KG7LCi7nY8d+HZVKljvEp6tDFjrWrDW6gcPmA1Att4ofQvt4BA2djULbtmNU uR2tkx1LS073jK2u5qRyO1e6BCcK2UjGh5EU+4A+ZYLSF/0Aozxo0VcCgYEAxccW tJUQMphzAE+rRaCrkVeU+3yR/LLK7Ob/x8P8n3OwrJMQr+aJWK1MRrmtmzMLnGgo hqzeaibCC/1ncvqKpIw0V8tYxMEN8I8svoeUQuCmUF4gDRkXwEJfheE3qc5aosE6 QnO1CcSRZnod+ywuwTK0I9/OSNr+G2CUHa19GQUCgYEAjWmXvm89JcIiid0dzpTx 
si1dWcapOUvvKuXT2RbnGYe0+OI6wD3Dbyu3jVlGb3pyD/qoFTkMI0NEoQuGVayN 81hJOCXwiJbfUcrqZQfuRBJtJj2qb0v1oozkE4eMeWaCHyXIt1deRa0ULYqldO4F qQLxbnZwN2rl6X8sA41nlDECgYAFs1xbLiS+YJiH/MPiCOSJFu4rZYbLsteYhnv+ 5Q5GBk6kWsTTXSC+VphpPXbcj1cZVgM9BoSOqLlVISO3M7OFVKk5kpnnae0d7vKK N1w1pUYF8QCZgAyoNQGN2VUCZvlD0a/9NFqWgnzyaDivAbIDTZPVqODIRs+mOF1s kZCWhQKBgQDIO9k69EE6VDVP2iO5emq6A9+9QegnwQlFF7iHN9PaKYFiyHzFueB5 eyNxHSuDrjmXIBXXHjAbX+twD2gLA6+xeORj2yujYcUCVJ5G21/FJ+7PKFdUPh74 IMMuQtmXaJw6zwUUsqZIGxllv82zcnfBQbXzInue2+081BCPh6wn5w== -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 1024 seed: YQ== zfec: required: 3 segmentSize: 131072 total: 10 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:CHK:fe64krzyaeff3d4teunjbetkzy:27hrywwaffqiqcgfkmzwbot3iamotr3bey2l5kaladmdmxuaz5ka:3:10:4096 format: kind: chk params: null sample: length: 4096 seed: Yw== zfec: required: 3 segmentSize: 131072 total: 10 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:SSK:oibebmzc5klthxwje6i32qsa64:4jn2csiis5vv6ix2qndrvegu22up3a6wciqg2rrcbf7m6sxzxhya format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEogIBAAKCAQEAn1VR+MsUI22ssr2wErUH2SszGxF613S5wZV+MY3I+5E3xFn9 Mwf6CHMiv4sdNTnU2dadtx+lF5S/MP/bpjqt4shLF/71Sh0uNktvixdE1gHm7nKx oUg1huUgVgXV4D8cpDhscZ8CSZ94GMa5JXB6s7e3EtrAMox93P5qoBVYNUKWRTyQ YZnYvVHE1xMZl+1QBxnUBGtHjwmiUG/BfafAqVYQyd2e1hakp2keLmk38GIq62gO 4y3dAUTDfImlENH5gbMiEHpmUve9QLxWBRDqk6VnMc2L1pTk+DA5l5xFrFPCU9yv I5M7rjV1MFL64SA4+WmsjaqCv/VLkjXT4a72zwIDAQABAoIBABYRZiIUVny1swaq mRluM2ETx6dHG4F97EBwqSLJ5X1aVqP+ZsBLqYjEEZr/9JKrqNxnCj8TxfTnKDfs KAr086KGZUg0itqyAfWJKzDTjzgo2UhLYGjbLHa7g2gGtOGzPA9OtU5jXJi/2o1r 8LbLxmLf3h5hZ99YcBJMto3nhukRUMB504vpl7TIRJ2K2Nq8Qn+9oK+WkZ10emsr gSPNH7hcZntVEFSwPwNT/xs75QnFT5wZJ1+RqPfCK+kPocIFsGSmih9XZfPeWVjz QD/uEGcTl8fxvxY9pnwp6EP+NW8QOvcBudwK0qVaWYPE8DtSHlXYOLX81ZfKqKhQ TTITPiECgYEA3FS0smNgBbFjAlIcr1rLBYpJZRCqC3XEjpppu5bVeRjbhEdJ6BaN g9VrgyRiIK0DykV67ebC3wgABsuT8WntWF/Rsm/jdr8Cdyq37bZLP0trRToKJekt hEnE1ogakMSydW8sG5fCWUNrmLae0XqmzkvE6D46WcLuSKBYKqmi/zECgYEAuSCo PmYYGqwK7G9C5AgjEvSeG2FRHxruYlK472MMgR0C+NFVQN2yXqAZWGN88bS8SIXd sDzVPxU9+kfGYcm2HFejQSbg2RevvzXJCMAVY9bvZfqKXtlGMLaH6puUDgwbMY7K qwSIzlTf8rI1yIsZrT0gI9QXInSvro/N/nAv1f8CgYASqKG80amaEdGeqrF/MCMt Tu60PlsIKWsB4JW/qyBc5vwAEcFyhCZr2bEHJBejSMOfZ47ngrlSBe1qpebbdOsC puqtP8h1j+t3iAiXeu6YZ5yn+ihN2ZdfMpgWyuPlCqNKSqXjmFB/GrSL9Dsy5j4m DkiYmlx6qYVgZSPSSRdioQKBgBMKteN9MtuDeLgrFZFI+PqZKK4eS27MoVqBsb4F zSJ2rniTZ5Z9dzxecVzzFsXx1jALfOsExtZvQ+m7ej9StSWjKgqoihYqZoxfZuc8 gra7Q/KUW6k35g1aqQ5LpGXxftaRHm2K0NuQRVy8UeXn0ONN10F4Lkz//ZYR+plC RtlXAoGAT/+tvp6kB5UqyLWI1DNv7Cu6jtCyAm0IebguHqf/I9pTLH2fKLd1vy+e aeTjxfUV2CiCzZSMMyaym9hf7JNR1VzVzkwt0Y5L0DLhrTbDGdgDRV3nI1GLTXz+ Ro+Go/k+Ql0hJb0FF9CQz1kWDVprTooWe3S9llqMClMjBKkMPMY= -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 4096 seed: Yw== zfec: required: 3 segmentSize: 131072 total: 10 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:MDMF:nmtnmr7nerwtcasdqjo2mwnxwm:uol7bxzbeeiurj7e4bmvkvc4izwgzsurrxm326clfbiseuzspbaq format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEArLV+zMpxyAS74Gnjm/bgauAaMXK76/Ph0vy8Qj6b3zyoKJKA m/3Tm0AbX9AAZnxF7hkJTfxdQrdAvXv+E7xqO39NCaNxNqO011MI1wgY1FV/+2cH dwITBj+Kc+Rr1zNoMBu1CRQriej8rK68CFpG0OwKUfRbOX8vpIhzaEHtPN4C7NpR UFEUbPOQsdfNsUJzzB6ilZ6vsxd87DzGoS7zbuFQMmBjt7+typiWCD2jEvfXHxcF 6WH3EkMwaxZB1bmtPbrHe9FnzPP3OvP1zxjjbxsvCnYmxt6lyoN1wFTpCECIkM4r Qwz0rTVcgMfApXxg9EWILEo578qripethPURXQIDAQABAoIBAARUr6o/w8j8gkqs ftuVaLznIA13RK3e2XM3CRsLe98+6sBaJXOSKAhN355MugT0VEKWCgX4N3gGM5Zz 
6nW9dRC4/N9/uRr+X856Pkir4weBYSNdytyEYzASpQyKJGtAHZbiWhe/WuaV6Vl2 hHccfNzkpJZGExdRglFjZlV0qxfjNostFNHIv3EZjqpLgtBp365zMzga2nBhshhN dMRN3zI/Wysuzemfz6IC25F7Yq/5HAwRKvuaDc2rJaS4Dn78WJSk3TA//8KSxE4r PypI33b3ljafExSBSIB9++C04ORKvhsc50BYPfw/g5PyXIoSzjKHOOuLFwVEPsVz 9DQnRzECgYEAz9iKWOl4z8LzTbBvw1udresxMJ2jt6uiFx1hyV8OXy4MulboMBE3 LLPOl247/zyWbw3FdvnUs3/ckPh406N1IVuSE3LZXw1Wt7sael3mFYT6RVD701VF PX9Bq3xE7hYx3/ALW6k3H8BTaVK44SRCd6PZQn2wKtdk/EDx5YJdQDkCgYEA1Lj3 EP3yA45nJRf3Z/5RxeOGYcOGS3Htdgsom1AQnK5gNUL8lmott1pEcGuyQSq640je /HkoFcqFvybQ9W3TGv2rsYQmShFbkHGLcD9V7XMOVsLSov1MpL50jwG/pTDA2yOc cSmH253V0V/CjcamAONd0+YBI0ycIY2KEnRE0kUCgYEAzw2RrMdQ8e/svx0gCYaQ Cvz8cMjpmoRhohNEIf4O7CSMy2jeP1w3EdJB4TsQi9DIr/MRHtf826BpkwXkIDl6 6vM1DyjfgMBh/gBnfTVji1aAl2L4q2wL4RqPygyvAlub7dFND1AAOSI4NfkRcj/T 8ymHuqRJRjRzRpRQJen7iYkCgYAI3HOeR5XPRB1T1D3AHT32ylWMuQJdHi/QHQLi BWHLxQ/I6DNxaJbi7mWvcS0JvefvE9gGGF3tGnSb09gcgSisFSkTyfd2WmbAC5rN YDYKICLWxmLT201YB37/fgknrnI6Lq+TnzFDmr2PbTfDhCTiIJaF/yzI9aYDV8wK nMFJKQKBgERFAoAThF/5RxZygP5Cr2yytKde1V6K6tkx68LlDct2ayQ3NVioz3TZ g6nZzbnYZye9UxFq9Mj29Fk5KRjC2M1u8deuxBccFwsC4Tpe+YyXcyqE2nyaymcc 3HHpX5Hj7T3pvHSgGAA7bB82/z4082OxiEeUnzxPfhO9YY3eBz4e -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 4096 seed: Yw== zfec: required: 3 segmentSize: 131072 total: 10 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:CHK:gvajllsonkuscfemygbnqhq2re:uwyilm5a7so4blhsaielnf34u2qbaqmudd73opjkgodgg3okeaga:3:10:131071 format: kind: chk params: null sample: length: 131071 seed: LCa0a2j/xo/5m0U8HTBBNBNCLXBkg7+g+YpeiGJm564= zfec: required: 3 segmentSize: 131072 total: 10 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:SSK:ghiqefrydh4yklwo32nwo43ebi:vszy7wnlzofynf5nswobaym4emv4yzdhwswmhoxzwquimn7dnthq format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEAxHHC4JknMYBpghJ7/ZyumMb9aDChLKIpFlACpvC+CADLX4zx lJUGu5iTIQcFOi3oPuX6rdL3/d3dhuntUW0Gw3R+Jym++vrIi2kMzMuTcmQ8PjgO 5jNCE/4hKdWSIZ613U0uAJ5inusERXBaU60P2zss+2hvTk25lxkkRtNQ8OQCwXk9 TjWhnk0y0jW1vdjYOMH11uHFcEG7yIcvyKR+a5jZe16XMFsM5vkhMQ9RhoDud5/B jj7AiJzWc98lqkTuEfc4NzQecNn9hT93GpqjM0yC8re2Lou4YBT6nWlE4z2n0F+i 8ljO6mDrwFO8UCs6Qu8cRzsFcfp9WR1lmyw7IwIDAQABAoIBAACFQ1HwYJ/gn5Ge VkEcIC5RdgHHrIr/ZwJ4dc6sG/ojc7vYwMR3B0yJ9qHYxepwjV6qogPB57GphoPB V5qmok+e8xKhfrBlkmXO18oseUM4AXIyGIAC/21+zTxcz4VSnq1ryJpse7Brc63H bXCD/eMZK2EDLCsAnmRIccXU8Nr349ndSmUAbRVXqMW4bRC8qS0d5JsyEvHCbHPD vo5xJCRYdDtxm4uVML8JcAxVFLjvVWe+9XoebtfYDe9zHZfgkRrjTRuzWLda+XIy KZDgDkIjm5bNMnjuw411SB9vGIgbYF8WeC2pnZVuS/o7b69XxgxRtImYDtt1v/fG tBWvVYkCgYEAzUxKAaXxoPpkIcGNA74PZ2F/FEe82yJ+GB6vM4nlF3k59OwGvaxE Zz67jGMAZRdIbagWdQ6Tc6FdBe5bp2j34LOxUVFwU4bbFelBFBSZtFXIIqkCzjLD 7QWmLrMiHvrsND9Xlu6+esFjQl7dPb6y/wmFurAY2rPatON6Kd6MBGUCgYEA9PW2 d6oBDUP7kbvaJbhzlIv7DyATgt8oDoDFVOg45ASnvuAprYEJ4Mbs31b1SsC0UQf/ Y3pllTd+DTLjvKszCUIobQl0j2g9mbkR5qc8rrN1DaCAavxqNiSgjPLM2XKGy9Gi bxMjjBtoqw7Ln1LhyhL1I7qd0QrdT0zY/zfU9OcCgYBHCj0ZsOiaAcsgey9muh/u cChfRiut0JO9mPCbbv4dT0+k1v/GJpRM/cI8ZA3A7XucpmuO+gpAGvhrkv2YQpRz 5vpW3011OdcaD+r7Hd3KL1zf0Ygs/hgaLrhAtK/79GxD8B9JFThIlh7Y2qbINPMP maXy4fjXxSDLM2QUlPPymQKBgFCKaZA2yVm/PHvSNAuq9fWlgMqcVU32aYk7NaaR JAN0tGLB+XIet0y8my1jvgryCVeLNaFToQrK0Bsu3EowT/t/USNotHZiY76jZwtb eUxHnPj6CL8kdxeOO2uceVYVndRt/OZgeJOcf3Gez7x2195FFWzF8xXEaLemIMLp bI+JAoGBAIiVzbyeGxhf7aCqbILLUTGxb3yLf+SheEiMdVJ/CR6bc8uNcKKH0E3y 7ygPXZpAiajpuSfqA9e4rfTxHJbinHpV1yxOkH55QpPm/4h3mMupthqgv6dPN+aN PcOyLUGJiFlXKXKHMeG4YHHHLrEOFxbKsqzMrgLWANmg7c4dN9Px -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 131071 seed: LCa0a2j/xo/5m0U8HTBBNBNCLXBkg7+g+YpeiGJm564= zfec: required: 3 
segmentSize: 131072 total: 10 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:MDMF:unhxjfxrkuzsuds26fq66slezi:bqley4l5syeh6vyws7chsveukz6rytkb2ihrquvsvilbumohnrdq format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEogIBAAKCAQEAvCBlTHQVDtHf5zH4GvRbJxJPXfdpQ9voK7dqmz/ggmAS5bzF z78igUXiZT1Z4AR6wWgodyYnXHcl6nbpvSPBJRrVD7eI1dBmjV3QUBPAoQzTYmzD HFTLCR1wYQLS768Rrp7lUt0S5WHU0a+MxslcWNHi5DrhxKWUQ0eYX1t91+86l8nS B1CfhEI2stSYlTFj5vCI8DhSYMxhZclqgSUx6fz6RFmqRA5zZ6ENzH0D4BktOFHx G3Tx0Y4LrDqU2U1BmXBpQKzabJwLwXTwghVGI5PgaLEPwc9Br3bOmVsj+PXH02Lj 4vW8YCd/Q+CfieivfUdhVwk2PNWXWyiCpyNcwwIDAQABAoIBAFhJqdCd/83zL+2b 9VCNEgQ9oxK5zGSE3So7C2Rtr2rwNJ4tn/X1wPdDOVMC3l10LLn8rFTyinFqF1i4 UsypbXkA5THZk/WoNqCsgNk70+ChGMktuslef+S4tKdKgHzsv9MgDgZ76uTMq2h2 xw35rQWgBqfOfGrhvDlw7bD+yoneFRNeiIwphYezgi1m1fhtQFgAT0JhBqKCJWPu KrB55XmZgc0dKTvp8Bzg9/KnhV9zstpeScngbjb5gNgrWqSG10SmBeApYQdsKD3t hFwUEb0To/8MAtOt+/T5lM/MIBBoU/wyy3Z8zQgZZePgwu7NmAg5Z66p/MGWMfDP ZH2Ayb0CgYEA00xMPiXnt3NkbRl2eByVJN9TmPATawagV3yaiwZSnwohgizb2osM tkG78UAr6cOA5QSg8KNHCddPO0Wn+U3VVyua9+ZH7u1gdjH4ApnpQ2jJewmfVItJ EKeKd8qX7m0RjU1IRmTsgDveABdTgcDdHlc8D0KjuZRTHFnHt/ZzLlUCgYEA4+0m 1vliiIcM9RxTEOiEZsWnEsMJBBJuD6Ukg3Et23rZKhIqLcCDWv/fb7X1kaDVPYsO qPr42x5b5c6gZq8AV8UhvDqxOCY1MuKnWOzAGleQTUkx+BOa0N3LNIqCxSzvNUek e78qMhNLnXxXuRiZtkOtG7w+3vlhc+0exqmaRrcCgYBTmQc9O2//A9eC1qUphl13 tif0BWAZYwjDNFhMktbTd4WkZC0jvQntffpmy7XUCfaQJZGrQ15SxW3ijH+Vwjab A3SPifuBy0bz3Hc8SDqi4e19EWSJZYYl4bOGC5Cq01ozZpUmzL1JSuZdcN0oI+8Y Fvl7LClsvgNX3ymGXipZ9QKBgFfcQB+YTJpSbPVDcOXQq9EuGeRKmHwgWprfTv74 LvQvG+1yyR2P21LF1ayrWLlFZU3u/7y12h4lSsmAaCaNCTXMQN/dRBlf6RvvcRD/ WmINJQwVzhRSAljHVqCvUA+P7bn9HvOw0iQxefGAUBSC3iX7WoyZeSbcvOtCGZ39 HQJrAoGAC/jVQ9EFNzwXDnkv/pHRj7i0N+3cv9XXCX19bu5AKmylBgTvgj4xZ3R2 KFwmHdXTAs80D4qZPkun8XiFw2va56y+2e3lcWT2MrTKhs/dwrodYidOOg7XGCMJ gKrOJbH7cf4Ts4VTksouepDk+ZAdbgFcKye5wN2FajvvY5kSK78= -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 131071 seed: LCa0a2j/xo/5m0U8HTBBNBNCLXBkg7+g+YpeiGJm564= zfec: required: 3 segmentSize: 131072 total: 10 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:CHK:zdmicwopo4p4h4wbfcbnwcrvyi:6qn75anpvs5gls27f4lybisis3udvjfjhatxiny7c72bcbtuztia:3:10:131073 format: kind: chk params: null sample: length: 131073 seed: /N4rLtula/QIYB+3If6bXDONEO5CnqBPrlURto+/j7k= zfec: required: 3 segmentSize: 131072 total: 10 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:SSK:r3q6op5itaypcvquahlmpudidq:6esnusmt5xfk2kr7nyqoleb3k22ks4efmltbaah7ldhq3mfz455q format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEA4s2Oe78bWzPT+ETNoPRutptgpkUKYL+xxJTAoatGSUdsqQbk NxrKfoXADGGWz2OXsraRc9ucRjyjlbjFNVIksr4Ql2EaYPv3IVdPXK6yK9ZRHp+x 3F/sG+d1fY0BdXfMyafiPHGC2v2E+Mfa2PcGxEjA/3jb5pZtI9VKew+6d6ANVEtS eiRo7oFA2Hdczo3nNJGH0I6eBO12ifP3e1kAWoxrRY+kkx7zp4fx0hgxKl5YYJhp ZHhW2VeX8KNv4OMbDnUyQCgTiEHFoSvDCLYNJXwAuOgziyDMafzuYI7ZvNtm/yPx mpDMQKDF86d/PWTst8CNCXy57inD/5WxtshRtQIDAQABAoIBAAesEyPGmab0NR/k hfDQRY/mtPZS7UOyE5IJXSdq5MNuutm2t1DrSb1ncJE92nnoATYlFmNtFgqxm1Zu fzjrSMoFUDuqtLEVfO0Zp/N3UzG7Ikt1dMqpZQWM07mvfV5LP+tBAymiLMTHLcpN PaI4X9Gs5Ph1ztLsgXZXmf+/m7Vw4IrdU+ovT3qkKumBEMgOWn/AI/WqVaWpBgF1 eGTPP3pr39cb7b3kPJkPCTyQgKHaoeRvX9JrpGXe+z4BNqiZI01U2cVsAF554AKH Va6bXgzuIgXbVEFBskgQ18pN4KcZyBx3pGQP0OEemH28Zpm9r0EjpmZKWDhq9BIJ RyAjzOECgYEA+RHYAi1qlnPVBIr3ZItxSVwjd2+Brg9xqzvkL/sBQg6LVmjxuFNs IXYzZxZzcv6Dl1Up8jdP3NUUFaZLMKvjaLx732mUgGSesalrqshM3j+wxYrsmpQk J5hHgpDYrKTUpj3BsPJFBq+E39WoEnMnWipLIimGUFsF1bZ5rT7kc6ECgYEA6R0a lIDBaboOjLjY2JFdTHnqbCg5+f0J+bRUAby4IYENe2eR/omdqt5aPbksdDoP1/nL 
9U2uAjMIdbYL7zSPd6bEIVT3Q7UPUbMhbqln18RcPLvjdk0YfeK+eT3Hg4XZhTqL Yilmntaq6AlCcpZ2taL3FaCXUvme6XRGGEUi5ZUCgYEAs3IQr9jqz0Ta92/rt4vj bdgtUVKMGszTt2vqBkuQZ3g1GWd4p7Wq1RzlAeOh//qw8ioQk4sYReFanBJ4X7On nwEVOixGKo7T6upGQQAYqZM3l8t0lhYfSkujUcVr5k7HSpJ55zNVWfDBCcdUVR6T /pk0EoPaWjCKLqROW+xRCaECgYA8ecptgEGtFhG0PDg1ZvDXaEGCsaToz9aIq1mn 4be7KWm//AyKBlWbAHhUzvdTZ8S4eRuKlg5wj6DAOOw7sF0P43m6U/qZ3B0PSvN/ a/9+oHh1YSEPjcyuy+YyOe7KlizqPVfvrWHsDzDjZZOReqttT8veFn1rj0rEsd+F aFo3SQKBgDtU6oa+9PU6s0iffu5Ev1GRT5fYE6QckfPP91UzOSIOqVscfIEzzKvL N86/xD6Plzzft63kQExQ3nuJnzhsf2lRNn3tedhT12GwHV2PHMinI35cDcFLzamm Ew/1GbD3WvZ90IPb6p742gHoDqYUy4CjVCPLl400h0sK9KEIt3Xu -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 131073 seed: /N4rLtula/QIYB+3If6bXDONEO5CnqBPrlURto+/j7k= zfec: required: 3 segmentSize: 131072 total: 10 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:MDMF:wmnu2vkxtpdducouhywbdxiavy:tbrrl7peldu7poep6u2bhrnyfiixnzzgtj3bwrvfbfoedrabknnq format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEAys5wie0saDumz0H4gXLEWFKZ9iXAFs8R0S95CwxyQn9EMifm zlAuS45VKipB+ysNcZPStxlyjenCyb+jKNg4ZHD9DXBEB/iDflGIuJ2IL7byY4yK DQfk34/eiOms/TsiNc5YJjYQ9vaov5W46vymFhnOLxL+i6CnFHUC7eRSJPcjnina GhopcwN215sx2j4nmad3aI/2d2sAtlVoP1inlZeNdbp4ZfIe8YMBwb8TofO9ga5s 3+4Hp5bfhiZJkC/RfFlZypSIJi4PJdVohPxfTIFCYBF6DIPaL8rkxPzwFcFDgiCx tPY3JjSqU72586e2JMK+vygKMFS337YFf0Kd2wIDAQABAoIBABVAb7uMTmh3w8GZ LKTH9Xo54adRCmF5fmj4vArj2X9NXcSRuNZqwYcqWZNLDVH4D3cU4fJM6NulIMPK YJsRmUsxKdtEJeTd7k2I1rZdz50MYzb6TacS6jFhHpUjQ3zfuvR/dG5AoSN38nPt CQ0av362Yow9Rc403f0/S7jJAbCcw2vo1UZVeUGJKIsc+ALANrsCOf0ROEToCh5B F82zT+qGZ1DlDnCyuwUEcevWkYqnGDNoECfG7QGSFDqq5CcmLgsW39SdtfWBcyUs PtJlYaH/xvi9W6Ncuf5YIrUv4alkZLJ0XBgvuajTttsuwMOpN1SOSs6xvoT4AsUk /py7ek0CgYEA6FFs8usZHywVZGveO74qwJTLUFe6LcYEnTqDxGuVMCTeVMf9Hp1l oE5CBhBfFe4HbNEtUw2ZqJBQDkbEP1E6oz5Gf4LWlmnEbMsZBqgd/UJZ1hUwe33q 1cNdC1kw2O6OeULXCBzt/CyKZKGSjJ/JLUza0+Ha02TUTSEOGaFylj0CgYEA33rg m4rTpuowGvJ5ukWYEnKfsHoBJKtZfWYdpUnopWPs8v8tE3zx0vyD14FE83ZHzGlx xxdYT/AdKvIQ4EB5Q73E6MzNAFvxbcVHFx33JwjaR3/bG/EWs8DgGfkOIeDyogft 4nVQuKcernxMndU4pu832DBtbpv9rXI6aRcV3fcCgYBl19xFGZ8ntTGjlk4ULqeb SR9gFzU8/8PiEVbWcrsyIdd9nzZth16XyfbTpbWpbXG/2GtgL2QfKzSNLaS2hSuJ iLFrELZ1teQwNVDBRE3xSncLjLp2SJr8HurZIL5zOxEmQ5D0s4n4tKXuu439K8cL nteHb0l4xojzTvxZbBdJmQKBgQDa8rg7q7fRQIAA5q78IFLtP//UFrQoCPiUMwe4 eMDFyTDModS30yHZZCyHZs72+Fs/mc8vD2AmcUkiWibOjlxAUhwpOP1f7LSMp0sP Cvyp8bJpeopgxcNIOR9WUvvVlV4iAUK/K9D6GEGnEYC+4bevVY+Q72FHjOzskY1I iKWT4QKBgQDRmlhPCytajGNxHBKF4WM9iqSeBr8FVD1Q0L/OOqGHd2WaOPr/mK3X Po0/Ruug/l07GIuTO+aDClxbnjSnVIQkqJE0PJcS7uefQSDpikL/UXAZKgO6fBD4 pOAT3qspB/zHsfYhbcqhJ2wWNgcl0OBtcFL8qa7aZXj5j7xYQWx9IA== -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 131073 seed: /N4rLtula/QIYB+3If6bXDONEO5CnqBPrlURto+/j7k= zfec: required: 3 segmentSize: 131072 total: 10 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:CHK:bv6qthmetlhdnwc5tfjqamp3yq:ehz4ttd4g7ktkxvbovt562wfedc6jgnt5c6af7wxgp7jbwfwhoaa:3:10:2097151 format: kind: chk params: null sample: length: 2097151 seed: uqWglk0zIPvAxqkiFARTyFE+okq4/QV3A0gEqWckgJY= zfec: required: 3 segmentSize: 131072 total: 10 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:SSK:bliwpq6aqv6np5ekwyzd6pzq24:y2pbh5xp3magahwtbhypx3gbgs7bw4guuvl4rgygv54vlx67jzua format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEAjbfnWiPN+x+qVaQBm2BqDAlffuX7yL8H5/WbFYqDVhUvewd5 92mHczn8gPkatrrqfDQ1uDAT+3XN93ZNbLXpVfApI/JR9Z6xbnjd4ngrCI3UenZY QdaMgNAYh06MkErLWuLiqNFIRxssdc50l1w7PdlTETjPfMkwQxrNx6NQYUaYqkGL 
Js2HJg+fvqY3IzZtYlbKjTvo6X1vTk2qf6EkpsVd6hm5Gaec9eLDumTGHSsW9e17 vKnDNQGcEMAR/XZLHPoKta0iJO4gEdg0uUolzcgAAxMbJFQmYDg2ZemEFp3ESHe2 MF/CmdJSXvPa13D0DdfGFq4alsuIDx4a1JTo5QIDAQABAoIBAAJdD9G0CSpoA8o7 2v8BY6NhwKL4KPPXI8WdlgGM9tXHsqwFmuYib2zfibOI9AYaJfD+WesBekPWWiIH ahEnE4YoZDdCQlWrWOAzydeOE4GoA+Qq9xvZ/SvkzJPtHnEFnlCcuhUAsIjnDh3E 3LLtidtlNXpzDRrSrChiWQ48TgnhQi203dLAQHQ9SDu1GDux3RdnPnNFDB41ipjZ dGGPCOdIuEpbP9B+k/vQcr/xBUPhyghW9ysASn2I0Lnx9Ge+GWBug/iq1Rt7x3bX SO/qY5iAVc53F1Fp7tzPeThJyudZexakfNYtzOL/fnQ8WmWLREwNBwMGCsm2+FWU V3yKVn0CgYEAwqfRG4nvbtfUTxswgH9XL1yH1aY4870wMcQ76PWxT/4Dfo2YtJ6f ZWjTV1Jock2I4171HTRxApKMOhS5OmlgVUj6sjVxtPOSyM9kj9vzWn2ZHUvCOj6s kJKoF5a01IZng7qzo5dgknjnRMt1eiFI0Vhpb8R/nNDCtoDullqSlHsCgYEAumFJ bogBLfkJyMtvK1RzFXDEc35vE1O65lGcW+Kbo2YX7hlkKy/BmwBtrl1D8xoaJBnl +ztKcBrGXGta2B4n95PY1u+IEYom42koCyu5v6DFLmpIQ2JvEu9yIiTtNFTdYYyx sQHG9dsIOXAsBOwbDn3YbFNXDhvQmEcPLhPuah8CgYB3Om88nPpJPG3QnmjQ7C6s 1dJlrNDJiqIQeY/wmz0mMAJX68cTKu2bIeABZnqPOKqWCj28y7hEyRqXIMZr3sug sXjM2ytwmJjZ4x5Hd4PRc4jrhtHK90SfsRTAjhDo9AJHj34kv73pOaD+ZFjqm6SM hcjfKs63cK8zNjntYkDSLQKBgBKAmfOZGNThhjEi0PRyO8KDIV19zbUTeNhofac5 hc3g0rtWVfVbllK25iyLIbW+f53Z3FTme+tJHSwLlEckJz+Ss9ISkWV8W4Pz6n/B ZX06jpifAHGAEhrFHoV5OPsa+ac3emRiEshRaC2bjyMl9UGpCJUoaNoDtN+JHl6q p95tAoGBAJbsD8qJ4q1Nf/76a/CSR6QRjkvCiDyleC6wQp2Qn7hOQwbIUwb0qDHP ab2VcrW1bhIHCL0Q90VwD3lPlR5V54aukmNGJKY8cUqnNnMH7jSF6ER0y//AowrZ 9ZRTAHV2uZPr4N8lnc3daoX0FaZl3M13Np7vCmRXaOuppbEEock+ -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 2097151 seed: uqWglk0zIPvAxqkiFARTyFE+okq4/QV3A0gEqWckgJY= zfec: required: 3 segmentSize: 131072 total: 10 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:MDMF:5duhoqd3bucjahftqmhyms7dbu:avli47dw754af7h4atwg5s36ffiwbxzaw6ixatwm6nyohsjie46a format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEogIBAAKCAQEAlPJxm2YJfiYklq8Z/UAsKStUXkHfOfpaljHsn/mCLvTN45rT f2UWUhtX+6wFiBep9Bq3FC9jofyCHoIrcT3RsA7W1X5OgGZdG3AsvPF/RTaKpBtq fPD041dOuJ/ekjCEtQXtPNLRVETsCNkYG/9A5ixZl8Ge6OmELFQpHDMSVx4PtOPe tDn4srKQhOMSEoOzngHf0dUtE+atdLUUs+gwxGaByFOcd8h0UvZYq/JcQUKsPkTl rvbxFuujFQnhcIshhC1da8wWANcP/N3dSx0wv3934aoA58ukb0VzV21QFHyF5Bdj E4Yf5CboLT/dZNpIEmvokcGg5MHEEO97+I4bswIDAQABAoIBABsZRwEaXeTFJMgD gIt6Zu5wky949ZeTTHLiD2aFmyFW6bScwRj+98Ildmrz/6ekgofGaoOyIYLhsXzC ewvlzuYktQJvsfGbbholXQZdO6YIh83WrRehMTTBeDGP6IsZZ7OVqfV0d6BIz9bG RKQnWxPlgsFg+Tvv7FuyTi5yvkX6C3g+cGRXxDHVE31kJy9aXz1nK/NEFDzbbHLx PILDgS8UUOP0/mnGyfHgcXNSA7vhhiQ+FKSH9gRQGNImX7A+/vFxc6AzoQ63kq66 ygiDdQv9c4+O0f+X3+/cGyaKafz+OWKHg1UeXGUv4aTnYuxelo+RwV4dtaRygoEm dtF01XkCgYEA0ATGNf0sWHpxkdpCHEgaxLge+m0OerTIZKNzVQ0UFszsRCj4LvK7 Nhvu5rCY9T2C9CpJkgb8VpwGtBBZEFW7Gvfg0C5xQkzDaZ+AhEbWiSucOH3dFPGg 1IE8WV2oALu+IttuKizhQWyjSpfWa3taRiuZgWemEalLM5vC1qQTiGkCgYEAt02R z1g6BoMt2OExNMB9S7JyEsL0X+Jnt3pMa5tG99lHl9hc7vqxgb71mLKb4jwBi9U9 tFLIbTGJ82dXlFHsM39KU/xvJJiBlxfHwlfRF9t1amp68oBJrMyuQASJYs+3XmQN GAm44QKSGdCaxsdBOuQT4LMIQJKjv9bKE6gG37sCgYAlTTXd6JBTLWHALcs9FxD1 xa6IaZX3GwP0R/sefUHk9MpJTq9ye8RmZ4vngjNrhqQ89HhM30PQpBnvoB7Ydwce RuThb/KPWQSRpDB/h9RgtJlG6AsE/m9ArAwOWmUN/JyT05VlqraZ7Mk7Tw78JxqB CsB0HAoDkMATeRLvOmzmQQKBgCg7LjWD97hWMknXoyUg2l8y2zai81/YIUtz3DIB 8qGTXtNE+aC6BRuk/eJ10SDmarB2LQTW5oaQyOZTWDWFhYIH/hhQ31P45Ph0j7Nn 8sx5rluc4z82SPVUNyp11HGLhYOCEh2khJ9eIRLpZg8azIZQaMx4fuctSCNi0Rdf WaLhAoGAB+9gxfCGBv6EN9tZxPZHx5YTAjIJ4Y82ZRgMnGhijzpdLvS1AzZ82tj5 KDv9+If36M1jJNGSQIJ0bNIOGbZkaqa2gHAYb4gPzROct/msPq3/UacauNiUutCn Fu3UjLZ+kbmLgwU3vM+RGjup7xbshlSbrHFlnnrBW/dFHJNXYgE= -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 2097151 seed: uqWglk0zIPvAxqkiFARTyFE+okq4/QV3A0gEqWckgJY= zfec: required: 3 
segmentSize: 131072 total: 10 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:CHK:n7ogyjbo5jigvxgel5ll6q4vbe:3yjb3zq5hcdavv7ruefawal6euyvjx3lx7quslvasjellv63cxya:3:10:2097153 format: kind: chk params: null sample: length: 2097153 seed: BTBX/ampNfLU+ox7xipBGiaSbgC0kcB8Gy7BkJB4oKI= zfec: required: 3 segmentSize: 131072 total: 10 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:SSK:l2haupf3wtffxnzmhgwrab5hbq:cux7vo7b5w5fcqfqldt6l5ijiskdcjid6rlcz7bjchvmleb5xq2q format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEAzZOs4RtK/VfGmRuKSxE5vLsWwJtV0T8lRJZl7Rg3EwTrDB1K vK7g9E8vsWmNphYSpOe6gZ/iOiLLMB5Qhml611luN4VOOckhRqJFmcSdIPjbmw/b O5z1985q14iV5ZjN2VMdfG900+GmMKv/h513BlxOC7Vp0BIZ+PAj19JyZXLm0pbn 475i2D/pAyIkzWGsNPU5Wm2GqEqbFAu9knp7G8bx5CF5J0RuzmEMJMQcnPZ68Kzq PRfPmgeevEI6lBRLxD3QpUJL2BgT7hKMKInIxP4GIFyWN3hMiDtgxq/2UHWHdH2z YGl1j0ArwtPLK4GT1PQ24vORxw1iHdY4w66tjwIDAQABAoIBAFSfiQci5oQR+VD2 Sr+q6BL+CpgfeTyI20z4Ah4OnUEpgZ37gtPXwwcef5nuwt3O8T7LmvUX/RaEUxLM L8acrfHuwNV+/NwBpL6ANtlc23eCqVeTt+G6s2+eG1H8ygN4mqfutFEQSk2b8f7Q FoBbO+802PWt6FA721AjfgWt/eQvIhCON4Jt405tCmRlOQ9KLJmbCyXsiB4+gSks iQUAPZI2zoj3L0tAd9J3KEcSRCYrSRTv5I2wBCuQtQVDXWZ5rCxM4T4hoNj9JWU6 kqKEU/nCQnfc/01z/jR1rgrJpTnfTtimqoc1IWC0sEbuM2Q+O2Z/szmN0hRQLCcz hxBduvkCgYEA0Qet8LyVH6CzVLof40VcpDLu3er4qPHC/Y56FNmtpscTUh7QtC46 ecUyNkWbxw2i4J7J/6x2nMNhPIgf9p85Xx11bQlAaEmBAEdTimp/v3GBxrY7vEn6 5GFuIo0PkY7cjtvk80GsM3iXELaIg92qRkSTf6X02WydJHXiFWZ7wwcCgYEA+8Vb JpC43P6LB4oHbZmVEShjmiy+kKRurGFkwOGrH1cVq70x+78EriyrMQso0Or07L9R eiv/sMawoGgpF23UeGDWAr3aPg+ZNBeOaH9RxrqPxR04zkBge0+j8fppQ8ynAPnA 8OpsO3FYZNFBi0qUizRxklE/6mw6iTUAkkvIdzkCgYBI2r5bW782CNK4Qy7+DZze dgofOth0Od9WdKRERCJsMJKhWrAvPLWQ35RCqjxDQpN0aqPJAxlMRiTL7j4FvTVH 24KkAEd8kbHuoO2THs9rsGolEjr7w2U42GSEklnMx9hDyoyf5FHalrtATf6Cx22j lB88rGEMrviOTq//+XpFXQKBgHQCrkuI6AW8rGde7KlN2Wg8ihiigXS4r95ySjCu S2F3iR7HYN61V/zBzGge0kHh4dWtGmgHGhxkkUJ4fGa1Tu/g/vvoa0WpfliIejAg apf9ov2ax3ASLeLkAZEgZ5y8Ej/a1VKtUg3Z3ncmDOOYC/ZQxfw7wA3OrPJIH+lJ TifRAoGBALkaj07vfKfnf1oKCsgalurnPRF5/zaIrsQUQ02W4ZKLDPCa+0jcAPHy LoqUyfmGy/+Q9QFRGVTY7enh65sRadx7CN3TAqdqHyo1KIN63UaJ11irGdQZbgrr 6bNSRq2kPWGyWtURUP2oo4q9jc0iWfmlIO0OXihdf1P4C5Uxc2GT -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 2097153 seed: BTBX/ampNfLU+ox7xipBGiaSbgC0kcB8Gy7BkJB4oKI= zfec: required: 3 segmentSize: 131072 total: 10 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:MDMF:im5wulgo7r5kaqlkwexlxrkw6e:qztfutuv3vxg6dzi5zn2is4q5gzzavvh4sttlaliyhz5oyi7ihqa format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEA7EmomKj2wzFXeP4lRtDJtToKB4s5mP1VP+LP4XESDux60i5q OjCMqXBnyRJpd40mqPRKjVWrHLmmZuc5+aVqxXrcl4TW7ZLBEyR2WI8WuTaFnEeI gJr8obAKiyfIeCrace9IShGhtFNuon8JTMILE3y37jPzhBMGL6OEMguucOi1ct7y Pg8Odd6aiJ1Jn3OYa7nf69PVwam9FWvg0Er8jMrphI+2dpNETLkyHylZPTkk/t9e QUBSNW4sHfUOqwoBLYrw4B5xDaTNSsdjqmxdz9rRovBs925GwRCOEzM0A2VCUkKg ZeuIOnSh+mDhgQUoY3CSAqdjda0ba2U/mRP4WQIDAQABAoIBAAo1jAfti/zmy75d 778hrAdtJqwJBU/xiS6Jlp0JYUP9Dnj4ma0iNh+njEptJdqtIfmclYCKDiq/cDvD u5boYWa/LlsLGay54ab5tTXSF+OgghEcq56P48C6LhhxW3lqs5XG5o8RuDSGAtKZ Tc3v1+HrinhrrPa8yjCCfoaZh9vXsuc55Z6Svntr5EmsplpTUrgUq8QgYk5PXpV6 ERadGnNOKlP3rpy18BxciEAdCFHBS3genlqfefIJ/6DSUH6lrlPWpGgoUC0YIQ6i mbQ3PnRiqQzLbKlNbNRf5QUQAfcqXSlEC/puwzj/5TNcM6gGgAaJ73X9gh4+xsLL 2WmgvBECgYEA86Sm4AR/+yPEl0famqO0ThcEoRd+oDyxMn4KPVgIqsgeN96FZA4A vGQmPuGlZtsvvW8qQ0XqaGi1E4CSdkGyDYiUchMmuHTXRn+FVQRNScio0txKICuc MHTy8av55F8GGa3m90LyPvuqi/FyRheV3dmJ9Ou2EJy7FbU8o4TNr4kCgYEA+EWB 0t/gMEUg1LLNx1EH30kt/5ivK6uJUN5nyqrtKT11s+5Amri2zOhS4uiY5L1FJ6ap 
uk2WewmyVZl90xnHyb0ggznjhM1Bu0v7538anx6cmk2vztxVtVp7QpCPaQ2zVRxa 6lVjV7WYwxMCkCK17b5C9wGtVrZInuxKm0pKflECgYEA19XPwvoZihg4iq+rt3w4 OUlo33BZy4eYjhtb5NX875XSNzoYPvesrTenLeNlTEX198Hn1aq1KoM/jiRDGyG9 owGQR7IxhgxzvM8xBYyHD0sES6+8tt0LQ14G7hKkkCuh0tPcnMSgpyz4+3oL+o3g RKT28pJxOiwuC9/+9Pir4ckCgYBtaaRnHIaefziSxCHv3wQLISMGa3F3W2dunjU7 mcxeylke7LbH+POGpjQxD7Shyc+6Q7a1BhB1NLbFBpnu+IOVoqW7bz2XfyWitz/S q689xK3bSrVaArw66h88HJ02/PS1Y4Olle/r7XnfLneIseNfXOQCG1kax8aFUzkl 6r2doQKBgB4L13+ad44L1O1wmL5bQbXilhK+y4ZCeI7kmArASXbok6bCjf2KhN73 rw4VTeImmWMn16GWznvekJyff6JkG6f+RMUSeko8GT+Q8A3lb+99clJxUL8E548U GT672ONV/XbMLaIqGlZfblZ0qElrI2744IJoBr9D0uP1hvxDz1z5 -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 2097153 seed: BTBX/ampNfLU+ox7xipBGiaSbgC0kcB8Gy7BkJB4oKI= zfec: required: 3 segmentSize: 131072 total: 10 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:CHK:i7vkx7yjzrtlzwnm66a7jn2dwq:rpz32lhxxu473pbze3c4a5yrsy6yoabfdb6v6o7plv27w4rlwk7q:3:10:4194304 format: kind: chk params: null sample: length: 4194304 seed: 2NVWOXvtwcAFf0J/g0fDC+ypQbnCU7awZoCxcnhpkVM= zfec: required: 3 segmentSize: 131072 total: 10 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:SSK:guphc2riwixq5yh3hhelvznxvq:cb5gfzv4yswoqct3njn4irc7j24q2cm2byqlxuawaxyjrrvns2yq format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEA09GH1R4NN/xGCfZG/AsPeQ/UW861L5FKvT+F9FWmEbotN5Dy DsbBxeduMPEti0z5wZWXbuSrHq6lZMD2MxJ1M2QL5rNQ+eR1Rpr8LAQO24PE9H9X TGJO5aqeIo5M/GLFNGG4sDCGmwkwMGetNAtTFRzqjq1i96GlWBIYclTRLj5Wey92 EaSCO+CWjyiYMaYloSd0QJCyfUipfrtA9O/tKBXqDq5pY7g0elCCq0KIjyUBRHx4 UmTcl1Hpg0S2i5BpkskvJr8M6foY88eoWxmYLw0MFb7ueeh+3e+NwpSPsx0lP2Wc B2zTT/F8ClWoHR+u4YqCSOmePKci7qRtx4pQKwIDAQABAoIBADdzwjCr1mASvi87 dyfiqWFTIJAMVGioi71xlNr7VSeM6uuCGax+ohnyVWmgqgCu3S+tvuA8IwQ8SnZP AeUq7t3OUkNKLGfPRFiAmIXZZh5Xp8cuUydfETKU8SMwx7zHCsOE1bniakrKJAB4 E+LtGAoN8OX7RE551fRxgE7mH4EQPNwTpl+1aqAvl2DOukXjcyywW58zBJ5VuiVf OR87US8E1RLKAJjxcMeh/J2hJV4KY5LIzViWBsyQH+3E4938qLubXlKpZraXzoYG STv3PTQd17LTD574P/1aFAirJlAobqodbwkQdc5sUcQjGi4tcK8ju8UPEllmrpuD kVkfQUkCgYEA5zqm0feBtoeJa2BxoNmjKH1p+zaA3RRps44hDnriOrZZ8R/xaa98 CjhVJMH4lthlFNXDtH+2VgaMs0AdAXxQfhwMUTUgX7Oxivd1OG7XBDT21ITlnd14 f9tBy6J5fRXGkyHRSZ3ImCRcgObCA0BkPPXQtdzdV48O6pQudqEgQN0CgYEA6oKN Ot+p3unrLUP17oQqyEHnCcPRrOTQkdVOZ+rGeQm/2bkzVOs36q2cWJcXrOFh+L1h OKnpdTvAYpgiDsBt0i8CybXhUr73l2c/uO19z91VUiFVLr1qNRcW9e9bJNQd9opx E4NbpGzFINo96wB1c9oVOJKfBKzkkiUpCOGAAKcCgYA+u1bO2AtE7fiGPSAWt3Tg Y0YBdYP4drVGlWS6fPQrYZV9KWFhfs50J1xSIJ3EruidgnEZ4xwgsp4xc09rO8LK s+lTjso9rI6aWRBgQxHqfkQI3BU/gvpSFbX//RBgsyuwdxhElJ37SMIf5nr0Tt/i +f2pmUYjnxg45ALHBGevsQKBgHhMXkyMPeTnFEhVK4yeah/uhqlgtWe+vSuCQ8VV D1k54hu7QJTYUQfm5WQgpfl+aLaj14KszuDftPIe3qG4nt2KViDJV3wOEI6vXWXt FnQSM1l9VegzLI9td87TaWr8ER7Op/D9mn4/eeQ/cDHkO1whzG8H7+EDHAHIZEN1 AifTAoGBAJfmHCZGx+d1X5Rtx87YgpWzwVDDXTveZ6CNGe4hUNAEVQCNgVzol8Ha o/i+J3IqLSb5KUclhFrnH+fMYelJm3JQXFy/3c25cwnQLTJV39hIbREDrErGN+SU XvQJSQDtqXtCjWILMbSq+xL4fvUYNSqtoqsvYwV9XdkjG/pfaHZt -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 4194304 seed: 2NVWOXvtwcAFf0J/g0fDC+ypQbnCU7awZoCxcnhpkVM= zfec: required: 3 segmentSize: 131072 total: 10 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:MDMF:fyqcwoevvyytv6vwchxcwvkcdm:fqdtjlkf2yctrwprgu2eei5n6noufnasi52qvqzhmoalaxgo3zsa format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEAtptKt0U5+0WhbXHsP97E+R4e98MMZ5zhSIEJf4w0SX1PN2JG NoO3CpdtmqZSHA/QdiMGM18ThzK2RU9OfU2ltAsM9VGFPjE6qVGYu90vpYy3RTVj en3dXXVix8ib2+6vZSKx+BZXxadeK1AMFEKl6XzbcnTfTgd1JsE8pImgHhUGXH+p 
eM9h0MxIMXvYdGeG+OghjNkPUveC25jrJO2ZNv1A1AxAX7Q1vNgFPrYe7AhJNlfA QYq0TJYXqywUYDlVxEtA08CRM3urlFJIHrdYgQ6+7PkGqQ1V0nUdosqHeXxAknrI 4U1WAObvl0zGHSG6UzfboLxPKHqOtpxyFIrptQIDAQABAoIBAA1CGdr+orwUt9Nw ybzFbuXA2HIYGPwjjtPV0pu5Jp64rhoQ/5S7sXw1DGyFwOvFVixi/0iW5vMSV9uR FjDQ2hlyvUkegcnHMewolCn9CurpMZv2dAzpEJEq5wt8YZyb/Y4em8St0pzzjvR0 G7xo2Q+qf2sVrepGGQJsVfvYUOYoyXihZA+INZ2Rj93jhFHDTs27vo1sksj4OWDx urXat3Q3tA7AN/nTzdKPOSoslqbctY3qMtJuOWi26a04YWpa+QYp9bWEEKuKScRl LbTDqj7GgBEC3ODeWyI+PoQg+Q994H1AETOZ8bDQ9r+C5J9cC7nXOKZrpF624zbO 0ZvMEeECgYEA/z6I+iQxjywspyPHvhbN2EeDwEjCtGrSw6ywO6YiOLy+tjjxZp77 drsxc8yIfy2DAN9L/zg4vNYD7AUVFE29/h/GcPRU09VbcMUzlP4xAhs3FKg9Jq5E o4t6Z3kJKVzFjkN38fuzMq1SsIw3+ntlPCqF4k681DabW5sF3XFaLekCgYEAtyWz Sid2pfJoyHudbIWKTQZwXK8STw84VE8KNCviEGPooBoBSs2FInIgdW411DYQQyjY PtLakwoFOnj78LxBPElq8NzVKRRLzzUcQVw9uXTZN0UGTf9ztauZ/dKysBjKIyzI NWfz1+SyfND/BFwQlAINLMpnxkgwjLOk56wMge0CgYAScfJ0ISlzrz2K1osYsY0u k/xxaNCpOQ8CFPinVtoiP4GIqZTIVbTWX7CzLZSvnBpbdceIKgfvnYerBrL/RJ72 PlWY1A9NP53cCGQx4Cyqek0AsSe6I93R88Jkt9pxosKkBTwlwIqyntPa7kcdUs1+ C5ShRg9fRpLzi8BgwFBEAQKBgQCYqJkYb2qLilJjAf7HLUyJRZu09cz6D0Kxq6xi rk1hwhVuFh8LneGiQ6TgnTvLJkFJ6arOOu0r8QdIpP3DvPdXbA7ys/ANrLhAABIM PPnKMya31hYaP5rQTDgwhUaiWBdtWG+NbJepVhycw4w9swuygz8+HXyAnz2wmjET VqqaRQKBgQCJxWxMire59h1v1SMAf6x7eZlNMfBf1VTmLmQalt33xCk3egdD/lHx qS8fZdcBgxHcVYZ2vWxESyfyZ3Iy/dglRIKRy2rmZIyKxp2yQNURXuEkKNQdXLrt NalUR6GCI8RRgnZCHmFIB09WQ9BFnSUzbFhoLOYQIg3Dm2HoA495yA== -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 4194304 seed: 2NVWOXvtwcAFf0J/g0fDC+ypQbnCU7awZoCxcnhpkVM= zfec: required: 3 segmentSize: 131072 total: 10 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:CHK:d2hbvcmbex7fm3qu22yj4qnkh4:2xwqxbawwgn773hht6etox3oypvqqjv2orktnthfo2e7vibko7ha:3:10:8388607 format: kind: chk params: null sample: length: 8388607 seed: w6uP8Tcg6K2QR905Rms8iXTlksL6OD1KOWBxTK7wxPI= zfec: required: 3 segmentSize: 131072 total: 10 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:SSK:cugmepep7hr4ori4mvbkkplac4:5m5v2l3ul2gmekwefhahep5xvi4o7frzr5t735ruioy66oicvf6q format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEogIBAAKCAQEAh3dRTejzms9mArre9F+NB3B6BmFfrOo/fytWqAeT9blc36bJ x9ErWL3PhkhxJBaCQd0QxwKbM3gV+41JIJrrwMCdw1D3yi2aAqPo8kogQg1/MOMp v3LF2OcuzY716WOMcd9KFJDgIMd2dQpO0sEmxsg8r+PDS3mL0ZFSMHScpcn+tq5W of4qhCFEjkvkWEtVI8fG+OcA0JAQJHQDzjNPuYE46q8VjWwRSSt/5L3U4IwF8IDy v1w52Q2Q0pdO1OAOVh5FxZ4lybu/bhQENQGaQV0ViBSPtYglei7fYYkGhVEpZlNr 3VfwUtywsvqTMfsHIDUhgycAFqyl839wPal/oQIDAQABAoIBAAFJ7dCXcEXfRkGt ZHSfuhhBhi+sWnwrKFp934+ursYuMsd/7ziC8W4hNCm7Y7QrsKaOw1uZVz+om1Du 2azBPI/zZSzYBtt6DpaK16sCcIcgWL7u0lVbcq1rGaNkAkvrogjtTmeerzswmpxn cjOIGeXXPtiPiqqbp+xg/Q6v9VOQclw128X3roNF8WWtDKZBUuWAcYvT2Zot/aNf 8KND5bal3af2m4dEdEGK3d9cvKuPsukCWkYecghy6K/vgce4uhA5I+C/NYvVEvWv +6Bu0zNNT1ITlE/adhtcdYD4EClV+ShPWr+d304HOVLZD6NWJsmuWOa77ZewgmT2 jWFgOaECgYEAvPQP99gKb3+NkU4TilgK6vDrlt3qMsGTQKgPwB+hTJ8MFFPKyA1e +pSudVtCmchRg2E7YAU6NOnSESuMvXwAY/r2i7annvKqgegwmP4dr7K7xEwXILic w9J8G2mAU5l6XD/55kUPIyJRE65d0ver4j9krjH8Ok3HksL7Y5NCoKkCgYEAt4ii 5LYOockPa/coaRFNSTfaUhKc9kii95K8iPyRMx+OrgAZRYuOAL14j0R1Q5gt/7Pp R86SdRFVuGCL0Ec3NVwZHAeJZVxAv5I6vpkKQFE+UN7UAp79DY7Usbo2oYNKLbnz QxeEKdFHGz5YzgZAT9plDG6YDzszdltxoyy4KjkCgYB7e5BX6yLevN/6fqi8d08j PLDpljrwUpr13R718nXKCgKt4hiaZkqUvcfJQAulTkke79MKrD/expOWzvwZ9MiY jjDDG2otsO6HGQNxaFhkMw0MeqF+q8cfHhYnH+pSN/HECbc5qhX7YvjTQNdmAJ+e qskUIexw+dWb7rq0107qiQKBgB8KUOJ1mAvswVr1NjRu5K9Zbucqlrlgl49fldtl O/l1gOAUzDFi5OVjJhy061A/UKhKeU3XthVulRzV10+me8Pei7Cd5bLq41iDFsRd hcpS80MiLKE02n+MhJR6dfrjBYyuQmI6e/PGnUwSQ1q02/OlcCmmGrmYvZ9q39FF 
IvChAoGAJCqcARPYEZ7bRbUiWNHw58C12bjqA9+Z1aIBmBSkuyIFQF7jDDoOpzON e4njvXv2tY0C51NVQrnNNqa4Fsb/HEE7mv/hAApEGBZhcJIX3nKD1o+FtCIMROe0 xx7f3h/IErCNOmoImhrv2A5Cxi6cq+ZN3LtO14mPdkq1lwY+9i0= -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 8388607 seed: w6uP8Tcg6K2QR905Rms8iXTlksL6OD1KOWBxTK7wxPI= zfec: required: 3 segmentSize: 131072 total: 10 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:MDMF:ynoa7zlnwraylieeahe3ndidf4:ukho4xxl5f6zku4qsknk2aetdzbkqkq7v4ebwiqwh77mdkyw67na format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEA8nnT38dExgYDxNQX/t0WtAGZd4ibBLy2OpW097OaJ3ets86R qWNH5LWKwESEIQhRAM1bynMudc8dVCPRRuMBRq+aVJXeq7eGufhptFvtzIFz+/h5 sehSd+D/t7puCQw2uCA3E/xf/QiqVhMrqnBZKAWzflrFlIxabgMjAxST7XxuPk4v HxjrwEdBc1UNwGMJ1trhMWdWrP3FgBn4BIA8YOszmHagsjheLBmI+QtVbtk9EySd +zTXZdGSN1wq/2WuO+YWBhLz3NW5dnFn9OCTlBYqzGStpAD3mATiQ6RNVoH+hUMN hNcepowHlrnzjLW0CArh21oFReznFg88eEpYBwIDAQABAoIBAFH216Wh/P/5YYXD 8jaPctC3Z7Kt5UT1K55jI9DFj/r+bCPHVJrPOiq4KWZz4rwtzP/56yjkxZRCRlY+ Y0xUiQZlbsRgAuzF9Y9gxw6WMqy6J8RJio2WjGYEkzx+kxqQ2+Bi12t0mNf2eWnu QgjzwFeUkcfJFFfyF9FvUwMEDjss97odJa1TRP183kBDSm/yC2dndnXuS4HTqAnK 6ybRDUm+3dg3um0wAoe0xZhW37OECO0nS42GK2DdHW+Ln8Uuw+sMWT4lBqq/5JQT 9pNyUPqvbxkV7Y40JPbSFHG17eUBDCRtc3G91SgEn/MY0iZMdYeqi5xaMyhknWBi CTwRGkUCgYEA/W8qJcXUoo6TtR3y1XO6vC/gUBc7zxj6UaQAK/gw+m6SUp35Q9Yd pGP2t37T1m2PMcWl7CWvG/ctkuY9B4JvaAGrfD/6XeKdnqm0qOt5B4yy1yykfJ4X hnXQ0Nv0nxWJ2Pi/+rwG7tLnbd3mU6AQmp6Kp0lS9I+Rrp2Q2qnOIC0CgYEA9O5D BpBn0mpgy3d+55vZXufTSN5ZS/vc7WRcWOGCtVPHD80k5uNkGiR/XVu6MmFWeRdy N8XeGZl41scKnKtqaIbr1tWbRB0HthSOQvJOSXk8cbEbbxfchUsBlW0LNmMMQIC/ R7388vtSdnIOY3gcHprfNT0W35f2KxhOA6BLBYMCgYBMhl2WrEbJkv286b8ifuB6 5IX6CRnxLdyf/EJlBHtdkzexpKvYtPWcZubff3ddvxVG9SRlyvc2HYvwWH9DHjqf kCmEyhjCcqQffaTkgL257t0tpfhA/MejvT2BY3lY8/r8vhfSESaSxLJG9YMP6zw4 Q/kgDD71Q8i8ji1oKW/pPQKBgQChvwJ2QEC/vM5lL2mX69y1huSJl4Ri4FW6U2+E po/ZzRSFA9VdwEan2PhfH6crhApF90zPNhUA1M/vDgyc/7pKgucVvYRGi+E+xf5Y iYlXjf9zmSDj0V8oiyrlkdg4t1os8pje+MEleQCxBYso9vWi5GWI0+naCJFhTjCe xmkknwKBgQDLVkyCu2lqLdJXEpEjnaYitlEFo61g8U1509LxnqvqjQCyBUZRf0Wt xc+QEPNIlbF0PmX9dBFVazq9bQ0bNZk1C2Fp1EJ/62MNGrc5PzNuZjXG/abfX+Zu uYxLWlHgLHuky81onVX0+H2S1y2RTUiBe95xwIMJu6gFEB1iVq3a1Q== -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 8388607 seed: w6uP8Tcg6K2QR905Rms8iXTlksL6OD1KOWBxTK7wxPI= zfec: required: 3 segmentSize: 131072 total: 10 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:CHK:ccxkyfl2qtqyhihduarpxbdcci:e64be3i2t25selbpc5y2zj443gkdo65chs2o4tpqb7axud5lnxta:3:10:8388609 format: kind: chk params: null sample: length: 8388609 seed: yPi3JHKKbWaEEG5eZOlM6BHJll0Z3UTdBzz4bPQ7wjg= zfec: required: 3 segmentSize: 131072 total: 10 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:SSK:zlyggm5badnoshujgf6jckdaxu:cjtasx6gvdlsxn5w6wf4xltilhhqq5aowc7tblsewkmvzplkjlya format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEA0rjXHJzn6mxSQA7dvD/ZGq2Ot8GytcTkwcCzqsSS7zWKiR5l 6n3krs0tEkZNteHntn90oQV/LxB3yzoITuZmuu+hT/RO+uC82NV/F1Qa07mJlfDY qxGvSfWVtLgvpkh2ruvO2N8B1ClRKcB3mHMnIU/ajHY12rQZhKIOqjvhrqXPI+We S0CjiFbAfYViaCsb1wabjTxg84HhbXVl2GKjujecYhiwxINp2BsNAVvuveuEOvBK Gx2CpnvQ2QWv/Y3mZe0o3TeCy9xAEpMmZIxHjFfrLZOsCifxB1VXcjxdGiVpmhcn 4gxFTldCKVwXdjz/KzyG/gQcKPRiVJanBC9GJQIDAQABAoIBABahHwjFmOpF47dZ YVqcCLaiuNbnCEgY8vATv7exEI5703rSNuOtzWcwRYzW2/WSYw3oNiAstPHa9OJw QwAmIhYlMc+iTvEGPYGTu+hHcfIW1L2zdbE5Xve0VfVoakWTNpumWzpTCKE+Jqcz MiS/CQ68wp2e/D2WZb8moCiL8bqNhfo2jhuyXuq+QJqwCFhsgNN5QbDT+SrYCTGV sg0sudx0bcmuZ9+1RATuBuzgBLIVSbwyWcAtM9YRoYo+DKdlaFCFFAbQdo/G+tnj 
+scE2KjiPE/mr1//Wi338gbjQZ7OjpVcAE7iBBie6Yrj5VZ23Kg0hkFQYGz+PiHr FcuAsakCgYEA3xxtkdDHGNpBx3JmCb9aGqSCO7YhMBFlo1CQhRVVSlNJwSv/KK0g DTYyhwD/bIqC/jny3Xq7RCkaVgmQHw5hMoOBqmNuqzAdBHHUL1xvzYKMjaZ7U3J3 HLSl9xbweADUO9foGOjZCJuSs0bgL69gUry6CfdZRuX7+fZscYChq2cCgYEA8cji 514ojQpIpckd7TGyelLQbSZp54YKWGN//WSY15Ool2f/bmg+DVSAt6fK9KEvfuSw GF1+ZaeYDg6L8J40NMvj3zwA9B3o5Qa1VoNuxC2gZosh+aG0GLsS5U8/EqMFwuUU 4Z6LXzr87o4wh7yGR+2OsXuyHghPqg2pT2QJFpMCgYAqn1uvR5tBfDCk0Y38vrmP 7W2Tyq98Z1ZrZLC3O+QXVuH4LVeJhclhvMDaWa5yJePwfVGQTioIU3Hcjecih7S5 2bWjv2sc+QwSFUzb32Tcddw0E2HsByoKKdiq44783eutowmL+K+9nTrhVODvOynD pJpF3SMJEFaa4iDFbjV0cQKBgQCkCHPYIAtG1IlA4FcLSsIZNwHsazlCN6/hE3AL yyneZ3Djd0zV7KbciE3jS1Tn7kq4vhGyFgvgj3kbYEcUcWBdyU3Jb33+ICSW7Jwu G3EUaxf9ObtNDqWOeaxyIfdaf3szJBOsldFcRDrA5XqLPB2lwsciJhdLRLw3VJlf ITEBPQKBgQDHVvKBfDKNKVhqTFkycRAzaWgtoVljEIfjAm/qT1pXthhaB3PY9NzJ qiyd//b2I/8mWIGdRs5BqzY1hbAGvC0qDGLYZ29GFs4J2s3Vxz2jc6IpZmIbw30v PQKAnbv2N/7Heauqbt88TPY1G3dz4asI65gCD/7SmmtqhGqCKnZWBA== -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 8388609 seed: yPi3JHKKbWaEEG5eZOlM6BHJll0Z3UTdBzz4bPQ7wjg= zfec: required: 3 segmentSize: 131072 total: 10 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:MDMF:gxq3orvuhm4mtwvw6y56cugcly:x77co4v4zj2oicd56pqyvqzd4kcdhuyatx6itovmdwyylcin4xxq format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEAvJZwIBA/tV4AONyhJyebcdroWZgshXLoo/BdnTDTi2Vj5S8K AtGOObP7foNLz40mn4faSV1uSpFvAKKlNWhZoRUlkyh0hMIQS2WqHUkGFrJ/S8oA HvoZFNaVtVtXGlDI88wUSo4dR1Kbm1480t5M62RyNos9FmG5W5n6RffhuJZBUPE2 hAuMrk1wbJDgdz9+DI/n0KAi+kCPpktBjN0uTgiXH0xFJAImONLVkSFGUVdDSKAX IKCr2LPc4ZGF38gbDspg+NuQR3Zk7/I4IuGevZwXh+YMgpL/oAPdYZe622yb0lkP qmIIhpMw9iMs5bgJHiXKqwVl7DIYp3qiPirN3QIDAQABAoIBAB52B4W2T4QNEJN0 Ak0KbGTsi0vayj4tPFSBQQMLMvA4yp+R2dfGChIEofe3tL7BOYmFGQYb+EeadsfZ 0ujt16RZf73lnUR1pXTNkWJfkYMzBmAIhb0l1SKfh9jzov3A1LnvBHayaRpEcUFG 7HjRqAem1tKRP6cQ1oQW5UJN8qtXcBlXugjlQGu9SoDKG0wNQ0e+47WlOqFribzs XVJK2Ei2jH+ICeDAzNY6KlwsOv+GtSxdCc6hSapVRFKPHf/A93iglSKcFziAg4lh TVrhLwzDrfY5RSXInipYoP+cBO0Bo3LkzF+yo5nVkdBGlH4CTYpjsompgwltECWE DnLLuaUCgYEA3Jf2n7AKjb5LGnA49Hg9lrN3kWGsmJv3AXmNPQCR+4752lcWKPi8 h8Vbeef35VjgrIfoxIGNf5YwAhZy5BHrz+IB6z5icQ7ehHnV/YsqnoDI6uwJy9lQ QzoRMj8xlcXe043+ok33sMbYBDQTwWZ8XFdl8BCDzSwzxQvhz1F5/ZcCgYEA2ttf Se7r4knHGKE+un0F3q6YpSFSxN504W2LO/gXpBDBA59BZubKqcCQSxY5QVgf7hYW Wi7JtkLIYBDOaNKRsJIWR9CtIxN8YQpR0XQxMcyYW7JclhGFpS7MTg3HKi2imfmF 5yVZU43BbDg8dqnl4+oZpDjRuXCQW5csrO+GJqsCgYEAi6EY941LsNrR2SNNudje SyTAO2LTCCo42FMjRoi03sFqf3z+RuLjGyGePHTLYf23AR5qBPBoK2labAffo2OA my5YvpnXX+7khIBGJl3PlVK5WpIbxU+B0XvQ5LhBX6dG2ywXEI8/iELk+wwnsRR5 BU5A9QrPErC2+DQEM+FD0XMCgYAKY5V/ZfcOk9/+nFDk+2BW9MTMOeu66rBzrwaH /zvoDt+Ks3mgT95Y9ooi9lgbcPp7C9NdzpDGtR7b6JBTy4Mc9aJXIGHHo3opBRtj LPfU3FhzKeFZQlWsxK7wGZlVuDrawkyH727xF26SG41LOL9v9UHoWMYj3mML5f45 61jb5wKBgDkaU/ngHe1yZHcp4xODgyS/d/GX1cxcsdjHvpX8wr8g/uqTReGEFblV cJJz246P1u0ISESiLcSDJEaIPEXaV9x5bOHrUfotPrLR9K2q+gxpx7iYZwA8LSvr EuaRLe0uq7CAE6Emd10jQw7CZFkAnlObMXHLsxGpoU7iL2qHU3hb -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 8388609 seed: yPi3JHKKbWaEEG5eZOlM6BHJll0Z3UTdBzz4bPQ7wjg= zfec: required: 3 segmentSize: 131072 total: 10 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:CHK:sghm3tydjjaadmiiuda3flhmne:5fqqykrndg5kydmhwetwqdzria4ap475j2qfmq2gmklzop6y6tla:71:255:56 format: kind: chk params: null sample: length: 56 seed: YQ== zfec: required: 71 segmentSize: 131072 total: 255 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:SSK:c65i4qdeg7vad3lj7aa2nimdua:wcjda3bhjw4zccgjeof5ddovqd4i6h5ab5giecvj3bwiuxlxmyrq format: kind: ssk 
params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEAz2I20p+CDDfJxxNVFjry9xBzepPa0mCsusjrd1KBfvCMx3oE wudp9YJCClLGhwuATEr16z0endWApkktRRratS2RGn1bNQ0cuuzjLnuTsT7sXhwy GXPzjrUiYMHbIuef2Lrby2kuKTP7UCqpQ1VHS2rqaQ+iwIvah7VqI2haVewh7/vX 3CR7BhMCFKAmTpNS4+lKa2UPbpXvRJ3kwjfkpoSYMvrRNEHORDUnQ627X1St5F7p Gytfsm5do+RpcI6DYDPLjb3BDNR3dwgJytxX2udsHIqsUu/wKLK6Y1AL3201C04V FMlrhkeRFc6XfPzaRocL/y2jCL0Iq7b82FikAQIDAQABAoIBAANzzBvXge+4IgjJ Xpo/IvpP8Mwyl+r2pwl4/MqAuh0l3gIYGuovtgjbQUQwupNW8qzSdqOS4eaYvkqG X1WOK+PmPUsmmRUaAcdu2B1W+09xD7hFWeQolP5kL41SzNYY6wVNnwkU2CrhbaaY /zZavhQYVIypMPlpmpq1v22VeCOEAiSiKicn8LvrDuB7HsZSV0mlHMq0Bjgmi3tQ P8CCofImp8OnuK1+Zpm5hyPTKvsZJ1j3D2TPOT9R8bjBy8NA4rFpZBOfrQd3dX4Z 7qWFHfn5PprIZEaRmEgido4dn8k8wRRDR8IFoxZcDtv2PjlSMVjaiPt2Tse+l3tn PxlYpUECgYEA5F5hvzh5rGFjgwwhNnP0NFRE3N6/Bs/b+K/2EwxUqQvD91XtpeM+ aiLKqAhY9X66PzPgZifDDKwzybGbdlyEYXS+dl6qwTvPusPoEIGs+gQW3BGffMai 25jlGZaoklr2ZOKaC1ANsXsGmDHWuIcq/IzLTRedrouOqZkaJM6kyjkCgYEA6HnU ktCyk4ughuJ705e4UXuaeGLsrj1nOBSZ9asA7QxqTFFSi/DKHXjoxNbUd3FZVJ2g c36knYO4VsYMwgaDORSAezF6j2MW0INkULQBQl03M9HmeqT4LFMW6Sto2NERIvxF R6T2mPWh1IP5PDeoaVRkuuqLcJrk36kVzc8ZyAkCgYAKoMHXzl8LQLUK4kOhbyAM V2elB9DIFmBcYIQJOuetvlhuaFdZAwxikB/yVgEd27n7OwTUfEE9k74NQvDDP2cB yhcbFyjHOWtfe8KPEhnkwM/3ifJsMipeIe13lWVe+lDBPTKCGEWq3tjduGQPzmqX uk2z1seF2gTXq8JluCA/MQKBgCWTc6GbbBHfMr46o2srDdbV1Lz6uGjdce6lndEQ p+Co7hGR33bRH8otven7E3KO9rJvm/yvDqqLHOOhtXQzG0jBoJbJA5djm89uPWux /LYeXQraNZfDTH3VnFFp+9N3z35JKmWPK3DD2zl/b2ylTmpgArwXpxw3XSEtsmGG xRMJAoGBAL16UQxn5k0B7R/bzawd5DzYX96m51DOi0MoZPEYXaOBCMY2k5wsvt7F LsOBVUZIz8Q0vAtYxC2T6uZRj9Tgg0ufjUq3uc/KJGvY3ZBBC7o3GBdB25jLzF2f gmLCud9YOcaZpKzcdDx3eHRDe/cWUoN9d7F0A/b4JMib0dV0Pgr9 -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 56 seed: YQ== zfec: required: 71 segmentSize: 131072 total: 255 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:MDMF:jswgkqtxgcqw6xrjivthxqi5aa:6a62r4e27eqtgi433tyjd5ijounpuelqd2rgnsz5vascudnp33ka format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEAuGqzbMA9HFkXV2SDqVlTEYR/Q9+wvjzjekq/SzLK/A8tMWb9 HAVggYq6vNEqowspiVVIjspkXCPAxE00vlKQOpikcdKM4euG8hi259oFgBQFx2ER 2HmEljXgRmkf2VuYWdoUA2ApMQiuoXRM76WXYA6rzhWp4PFdmEiv2zFOzOSWwM1J MnyzaN89Q+bgDjl9He5MArsxsgVOhMTNDWHmvRY5rSWzoPv+9Bs7Um6uO4nX30wT kp6adeYfhndTz4WN0vimYhyIWThXrgaeTIzDRTzAObG8V8RW2l2z7g333PNYVFUL Cdjzo30/b6otmHgQVfb6a3icMLisCG6J4UbWxQIDAQABAoIBAArTrC4uY21qjJct +JiEvOlVHvdK5sXlw2bOrx2OK+0cQ1INwr6LppoMXUC7GKFNr+CscAVUEWF4cYza HzEs8ziWyrwU+VOaf90Zltlp9ciKbw7AyUBX3VvH6h9wH2Aj2MADsIjvMxPkzNh/ 6wlnGR9DqpH/jcOTSrmnS3hsqWdCyxdm/S+9yMYJkBzgzDFBkh7ze9nPL8lpiQpa wFDhfPJOww51nFfksQiVBlMjh7H0XmC7NW/KoZBGGsp4nNmIJVL+d5cehZ6GB4B/ GIhVz8OAqRtLNU6GU3e88dkLZM6sqoJrw6KOQxMR27EydgZP616gOMEYaJmE5Ghb dNx22rkCgYEA6lGKGvs5ljaGP58jtENG3hXk9DoQ9peVvYPb/BlRzEFIY9PdNamp GGpDg9STFq3DWL5/o9i8ltG31NZbiZSG2fBnpV6oUbUYTg0PqBMTzIwKSOt6PaAt mDzU2MpCR6HwLBi1uvM60CQDB+gQg73xPp9d4gvzLgrd8p4iaTYgEkkCgYEAyXsb G/sgl6OUUEo1UwMAuUoIUyVL09sqxDzMq+ru0dAeOuavOBCjX9Bk1UtKH6nwDxv9 vpkdgw6hsV/hBJmY6GIVhR0QBIPNIgo6Y0kzq/pVrC0n0E6ht8ZMf0y4kFbFPQb6 8eATpd5ORQKvk+D5Iwndy5kaKZUhLzi0UYK+oJ0CgYBTyA3ycct4a0x7KSKyDLAl Lnzr2mtAUJkI50HcFQ2LU/hXQWTCEETW5v/2/iYNoNnNPGgVJKTh5GCvqGmYetPw zyWwGnViqbbkCYWEmjWlGJmA0zmlGUXUPkP4s/EY/c0LZ1ZrXxazX3z58b8d6+d7 da4y6gTsfJQ5cNNq/SBgiQKBgQC462ooNkblplcbkeB7PghOB2q4lUSRP1hzH7Ji H4/ttevo94zeEjdAW04QjbeMdDZGR9SOOI3jmWxCFdO6mxbCQjOqJtBqtGVz1ptc QPVR5ML48cDW6TR4LWJMfCfxIhKJPnzXvmeKFw0TPbHUMem3hPiyQuTGQX9hjdPB 9BPt6QKBgG3t1vLgr3+pHCL5JCKyS62AP+n7Fr6RjLnpzFPHLW1MtJoaGeFlEaP6 
zVuwxVY54BIcxMLa3lZiuQBxUiU6cM84wvs4A2bHjUmjFnL1FyUY7Y7Wr/gUUyB/ eBKjH7ED1RMIqzY9ewqF6RyxsLnahfSW5BDfM4XesQCYgOSs/f/3 -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 56 seed: YQ== zfec: required: 71 segmentSize: 131072 total: 255 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:CHK:2wvrzaobtfkoiqeqwryjzvxatq:7zzaycbdhepzaqtdrku2sbb572h64ggwstu47osa2gn4ol3eglmq:71:255:1024 format: kind: chk params: null sample: length: 1024 seed: YQ== zfec: required: 71 segmentSize: 131072 total: 255 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:SSK:zbygwa7b7p25n3zxovsemlz7s4:5eklpk5ywpbaqzmobixzkurqsccvzrsobgveqgvfa4mnslut2oca format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEAnDp7uPJokCdVpnjWHvYheSiWbRyu7eLxgIyLBNB2MpvIeYGp 8niIIfv5LlsYmmw8pZmAwN8O6IV0AMZvT3XENWhCWRfayIOzTx14NDyR5hGoe+Q+ pKVESHfmNXsH8AqhR2qY2SCeYKOlNGRBUqj6DqAGDiI92UR8FWg9xZBCSN8EdqFQ RSKNM1AEoMa3OC2rJdz/BOlplbQrf0gFUBBDW1VzhDSWz4TkwhKtMkD256RG61q6 PxWMkY8/dI5jbsWoTGRjV9HFBZDx3RvQhBvVFZcS9TPjjHS4wQd+3+8s6SASdBu9 PtqRvUtbwWwQVBYBr1PlJWuO5eYnfznC3eDNIQIDAQABAoIBABZ8kHxRX92H29sn P2KbeLvwrJ4t57vT04D2ObhKrQihxZw/no+I68dAdmBGumbXt276hj68nG+5bbYC 2ditEb3CMPKT7Wi5FCEKE9go87MSzZZvhti59PcdUuRVvhG95fLak/+Eo2czhr7g w0o7iip6vARix1Yzdky6gVGjbvb/8Qm2W3TdVfxnhZq33SkSHQkN2VbMihtkqqBr UXAOf74Tz5xKTiRRHZCMP2NU8PfDH15CMyAnu2dTl471haepUYzq2nN5HnYxNTwl CJn7AI+R8assOiODwCIkwPWIylmk9BsWJfVwS7JvNBf2T0frXc8VKrI85BDzZuq4 06XGITECgYEAvvKJ6tO/2ggRvdmqoKBl312g9dH751nOEUbezEA/t+WTvEALfmnZ LsFjTOLvlLFRo0R/Wt/Pb0ZY1qT4iA9wsA314t4I+qu76+PoCv9DZ2VRXvt1504h /LsgJX616FhFtux/sEOvdIx6n3VT//5yshFhwo/4aAYGIj60y1kerw0CgYEA0XPu 352ZqPHUu8sq9q3yBWSqXxpKAA0XMieIPxOPviqtBHkqMj5W9bTdS/eYsjmvgSza Tb/dV/dalqkSwpAFA11HCDcI3ejV0252KO2HyNUIF2+bzyVU1W2FyFCz9yCLY/gC 7ruMWTqde7PVpg2KMBi/2ltGhwgODSxVyidFcWUCgYAw+XIoOaFGYbVzNSXPRvR6 AsCq6+2pG95/jebNClmNaCOpL+ACz1E17cHzUW1TfNtMfeAQRcElcCyO+QcJlrQ3 Y41CX+J7sJplWTIFyAzYsyLYsrQ93EtZUAFhvIsZibJvxV7GrcWNpg45YdVmnjN6 unyRc22p+ImQNPcYBMaa7QKBgQCMd+PLtDZJR2YUS70UkrOtSkW4Yjker6jOyhRl uQi90IEYbuoNqCFJx3JicDrHzEgXqaz+V55qUElAoUMjmNLD3tq0d7RKnsxIb9xu tl5KIhS7Iu6rja3HNRxzqywGoJza/ol48e6+KMFVJNYz9wCmIPMJzg0OoihKTWF7 obrAwQKBgFbXt9Cww42MReP5aOzcn5udgNQ94YtGuWpfn7xtxi5pD2BjKjH66OcN 2iUReVkhFsjKo1G7QVG4p/QhK51wxtFlZiuDU7AW6gzDSMGjeZQqB0HEtFoPdaPK c86Pmhd09zlzQYCIaA9tZ8vbUBZ0jHq0rBUPuPg+kXVP7D/px0aS -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 1024 seed: YQ== zfec: required: 71 segmentSize: 131072 total: 255 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:MDMF:oojuwcpqotfqvmyuhc245awdpe:vzxamqps66hyemt4zabbos7cufrc5ugi3ii54vsfqeqsmjc5ezjq format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEAsO5RooSEJraTk5Cjb4wZd8vjOixmAQwBXC54S0di42ixXyhH xHCLhjK1TuCiC6GaHdYLXAc8YW7RstLhxdQ1NGhCljxesSedcXqvkcfKKyxWhe63 06gCAB8y3hWTEgeIl4ZBlTo+xx1phtpoC2ujGsIgnrwLcwJO+FSIRUiLjCfC5OI6 NACIlx9sKopj72B3N+8TCmqUji821/+jmoiqMT5hRsjmjal1KNqeOczC62ieHD+R 54vi7EkFjZGRHVpRgwVyEvRhy7y+RJW6ezfkUKW+JhLZo4UfAX9AMPqwYS0ZPYQz 8++Pefs9ULQF33UYcekyV5H+1Cdf1NUwEV1s5QIDAQABAoIBABtAOZacbnY//K7n wiR2IZ4P6ymUmQlkPflituhxUEvSXi9X1uXsp7C9sqs5cfv0ofYid5FvE9+139p5 HIkJzEAMJuVY2wTSIy/NQ6liakMICzOJtwqEf/pg08bc79ABFQqxhPxlAjJM12oL zaakp3SBneCU+fZ2zo71BiAVslidrUWUPs4pc722tln9ZHVchXp8RylDk577GoRh paYAIOAWdaPsSmLTtju44Enpb0wmHF9xt+rQyDvNgyRbsDXGg3laU1V7ySCfvKjL P3P1GpJmfT2hIMq1uBQWn3adRUvsxlfJATWLEJH8uIgGnRj2rqI8QSJGKdrU9Eck 3UzsNmkCgYEA6zqoHJdD527EHVtB2NHgQgGG6mLVv1pE/+w8SNUQMYc87ZQyLZVM 7PZkOrZGOlL6Ns/zQ3j0OP3ZhjsXoA30gFLVMIby0jmVmjSV+i/CwOb5IvvT6vV8 
n01yQ6rGGbUPYqoMffS1SxkoJMZA5pIe887IVuwjwWcmCfXgKIhpLuMCgYEAwI3V kpHTJzCC+MPtiN1CqMkAyzvtk3ON3D2tSgqpppAnWVzRTMzJqVVu7qClzJM4xp5D sxqgt3Wrh5flNPD/BS7S9WqNFbkwFhmjh7UzijDJYBHJs0l8oo4XjFgHWIGOt7p9 hNUR1nwusFYgAlmQ/gN62ycS1V4Fd3J7YkjAN5cCgYEArldKE+604EnDRrLFSeq7 mJBDK2LXYzyHWVsAj0aC+wJt0PP+gLRgUFyJis5fnIi1dHyJot95uufCGe+gIftV 1OoPoijSvab6T1FcOxK8+HX3/srAlSsfE362Cpr+ujzsy0aXfZ8p4yAhFahun7V7 BV6kM3BPS9+kXMuEOOZpdtsCgYBplhzz91TDG53mDYIWV8Xyye3Og/kdrvKuP3/j pv8qX8fD/9qhc3ZtjXR8E8l26Y/rkeNrtgFFOJgrjUZZhoFA9VEm1BuzOs48gCil BN7TOzdhn79rubNHbAVLpwW4Kar54qBrk27pn0T22vNIdpbEQ2I+BCoWqCz1N+ii l2QX1wKBgD0xAfQZ9D+irtHqR4tCSCznbqmGqCzxA/g3PJeCDf2cNvdtO713i/51 SHdZ2RZI7WitU6ovMEyAXP22tFXa0GL9UHEYszooVegMx5/xR+/z0fgWQQ+IQuAD xXlJCybYPBqfhAsEqoDmQp/IrCllKC92U4oM45kDEfHxssZ5aXvH -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 1024 seed: YQ== zfec: required: 71 segmentSize: 131072 total: 255 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:CHK:7wi5xn2uwerkjzqs5ndn4eimti:llrws4uig2fpi3rtla6lk7m4t6zudjhszdfjgqtihghfrvs2robq:71:255:4096 format: kind: chk params: null sample: length: 4096 seed: Yw== zfec: required: 71 segmentSize: 131072 total: 255 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:SSK:lrrasvfrww4coivsbcorx4jtxq:phdrpge3wvi4fiabhbo73jcrponuzqz2js4ow5vd653okjkilp2q format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEogIBAAKCAQEAlWJVkT5XFuo8cXgSi25HvT9nFeNuNilLT0u0dZq7uKrANJlG aNDF7WU4xWXHdiCod/qiz6o1kjyaBiSp0Epm2xJCJvBrNlEhRp2e5NsAMGZhuHeq 6Lm9QyRE9qp0NtO9KBWyfh90r29MeQ8jZlnSNHfGE7FHYrCTtBr+dvT2WoovzbI2 ZhM/63HVX+PMMIVYQWTqugBtznDzUNP0JM0rpkCn+sMbSwEIafRf48LG92bHsaD8 yIrDeNA4JRbG9DlPZLDEcvciIbs2kJxMuIKjyLq1C/LU6PMykcWPr+tjBaH4p0Op yUeB0OXugqACzMMx4ZgpKpHWYPT6gwgqjJS7WwIDAQABAoIBADn78efPUWGxKSYI K/aYJ1uDhUF/RpPaYoOUiKcPmSccjD++cRCVXQaBQFCK6anmEk6D6HeAA4xQXp7c 01do0dTeGPRhZNQwGKD+5KLDiSbATtUaCiqMkjTCGsy+LB7uAGHTTjXbguDn7ECf Sifb3JaGGUoiiWl6zdJemgBVpFxO6F8ueGWKywCHX6L/5VLMQQvAAQaxEMkLiEYL Rm+wcAWuK88XNETTglgevCmuZDb+0H9U+9Vbh0wR74+tb2XvBsxw9o0O88KlFaSN mrCrH40p+ShNumfE8QbHz8ch6CQTZpPLdC9x0Ziv1oUo6CGAOA1IGmz7rMyWjFu9 3xx/JH0CgYEAwoXBg2lz4YooGkw4yEdzXffJXaYxvj2BgJKUZyb/ZxRUHxHl0LYY WZvPQ5UyOZDqmqvkfMYJRiN4rW6J/uPsCp2NYdXQVRngt3THqKeHii71+xRuBObB qNNdfyT2HFuwbxELM/1a8r5QW2ebDU8eLlDbiYOEC6Fn7/foxM1uuHUCgYEAxJiQ wMs1eu/dG95kQCiYX0EjgYmhNtQPhbom4+9t6oIXl28CtCmVhYDfJsI3/qVYqXIl OqTOW6PLaA7uuojeQYmq9ehqldi5BHavs04DQ6nDncBIDqI4NlNG/EAWxQQghG1j PDWUn6wZMBdGyAKCX/fiz/8mDBPnlTm2NdiGqo8CgYBgI0cesZGKGIP1a1Js+ZM1 D++/jxHqme9VIhyiVo3H3i4tJOVWH4ktUGpBVo16EftA8k98s0uGFKXh4U3mYbMZ FAD6J3hNdvqu8NJ0ske0rbz4mII/feSckcoVuqjAHzi1y9Cjo0W9zv1cD3p8O2wJ LAE0l4E9VkpOOVIbYgSkuQKBgAZvO+WjgQeaDDGaUMusyHftqNzXhVhHDo8A7b7u Gjnfsif5sSv2ZHdvJV6eYrjJ7qH1I3TM6hgjv0eTnYqraiLY/6h2x+5JnpyfydZj ikXPq6BhJ7qa4p4ckak168jc/rd24RWaZ1fmiRiC2oU3V88OTPUj07n3eM/wiJ8w jA/PAoGAVPnLKxls+vDRSdc0aWW8Ql2yuiw/zU/GhncTfwfiKIrU4sF5GW//n6J5 w9aw95QBMmBRPS2dSyF0iqvK5Ny+EXUzVCAVi3s45o/XlFz/RxkrljibI+5eOBGS lpe0EC6J/AhY2/6/5213nDPBoJKrqD6kCq3cA0V4ylvIaXoSjAc= -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 4096 seed: Yw== zfec: required: 71 segmentSize: 131072 total: 255 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:MDMF:g4lr4g47o56kqvgsyikfj6ksoq:qs67p2rpyurqsuq4ivtebxyxyxi3i3qqxvvfzlhyk6nuumu3nzca format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEAyocKsAG2eamQBV0FKLNCo04WL512g7j/7OeCwMofs5xmj+3C 2StysnM5fjeS+zGHd7VOanAx8XbtNwwh5Mz/h0c0wi8zKhro9900FyVQklsa/Vnm ckOt4km7N6BAaW/6qscFcf3xMM9rzbvEv2VvQsY2WTn0PbdgNHxBQKqmPyeHhfqi 
O68IItuJ9Atnb5nJYm09+FAjAAIOueg8jxCoqJCvPD/LEsKKu4mxS3MJeKG3iqag Rl5ZJhMbxJ0bBBxm4BfrmSatcbyxlAERlyvRt3w1Y23zXhJBnf/krfJNlwYliu8c e8sH8NbmGGEdJHw0kA+1Inm+DJTcO7EJ6Rm22QIDAQABAoIBAET5M7cYhjwt9roU 4W5oin/SVrT1pAidRy+38qxUyfIiCD3pQ+wxI1lJ66EkLR72UcP/j8qpFiE9lvEh 5SMme7nnEr01VO/4hTHw+E4Pq76EX488AEMW8I+5+5Qos/cUp2Jk8GJPkUZFxdml oo5qbbdoiNbfaWJhYtbNIG31EDQ+V027Zd2A67QzJy8Sl0WdhvkB8jQaX7DOe6X9 l2aFWxUH+zUx+T8/2tfH16OYPCLPsW23IbU66Wq55izeu183Q/OzfgS/tXX4RNpg i6zxihOVPpMf6+4WVBh560vW9O/2ehIbHwTsm1yVWcqYgWV6YDahBBkO8lrqKG8o vHOGg88CgYEA1yRioqjxZm+CmF3y7GtXJsFnyBem55OSX+CFaFCUDashKohMw+Pj KWN92PiSpIC/PliROd2sm6BSw/RBe0kTXWZAJ//TmHo81dCdh2B1KKSlsjZecOPJ N0r450/L3QtlzTyC4iGU4ThnjXLmqWK6yM1XFn/UyhhgGpZsr92jrjMCgYEA8P1e T1XTv65k5YR7CVfJHhhcFWkMJmRDFU8h1jW6A3VIYXXAlx7k3Ju0VlB7FbedmyWz hgEL0Q9Gt+7y60+3dMmmeFk9fkwto6g2WqMcrxr0cl5YqFtHjYp9Q1kFL37TuTax /0s99AKkXefp53tDy71ilQjFVsGc2LUq/CrR4sMCgYEApGTbMeviOiHvKrpvS5Ri De0vfkgEc2PiL30Cs8kOuLsRJszry6uxAwlROqAGfckbWWqX3h2zLV/+nllgR/J4 55+gWnAzoYmWPtOf67gbDilxq5G77ItCUAvr0eS5pHh3G7KnWF/MwaQ2DHHGK5yT mai+aSTY1mx10xsqhd/YmN0CgYBh+WAiOO6Be1Ehzp6Gyd3GEnk9axu5cAGl5CoJ gIZDaacnmEvYJIM+/T5v6QBhb+jvboBx9nLrZ56EoOy5pgsbu++l9gH+GtJjOrv+ VVoQBpFi/eBlcdbBQJB0lPh6usExB3+OHvTtAzX3x5VcusxxRGmT1aEFCGnP4Le9 FVuHKQKBgFZTLR0b+xCu82iNXFdgFnpkl2AhG2puinsGRhqhRaafb7Vt00OL74Ur TmyjiGA50sd4dzJuL0//CZsapGnt5yDqA2wtFJVF+YNw5ZJRVbFqihulfR7zz/0c QbIo/GGC++g5f6gU+PNlJ9yDlZrhKSWmaECyGDGVvqrrPCqNp3e6 -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 4096 seed: Yw== zfec: required: 71 segmentSize: 131072 total: 255 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:CHK:6nvs23bhrpiwiz5prqdtvztujy:wlg7g522rpdoitpm4qwhmctrjhnh4zfloiq6uq4tsvaoawg4slpq:71:255:131071 format: kind: chk params: null sample: length: 131071 seed: LCa0a2j/xo/5m0U8HTBBNBNCLXBkg7+g+YpeiGJm564= zfec: required: 71 segmentSize: 131072 total: 255 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:SSK:nana3u55ewg7j2t352qbjqokaa:s6i42emhgzsmtxxhro63znk4cklea4qmjn3hwyvha6ckxk4ptsbq format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEA4DV02okqw3N3Kia2PrPCZrjkTklwT8WV2zts6g5i/0UKNTTt 75IwvU7AJJhMsCvPSh9OrCeVE3aqgUyb+/oQ1EKD/U+hAEVc1SNP3YaNlAWmK0kw tWbTmBk0WTIm4yFwuVVAXEOWJF+437l5Nramp+LLrP2sS4hAOwm6IYbUZNiL2xal RO3S/vkpCFx0SmZr5M5LbkfO+YazS2abU9GmQLXY7ZajS8pzb0us2MrqCbsFfZud 04+zyDLuAJb+RCFx4zDJ28zK9haqdhdGZcHfNjIu6dn9StEIMwsUIPJEXxzU+3Sd Z13IGWWOOOxnLeXcTH9MBmnkCrc5k+D87aBYpQIDAQABAoIBACnzTpUqJ45w7wm7 uwsx2qyaKRuNyZ4fZ9ngfVEw1myRWCba668Q4R42myRtu6GV3N/vSirTcCT3ZdEy CpiDsjzo4iXWDZeNouA9Tm7yQ1DAGtaMB/lVDz+s7ZrH07dZNSx0K0noGnJdV6vC mg+27qlIedf5EdEDIhN5zzIBNoHUmfEK9PCzkvJWsjpmKYRl5KNWHhCkj303wyFo LhkwIjg0Hh2zjwu21BsMcCQ7AAwwqPCr6QR3cV1bhuznBu6TQHvFOEVY11fykUfE Wb4etekeb7/dEOkXf+9MX+O0TDMdZn1LG3MAqEh3bmQnwK5HdedpxVLY8B8uIb2w Rq8yk5ECgYEA6xZhNg4+FnydGZhNexvn9D4pKO7y7V9Ylsxi7655YOj0PMSPu8d5 Rlk3sH2io301PuevGhDQlToxMQJinSCBbS0m9Wh25puBDV9Hv6/isXCLSykM4R4n fr3iXLlBy+yaAX0FSmDYOLtdmRic+mLftg2vDPS8Y67QeFxxBsslANECgYEA9CdW 40AXX1DUuntO7UUdv5KruTl4ERuDfHA/bQHHSWQhHbDQJih3PSICJxvcOu/yAUj6 s7Jz7HLbwHBt6OKReJgWAsPTI9xya6ktWP2AhX70cg+LAJUEqjPNVrdmO80cOpCx 8CI32X0iKpyKosPO25tyV24dsgtCI/64HC8Ir5UCgYEAm1FO6qrgRHUSSk3AqxyV 1F2ZTg0I/OFoo0DoANjyIp/mdZucJwE0U9EwJO/smz1nB2eE3aDMXjtzMqETzCIk wJ/7RFo/bD7DNbWErtWi3X6w2PiHJNiKWYdU3dQb74IjgeY6r5hqYpkPzs1fMWJP 0XUa+WFccjOWUl8o4ccnbHECgYA/b8k/mnN4LIRZP5iuRXsJP46mvavYphFvz6pr JD8nboC8OiibAYCZvbBZgP+jwJxcgR3Ceudr1BwM0Cl+jP/HGZz+cur1Ml34YyiJ KrSt+uKAkFFJgW6I2pTLpzNE7nIbNWVGQ13HgJ/T+oB68e1ZWfZHiTDmBc7Vk2U6 
zW2wUQKBgQCmt/ZrSCin2teQOg+rqcmicysXn3/DkYYwafeTJa52OJV7JQTWK5iZ vJlkfKHpqcmsLrcUJH2rRlcOgzrjXinaXGkFtvCaWtCuUUJgTw3n7SF3chOUjmRT SKL1Ay9Y5Dus8UREJ6V3JchkwgIZcUgmWgRuW6YHdjCscWNOiD9Pow== -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 131071 seed: LCa0a2j/xo/5m0U8HTBBNBNCLXBkg7+g+YpeiGJm564= zfec: required: 71 segmentSize: 131072 total: 255 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:MDMF:2qmpqupyhsx526l32tjy2ouhri:taqkf4pffhgu4iy2ddhv4ltt5opsx6s6l64exee4c4vmte55adzq format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEogIBAAKCAQEAsCZMCCaVWiSLYmGr63IC88oMBAiDDohGPCEAS+pgXNHpb2bx 3rRv3jqtMgAlKF2bQ+zGXNRKC1Z+b0XyDxBxLtDmXVhqz7FTh4Wsv3NKnNA8Lu0n m5/49C9EXv6RrLxk1UQJ88R62yL3/apBgS2bBC82WlyxjyBjJhv03B/6AYzx8kI9 Z8Vqj0DnstVpaKi1vQnoJtpYvTxiqxzK9PS9wKV5hGHEIVqD05Z1RoPKPPHkxV+t 1UxbggtnME5+SfQ9NQ6w0t/+aNeDRWc7dGaLyJhR9FtASuaqjsORutFXOauAXxpZ 0Pk+OLZs8jmvjyklucoNIQ7fU7mNieIO7EI/CQIDAQABAoIBAAWQUBp5zYHZYaWr 3BhFs54rpZGDC1CsMTu49x9uubh00OC56a3VGSt1wv6vTn0l57+PfPx1oBkXlErD dM/Q/yIxavVL06PXwAGp0TkC1Tp6wUviJbweo2hjDc/KTqOcF7s8uOvAKvk884RJ mmQQIrNRInBcOSeCKF+NxoAlamoeEiPh3YU6T9PMHeJkyB8vtaB3td4+XQTX3Qg+ gaajC3cwU2fV6NEU+apem4dsn4TrbHMgJpSLWxof3XaJSdsAz8KVV/T7hG4UZbFK HfEriY5xSNuv4u64vikNPNi8hHaTaU0KAa52VB1wVzyUqjfSaotwCL8acjg4Lizn u6VO+7kCgYEA1uc4dVic62SSxJxoPoojZu0DrhEXgvadFAxN3X6udkYNdCs1Ox45 PuPUpTWjvLuaVbmEt6mU10tv83A+wxjWOcJXetHsWwPROv4G8xPO5vUgVyrxysLm UxCMP1FE2rHaQpXNYtB545V/SV9kV/+iVxmlGJ1VzWKKs40GXVwESJ8CgYEA0dXb 2XxB17O3FK23zSFS/yVwivjLFq2IEoCwhHJ6dLa5VrRH6NvUAX4xYIKiWQBcFsUK fOMBhAe9Y5+RCwwuPmP8ZnFPeaS8mjAmQIzuXxPr0pjXM+j//D+2eAL65oFm5ivU LB9qAtwMQ9yNwtjotal4yBl3HP2ALbPnQCbLz1cCgYAYjdKllbpYKuWaEUTX4HCr EemZudo13HeWEtHSvOayHM7stwMd/hYMWXuyZK6Qod7AbLH9SiL3dmcUKX8CS5Qu hUX5goK+43DEjMG+hETfnqJTU1TNFfe7BekAUwjK9Ac8FGGjKK7EkhA5Ee0lINAr o4J5jYCANwIiAbr4b8sNgQKBgAmVO7od+5/PPFA8csVyfSjb29zs6dF6UVmO+QDD faYw5hv4lcQjrfX3fmfK74EjDBGaJBV6BIq0E8kl82jOwJnm1RMUn62NgXOFOWn/ Ra+f6Egw5LshK/eoLTwj3rOCO2HNpJ3zPVMuG31J6Et6vn31ZGe3CgKP7TepHKmI XAx3AoGARDvoRZiLJEmUDQOw1mI8NtrvD2oKuO6YkT6pt1eZhqPN6SMDUK+ckx+C nG+2loG32i9F8IcBCFWqeEXO/C6jvb1df3tcsxtaJFI9L4c/Nhxx15MQAVhgpwaH zN8TUe04nOhf0OZZebwoDU/ZrLY7OjiW/vzDmeqXOjINS5kbxmo= -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 131071 seed: LCa0a2j/xo/5m0U8HTBBNBNCLXBkg7+g+YpeiGJm564= zfec: required: 71 segmentSize: 131072 total: 255 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:CHK:62dtbeowncjj62jnwbggaufvbu:6r2sapg2cm6dvmylodyxabrj63a736uouzsqyacnimgo4svnktva:71:255:131073 format: kind: chk params: null sample: length: 131073 seed: /N4rLtula/QIYB+3If6bXDONEO5CnqBPrlURto+/j7k= zfec: required: 71 segmentSize: 131072 total: 255 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:SSK:xann47c7l34cwixkcegyjwbrzu:ohhz4cila5kkv5ruixjzrltgx2e5q45bx2bfjpjqbyi6mt2zfkha format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEogIBAAKCAQEAvSRDhsaKiEoAW8+6Xt0bjgxl/FPZxd8iSWdGcibGedmJsvK9 hpAFDrqSFSXJ4pLjabqvhnJlxhc1xjlqbB/JuP+9eqYxm2FSzBZr5F+mezE24zQ9 tqjCHxbm4JQmOgTeotoum4WXS+mNp0YQSm2NDil71mmN288mZMGt73GN7sYz3nti ubABLMONRwZqN3tffAgzcTaSKON0k3pimr7CughlTGSSbnQ/Hd+G8jxiSTuW1/8i p0gt8NvDw0WaOxbyG2JsAxqG5HN0ScbUlCMEwiobSxc+ajAy3oyMJGbsPMqTPT3p 0ZKiTJKpA5M0uMhF2rQd4F56FKo6ZjkYSFDTGQIDAQABAoIBAAI8Xc368woSy3ew xQe4PmPXkq10AkscAAhB5ivIRVvAYUFrxcDDwanyztipv/3jjzhubzYijEBv55m5 wp/KxzQrVb6PtpHOVmlSMVSB3iWiDcxYXD7LnOVulfkWpAlwieWYuNwexqYsX+Yx ZlTmGZ8OvEeEq06E+UCDDf7Ns7tPhRP9SMdt7QpgfyL8rWsqvh6i+hNaNFXj81Yi xp7S4qgLJ2XgUhyAuQuO+tJxlWVkMhWAcQgKiEWDZGMTDHhm8+v0elc4iup7J5SR 
0a9KKlwe8iBwihRyvWTXgf9puiVoGmjQPT2OwXqcrYwMBTIsf281am37Yo7as1QZ 17GvKTkCgYEA5QHevOUc8DqG93qXgcT9k9x6JWnRC6YmVWhaERDRkPANNCiTRcBW DAVgL6O7lZav9qq0CRXmF8X6Ukdu1MYiYuxxxGgSoKY6WqIcaKGqwTyVUlSe6HJk +9mSbnCbE+fdBMYhg6CcIeOvoTeXpFerIeXxqI/N6d3FUbA9MdNhlaUCgYEA0295 /nPiDcXgv5sgF/wBL9sR93MQX1IqyhT7Y3t747odvkxkBg4aQFpZFM/wUEIQFBEY 9adWSQq07nifLL0mrkazF9wrDBJhyppqMnqZStzAjdEj7qxYb3/e+RTf2c6/r0YF 4LC3X5tLd2G4d+UZCkcoSltdmDs8ncOkIlvNVWUCgYAVB2Dus5M+tAEkxIsZDX/D jiFhQiBCE5W9jgGHQ6YayxBLU9aCNzEvlWbJuR1GlTm/StmRZANm93UPDSQuQaty rgecY3oiamE9ZVl6ei315JxJnR+idK61ObtqjMiQwV/YSmFVdvAfZIsCINq56pr6 V+Ui92GPMiAmaiqUYra5SQKBgCjvHB83MDyaYri1v7DlCRXKw9+0VycdMUuOZF0O Ox4LmlaNU5AYityKoVR2LYBcSeCYrsxgaUQa3oyMrcRrmmGDLokgBvV/WY9v9b9w HN1xf5X1N4+trjFoADMY532zmUjFtb2aeOX5mtKyCJSttftXa2V56tTeIw4oIk7E lyxBAoGAN90N6kHoe6KuTq19gc1MGSy1yctOk9LskE9U/9kZoNBK94XukC9jTFZa g5VhtchWKaQmlKIhRE/m9LIIoRp96EBmDMV+VPqNu+xqGGHCLX7hofEfYDst1YLH y8TOOjHiY8XpfqOnftV4ZCRKrwR4BCIIsXPKJ++p1U/mK7sw6XM= -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 131073 seed: /N4rLtula/QIYB+3If6bXDONEO5CnqBPrlURto+/j7k= zfec: required: 71 segmentSize: 131072 total: 255 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:MDMF:opuv6wbozu5pmpb2qfidrw5wfu:x77l2jh4dvrui6plhqzcip5z2f37bdwj3wivq74w3khga36muzoq format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEAtMN6z3XTb4STPwK01804o0QpJgxm5B1pxh5LPc8DEZIN/iHn V2wNfPlqoUGNT6/p63eUrJEQYzCCt9A6YlzjkRrgaYC5sqmSevkZ8bwX1QsUh4ts 1eVyb1idHEpvglwvZkxlq0JOXcartlePm4pGlj7/DxkQs44NGOivmg7AqzKILz79 VX9HjFTUYliglfcPsDUyulNm47qRSEfBKiaHHdszul1hLhfupVg6oNF3Q+s1ct/U 5avxFEfd/1DLz3HRyIkI6I5f2bshoS+cQ2nM7I4/y2HWEb4ed0yEztbBCauVKNdP 83D7GSJAfWvQoAjTJeAqqZ6N7MNMAkfce3gTkwIDAQABAoIBAC4/IOl47KpIUd+6 EohvocDrjFeGrsBH4irkzz01/EP/iQLuq6BLLbw+l5BAFCZCDGfIxUnNJ1MpMxhR 9s35k+Mo7Ccx3tCd37MEjiWxiKth1VPEUQj8VeW01yVIyfShHyNeAljpcuE9Fetl xYD2xI5l+Z1kPUii3Cj2Rw70HUjvC3rEAfbdnr/larJKPajWHKE0uCoSX3GqX7nt QtmfG4KrGMz+xXvigLH41zNHevjnvnqb+HBVgOAdHo5Evh2VBBb+1I9l/8tdjEYS Vx+NAal+kpHXZETFmflAyLRk2whcCyQtuhye+ITJhMAtBxvMkm94RC6izoRRKPDy XxOxRWECgYEAy9CWVhzXrr6R6hQKXRBuUa8mV0ogWj91kWYF0rSMbm/SORBBIUrS mX4Cwj2YqqdSKmNw8mYvBL67lV/sQ1h67Mv+g0Me2PojqRUnh2TjtcxyRhhUGkRK x6DZUrh0WgybPsK5zMeT4G+GtZvzeBvAO/D8FOVE6H0zytax3OwUTKMCgYEA4wv1 m8WtVAsurwJVKmtxyYLsTMLeLnW8NO+STlwqfBQXzBxWYhQJjJmfYWRbSTlUPUWJ UCYUNiF0Dusp5dL3yPgpDrBnpUQ/uw+Cza0B5V/Vrh23kvkicABw4CQSQOwCGZkV CL8bnAxI33SQ9B3PHTqunxrb/NAqq5FvP11OHFECgYBTwmwWBZJpwO2MSiIcLuV3 ckiKdO8ox42UbF4WQpa3yAKX6uMpQGueItgVZWT5NPwiaW2AYJgQFiZW8+3Pm2wh JpB49zuVJe9DzGrLTJ38F4Ia5mKhzNECi0rkoONIIogmWbYrvxU5lfvBZM7A3H66 44VlPPd9p/6B7It55BdPiwKBgQCZj0wld/Q75HhFi5lYYGUMOo1heWbWG3EYiHP2 paViWCCkPwI5wX2X54sBTuPiyXBtJGuzlp2S4ttg/7JNq3tFJHp4Yd0nzNohxWLd gsbGgSO/aH/xWqjtAY9WOW9TE4x0DbJJQSAGUdSztV4YjVS4WykhmQPyoERL18hb HdsnkQKBgE9xTWmSxMInj1Iislc4Ru88KyGMVgpU1/IEQVG4zmu3js/KgHteBjPq EZV38+EW/RMblqDu6883JAEURnsroLU+KWURE5EilBr5psE+3WHkHqUVDiG5ghtb TRXTovFp+oZ2mz2O2UL2oA/iXJES/fa+B71ZaGdqMt5iR4a6/yW2 -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 131073 seed: /N4rLtula/QIYB+3If6bXDONEO5CnqBPrlURto+/j7k= zfec: required: 71 segmentSize: 131072 total: 255 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:CHK:hk3a5ync7y3dnwowqjqoa34eae:y4f5b2xqa3lslh2vti5fdbho2syqhbj2p6f3enlhxsjbfpt7zuta:71:255:2097151 format: kind: chk params: null sample: length: 2097151 seed: uqWglk0zIPvAxqkiFARTyFE+okq4/QV3A0gEqWckgJY= zfec: required: 71 segmentSize: 131072 total: 255 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: 
URI:SSK:7hnqxsumt4npusjqlbhmmo35qu:e7b4rmbxwbgrhguhjfbcc2pywgducrmjdzcs6opoa4z2dywalc4a format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEA7VJ3LNata8TXtyH/4NhdD9I9oIEV3zGvsKLFSzIjUWqOoM3X XZdWCl8q0ZC9NTZdDUNDQmoHCLNGVQjbeXkMI9G4JMGdVxafHivxCpa4A1HAcPXw 1Z8L09YIXQmgPCxOiCgvdCcaVNxr97PiZgndAiGr2XQ5XU52YWm1M2F7bPv/67FB kaPKlS4lAOVTQ76WKS6x4z1B6HpVQh9jQyvYYlfmmY1zvzIJvBMmyfMxLQpgBSe3 xKfSHW4Ow1Kjl9WxhfBw71hEr4ZxX6GMLpFhW5aPtmUCZwl7FBLQosC4IJJYgRol FSc5RsdvRnfPYbJ97+qnmpxlRRICBm9MCXiZhwIDAQABAoIBADsA88xcG4XdzNwl Qd7/LDQQy22qamuxiMLb1T2a25kUax2jz9XfGG8/tf+ggspGF+CCRqiuf80z9VqS 9y4+YDxPmf7ZfGr4ntr7hdRiIKICo1vyacxS3LfwUOgAyqvrQCMuCo5QYoWSv+03 9iP0c9Rh1r4b3V9LcLdLdtetduhjTGNfPE0w3GazezdAmWmVeDe7L8Fts6XrDdYM 3x6Z/0rk1daMO/1j2LFfu0bbtWY62m7++ZlFbG2vNnFmZqUF2xxpCEbjIm3PwshA DRKISHqirfQ3yCMjNgn4kHUOFAIMK52IDmplb1I2gknoUKdLlXXuy1PuEty8nOu2 QuPwlNECgYEA796GhEQhbkiP6UYCjaRkbpJ7/YEvwYo5RCdSiNvfLes47OtwLkK/ lsfqvqPclG3O4AZDDTW7QCqiu1cW7tx0W9fEz1t+yHJclrIYG3GcmN1TtXVreKfo MPgxNP/XzmXMevD5qPzETf0yyIQTyHaRA2Yo2/kev2jRl0gqu5kZ0LMCgYEA/UgX Gv+QGUAVEy7us714+7w53IABy5mRq29pAVAfm4aMKEogX7LPjYytMEUmWdlnSuzd rltH24XThnGHx4gfQDbrXHL1ln5MsUtL9PyY2yzbvubr9TqZhnwOcPhvChfH8dXT 0H1BXTs5JBDwEZu43GVNgxCMNy1ck7aVBy6MVd0CgYAzK9isjNBI86fnzuyqhOB8 CjnzScUDV9aBqJXd5nIFHMInIM7sv4aZxwpYIyLic06H0i4pukW5GZ9fseONj3Av S6eLyOwSHPuNlm64JBORNN4vvt3vfnp1P+1XbiD+wg7OR2wrVckXDiXwSuThhhHH lNqwmsOpd9YGnPmoza+JKwKBgHHthm8fe4rQF2q8lqSE2rGpNgGoFqalWi/Z+kqb 5svHVq4cwbkqLlAGcjSfNiP+NYcvSnvOFWF6Le5wjNnEsgHpci7wiuV5xDePnggB wyP7ZpDVQFfbVwl2LezE4vWQQuDWBOPoI4mzRP2jHMle2WVRr+7/d4KuRdEvtJM5 beiFAoGBAJZxp7CjdpRZv8nv9j30zj2b7C2Kn/fBusD9YeI3u1P27VkZ08SaP5te YrH+YndbGm0I0CYVhozsIVQK31p7iSx3jEBZTmNbXsdEduTFb2n/0pXQ8Ods8/iy JoBOx9z+6v/JMYxoKDbPEohDL9lT6/TbWAUr/k2gv+90rdFwVVQs -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 2097151 seed: uqWglk0zIPvAxqkiFARTyFE+okq4/QV3A0gEqWckgJY= zfec: required: 71 segmentSize: 131072 total: 255 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:MDMF:5udu4xzy7ffr7mbrwuxxzus5fe:h2hcxihkyq2fpv2maprwi32kerubawnj5yrpjcomxj5e7n7p4yaq format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEApwF50oSlhVzG+vcDRCc1vYwk8pdlqMMgnqSHn4sj7zqNrXZr TJGYDm5NhltudMc+AXFOjStmNO6TtSsU/ttQtG7SJLI+fu6rHf5nr3c2AW1NgLKQ py47GId5AHbdyLJdVqUqQuZeHTHN+OqgHxEJ5AjYB8avGvAtOjGG/1m95d/VHe38 nB7rCIZtNomiO7eFN57T/RwBFCMazD7vhTYyq+bRb1jg/STG+b6kLl3jC67r3/Fp RqXBVO9WpHjwbGgSdAGR6eNt6jm2eXXrv1z4IFR4DdzgnaCtWZOIKmfdk9jLG6ZI xqwznoBq3ahG020HxQMUWHRljEVI70HgZBMSBQIDAQABAoIBABwK2uZXAKYinITS illcziDMUf3sHxVV4nnQ/bbz+a43YkfQvRan0eUGb30SiDsSo55BZOO+eFSGBQZk PAvJTsVlYGLqDSVqNRB9wfJMLaTSsjNciH6R/DlTsiU6UGZdUN/2LuD55q63SLM1 zno49bS1KXUwzwFSd/2wCE+DRag+BlETP4hCR8Rl8puZq/2KAT+e+wLLNyNZA+OK C9L+PgUv3Ac7e7Ll9hvhx0o3e6WKhus3BMbWyQN3fDUt31leVoROXnkvF1lORNtH aaCZxdSFvZDJqTtEZE+XMQND3Ea1aneiWVLD95Q/TwIlQfd6TpvuONxZ4npoyk8V 5CNHU+cCgYEAvlTtbjqSHCV0/3OjvXHNjnH0tTjPXzwJO/BdKiE+BVnVyznVb4UB ygcxDyblCy1zEBOxSszpKOc8kHUh2IxMVe7hLVIxjNjxZgKso0GhOJzF/Qb/tkKN H7EdXU1d8lpWewztR1p/ZZrZ+dI/+m0kKlX4cD2L7HcD2WXbNJ3hj2MCgYEA4KBH Rhoi5tsGkI/dGR1KFLKDehd+tTWUqO77hs/CCxEwLilVu43Bj8PmvAG+MazXjG4G N5RxAUsPN9VhJ2jeUAReCLCkpEKHJ/YuVEAZjwDTypAlgIBIc7tLCPhOYarsohUZ 3+LPhdvBoV0CMY9ZdhTXpyaghhqEuygcEoMgWXcCgYBSmdT5E545bOAbxPn4y5zk BvymcWM993YidyxXjlm2RMiODCle3qBqJzjZVI3ujejzvzggOFGwGLqmDs+DhU/T s3oyCwvKDpSlKt/1chQf15ntN85eMP/CE0GlLmBpP19sw61uXA4R8GRNETwG2Lrr TKgnPe6tzvDytkutyB8N9QKBgQCai2u+PYk50APKPlDeUJqBdviibbvNrRmkyRfg /twAhUji2amUqsk7worjW0eiIcsDYUeBwe2l+CB2R6baWHpsDzUrQW1lXihjRCtH 
5/otu2H8AgTrTleK2JediklTRSgds+rjcMdaz4F/JeC2fGwOo/Rjml3jJiegJM57 piABrQKBgHUOJRCcLh4UEvI4qsKalh28maFzjLqv0Neh5ENTU2W5Mv1YSISjqRYK V/hKLURUqDl+1/8reScAjDNXIr0gXf9UGkdcyFWnPjkCEuu/hz30fPegYcnfjLLk BBH7ePyDTDomBJK8NN2NybS8jIx8X24OQI1aPRCK0yLFJcmj9Lwa -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 2097151 seed: uqWglk0zIPvAxqkiFARTyFE+okq4/QV3A0gEqWckgJY= zfec: required: 71 segmentSize: 131072 total: 255 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:CHK:qa4hwhwtd3cpvut74biixlv6ye:uuj4ujg3mcf3lqpuvvwf5pszcvviyb4cm2jjicu7ugiypojjie4q:71:255:2097153 format: kind: chk params: null sample: length: 2097153 seed: BTBX/ampNfLU+ox7xipBGiaSbgC0kcB8Gy7BkJB4oKI= zfec: required: 71 segmentSize: 131072 total: 255 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:SSK:v6mjfmtvbo4eibevptf5g4neuq:pqohdp2lrbontgxcna5cg3ni6bgzljvxkho65xjkvbsrgxozjm4q format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEogIBAAKCAQEAzHCyGaqHP6kzJgCNhvnbkwEgugxEUDACv6Pmt8k6W/EZNti1 WnlINd1d1AvgHV1OSW5q7xxoasO0Bjy7UoegZPdYEvk/neqT3ybW+4SncCqCjo78 ighVTq+MBx9BVLnQQ7NT+6clI891veBldr5h43bZlhKfrf1Hfz38+51sIYd1aS9t xIVXnp7O2aOKEHgMMLjY/bqNBmR7idiXvB+lM9U+Lm6lqgEaAq/Owb2CV+a2b63Z ddnDUDksw21/SPgJzH4CP0N2UH4KZvMGqRqSbE2fNSGSeXXWowRop594EMxT+sJI FLUuincZzdQeM1fhSQLSBIYAVsRvAFum5fygYQIDAQABAoIBAAY/XGX15N+waUB8 TFbnXEssAebFmLHVocPe+546r0afgcyAB1y+L5N70hH4ke77yrhqQCjR9qvqkp4N LZFKVT+4ok+kH8pQ4Jd1baTuixpdpjM6keOa+RZoPXB7R0kSS5fCC2s5kqQ7Qwcc LCWakE299EzGgWw0/QIZsBk1WJhWqBncC4rNL+5iviWWbWh+33je4syQR31R845H nyeBNpsYDK06j0RljUHqJ5sbe9o/YE7lKj0Vr/T8sTw2Q43riJAF69aDFjr9C+FS C0vaua+r181Bel8WOI2g2lSOvOfMydUoYNwBYaZp8tYH7eJQXmp6FB29VCmvVdNc JjPuMAECgYEA0Y3w5z851Te3PxRWVrGBEpuHBtTawzz19K9VVEAWwuJr2YBMeGrF t+8OUHzN69A8l9VlqWyx9HgMYYz52ULO2UFh8A+1CRYxfBck+TY4Tl5HEpiVuNwD KJ8WZ9L2AjpE6i0TkYO2uGTKaPn3p2wsfh1FCiFBcSosNi6yz/DlWmECgYEA+cCS /3ZHOr9F0+w27YZc0rFuEYDGeOSypBiES2znii0GnU2ACD9Z2XgEG2VLwSE3U4To JH/p511I6y5VxHyULkRsxyEnXozevV46DJEkurBbz70S4U7jXvGAyGA0kdvPEuPN EbhNFePEwO+hCVx/8hkclSIBLSnIYN3vEjGZBgECgYBZzhCtqaTpQWVgvSB7Krr7 9HcbcGEIRrnJUNKqtoSKpGo/3gHnoSp2txZVXAcLxkQRdbyJrTFeaYw0yivQ9hab eK+2J6UX7dDrMyf/PUNIIpMm3wlbHb6ky/jYKcqQDdS23vaB6AaIY3lzH50IvQ0c RwLtYm8fRkmINt8eykggQQKBgDZ/xFQEnmR+aqFlEVNhl43OdANTw3uMBEN0qiG/ YQMw8hmPWNnz4QpoexTzVMWPFwCdpv6X/xWisI/Ja6PVv4wdGFOXs3yZZt2R2z70 yTwH0fESBDWwPkNwlbaj77TIb3ZiyVQNkJyvODcV02E0kyLkQe11HyaY0IX6x/mD Yy4BAoGAX7Kv8GIypDDzgEGW27QA8RRVD7Cb5mDFxt3359RtZCXqH6MhkvOcU4d+ XhCWb9sqD4csSwSoR1/eT7g0uBoOPuGMHyZSoAcSGcoDm827Zjphh36DEBmwCbCY L0Hh+TszyB3ds+x1+1NbAmjvPxYiFn0K5tIsbkdHfdmmhuS6sd8= -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 2097153 seed: BTBX/ampNfLU+ox7xipBGiaSbgC0kcB8Gy7BkJB4oKI= zfec: required: 71 segmentSize: 131072 total: 255 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:MDMF:a245vdxigmfg5vqawu5hddp6mu:su5w2wqnksv23tg26ffsuosxhtp46zxxyx3speyruvj6c5uqaswa format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEAmtdn1hwB/oRemDFMiEzAVbcHkLSTomQDzvDP90KoQ96bmprv 9q+PCTkKlGcxuiZDnMBA4dNzcJG+CPPgxR0IBGLQ07ncNjtU0zTfVzOz46p7cbaT jZcHK088eMKAySWaj55DgirTXIMqsmolE58mIn60OOfLkhAAslOej61MagYYj4CK 0ID5Ij/ZX+F6d0DECTSo6iA+B/a9tt5EV+9vNS3axFdrEQb8aiWynU5GsPUGEyVg l42n3LUpBvA0kg/macI2BY+Q3xjy2OdKo0BC3SCcmiIj4/kXXwStwyghlo6gm8DS IjWeiSVZb/yaI1Jb/BtBaTSqC90OLqI0xCjqPQIDAQABAoIBAErmCTc7YwePVgZ+ Skvf/GU53LH1dzhk8qamO6KaHrR9uH0Hly2XbDQE4IY6iIZHvgrTwE68Lqn0BZ1l AoO2cEtW1TalP80H1Bc6CxKuUsS8kWvG6gbiWDht4o1zYEJsKyBvaK5NMuIcHIoi 5/5ezF6BNYIVNZZYoU2hPyC2rjDWMxqJaP5gwCszWlC9sAZJXYVrh9DusFz3S2A6 
H3gYZLvowbuiOS3A71LXcTtqmBRIRDV9xvpVt4l6lfF53Oqq08+g3+0btVRV9CB+ QUaFBPDx+7fG+uoChfN8xz7WNY+aC7rA9YwABac3sLAxv4bnkQHWfz1m4t2Wm1EU Lcmn0O0CgYEA0uKcNd0kgeU7ks2WRq1TiobAxS3t+x4/iTE4BMcNk5hrUYlHRN+h maQjn8gTHiMpPdF9aESD8z3yPGWKlU7o6hC0GZL1PLfqFA4ZSOkmEGkDDul2eN51 Duzs0lc4tv+PrV+9rEQv4rDsa+lMSohsRsj6JK29bHpAHi6UL6O7XY8CgYEAu/d9 I68Z99L/d3ORDo+mqbIOnQfiLLlxfr1G9dwIltCqO1kOQMLqMbNjU1+EPs+TbYGn e1C/N/UxKLH4J/m5Z/EJqDWvP6pPXsjY7TeVnl29LPbZRcu5aX0DbanD6zN4Fl1A 3b7nL+czFw0Rn1hMuwrFsHtMFUbKRhyAJslYbXMCgYEAuMnvXdeYzOXkjN/vRaFN qf4oXt+/QCOiQwJI9w7BW8rch0cGl1hqj2nf+XvlHKxs0AmInVwkT3nBkKDdjbXm rGvUlPBMSldSGx67k0MRoqGSF3gF4yXzZw+++RWK0fggmyhg2NmrKDYmBO0ad9kR H/muD4Paj3qUQp5IJXKQlQsCgYBxC9SIPIw6nvyj465O+pg6oOrnCFG/ojwfBEkE HrRPt+lZziKjUla1U3UeNGj9uauqBXsr0BFg3ycUmYxsxmT6nV24e6kNeilIETVd 3bsvRqM6wq9DqdW2GsiQELTS5N6JXMZhVqoGBl+UsnhxxBJJv53LmSvV9AA9EHEG Yru6/wKBgCU6ccUsqtnitFwdtI12V9v+zhr1oHYahPf5c3jzjr4bhDWFFXZm4pag Vi3DpaMGaMBqfFpst7MMrWOVwLPGhEKiBVtHiblKlioxSgQf2rzGWJr1c1o0EJJf V+/ON/eItUzjfe0PmWDrTQr5MxZDHc8ZQloCo4FVHSXhrHIEPscL -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 2097153 seed: BTBX/ampNfLU+ox7xipBGiaSbgC0kcB8Gy7BkJB4oKI= zfec: required: 71 segmentSize: 131072 total: 255 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:CHK:sxbeisgiikrymzuqqb7njizgjq:2ljbf6mnq5mjc6kljctyknsznnyqle62unninepj7imdazch7o7q:71:255:4194304 format: kind: chk params: null sample: length: 4194304 seed: 2NVWOXvtwcAFf0J/g0fDC+ypQbnCU7awZoCxcnhpkVM= zfec: required: 71 segmentSize: 131072 total: 255 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:SSK:ftvoiodksdpqeepc7uozm4osom:tbrsewof5gvjlf6lk6fsgssjs23rxgzdko575t7saubvyhn7rmbq format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEogIBAAKCAQEAmv4eV/sJqOOb9XQF6bLeYQDp9y3wcNXMJr6t5VzzYNZr4mr3 mA5y18fH30HcmLIHVDGr4nqZ0jT8pBQOPjJ64pCLGQVTddkflwHAqVfaa82XBM41 2EVvmzzNnlOadGjL2QAwFo8CFrIhgN/SfnBJHZpJcJFOIcYzlrgBgSmrRerDT35i 2UN4odKlK0EUXQ6XKUR9eswoKUjic5nuowjgnSvoKC3qzBZqiVks+d8DFHSVpt3n UzAhwp/LgSC3RP21kALqr/fWZ+6iZtR6rsTX6ZeUFVoD4JaM5SdTsmgEASjT6vVk vA32+CVjPmCThnHX5LAKRaUGuA2Z21+qCYWVxwIDAQABAoH/NBEqIGQ02oeb3Nkd I2TzT6L+9gp4u28XJezofiS7ncxqcaV9h5dS/Sof+uAlOyaTT7VgCLUm93bVaElU f5B1t6bXE5C2eOB3vELadgkNVym5keO0MvMgiwXiDU4IlRKfaEan4Owpx3YPyztl exQ9e7RY93fYx3/N1NP7rWhSISo2KN8yimvmYrRe0SpVL+kFCpaA55sZdcGwcxSr dFISGFZ1dFf5zkEvNIF+HOFE16AkM07PICFaaL4XfdFsfNE7J/T3HwVrIlsaukMG u2Rky79ZUDhxaZZUMutvIuCun6apcq2sjQhPIacH/zzWrbdThARSTRFpeDVNdfAR uSjBAoGBALdxT2ixGDF6wXVRZ1sufAmbBIeyYkKI+WJqD44dykDGBlTcZk8YYKB8 VC3poAIeN62YhflJHvQr32UvLu34mRVS1AePkBZ2Za6VWqSgK0SGRjQv5oIvBgQ0 R18EedGLZnjsgFTSx0eHyQEmYuKJRgfFXI3q/WNVhbRrBAH52En3AoGBANhMEx+g X4wxLLRAcRdjbjdfyj68LlmF8sbdkvPDyPfnyp/SUjwAa2ZzJqPraoTD8olReKVO tnNsytDJSF050KBRdfe7PHgKBUPxIUQ3B1dLFWilPqaB2L7osWuVMDnl4j/om2WN 3up8Ydo09UAfPai8EntoYQzyi40fRT3oTJ6xAoGAbossxDT8FE0aKZ1tgEgJ3Sv6 Vd+MUPYD+mdZilWvXMs4Y4kRahaRnARwId7IWp5lBQqFqYyDx7Zsf6goSqVlcrEg LpI3zSF58vPz1ILkr/2ObsJy0P6PTJdIbxzeYAT2MmaqivMdvaA446WDL2pzthkb xjXWjjaqROe8WYh6608CgYEAgaziTjS88/TLY2m7I5WGD4bLXt89PojS660NnD2F 8DK0RSs4CCcMPMjOornSC7TaZL9GgHz3X64azh/O1a2CyYrtGc/USfdf/sLC+f2v 1gL629kt/W+dfZ9ONzyjRCLxiPUwrSroOVbG56aWXpIcSlwvDHOgs716MupLffkW bpECgYEAtfMQhjXgJrGfS513PistBD01BCoYuurvk0Ja6Ssy0wBNk58/FV7/3KVE CcB3JwtQiAfLvTknbvtc5L+a56FZ62bJrgsfvv2X/Y/fPLicvWXuEFoKAxD5O2FV cXl8ozjDXcUvEjwAZaLH2ui3QrHn3jX2iYl7ohKCYiyXqDbj6po= -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 4194304 seed: 2NVWOXvtwcAFf0J/g0fDC+ypQbnCU7awZoCxcnhpkVM= zfec: required: 71 segmentSize: 131072 total: 255 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== 
expected: URI:MDMF:xa3yy5ihgbskknv4nvxaxpt37a:qrgqgnkwo6qzhzge7as3e7ywnt2ddxex6oktg42ljxnom2n5ymha format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEogIBAAKCAQEApd+7rVZ6XKYuR1tFUM70FiekjZ9EQRcinHLUYZsTriAQDlgg W+dW3OJQG9w1HHBwuiKKk2ACOCms9LtuuU0wNnclZAFX4xDk6Ap27lVTneil1mne arxqyVeZBVDT0A4q9fuRMDGrV1kj2F3RtBgiXiZd3DTT4BqHRFOrKfXjDOD7sDnm CFZyWiDTAAipvr2tj1dhJU7lNnUjktg6koW+JxP2c0MbQqJeIy/99G5crdTkga21 jJVpgRJyH+cubw9SjqRRKCKlEKkZlF8b5X/y6Htn95gfHRURoltK+XogdXeABIso JZ2L6OHVnIrUew/7nnMl94wTJPPFoQiJGT+7HQIDAQABAoIBACemtzwtJTRzFDKC DqyNwDrwkI07Mot1vpQT/hF0Cu0PpI7tQZT+lNzZ66jxR6/r8AKKwcIPjBBFZB8f lA0PNtR6QFGq4Ym5zuJqJ/p6orGnfMcnyR+OOV+2hTGIXA3K6TmigJc69Fi9ygwN h1TMBSEo/jxm03Qpm0a50nuGGBfKaLk/4Qu946LJoMqZMnyBWMc/6wYKVgiRhsOA 2pf6ji8xgCCwhv8pwjJSKIwl4exMpnTQg7h5wtY/oGzYRM0DhF/8rB1XHzeogbjL dbdM3Yc8v3VwxJuUdFf96cfKHhID+aDoAEB3sfRpkLqBAzXDdlDyMErVYw9THMUK J2le4zkCgYEA15GXSPxWeNXfYCNq7f0L6o3LSoDMzdo+yuAHwGoj+waxidoe1Qku hGRDm+41rkqe39dEGuD+13wGKLwDLqXE04b9VS6gMGXhFMIS3yjZHXher99pv5YY 5vKwOI5AYUbLKg9QsL+Kd1tba0w57ydX2q1AcVr6jPixFL2HL1CP1/kCgYEAxPwT omBIdOEJ8QrNxBhm1p4aCXsQ4nhxuUbNrmoaNgofATcZHEdF0ha7vQJg3LotBcHK 7PmqQ6HomXgTmxsLEBRYIkX5CEDQuwA0WMPyUa4OTaOLZ6vuQgFnteGe8Jtu4SjT nUw7eeWnswZ89q9HQtSeKcd/PpiyFcddDOAR7UUCgYBetF+6eOGkhJF2MxkvJRSv H0xIlv1jEpazmmjNZ9QW3IHzBhi1jysYjtQFFUoQIEhcHr6U8HQFRz+NdcwQGlO2 en+hhLJrkNapv/l6gP+hqtgufACBYvfdvpEcx6IRGoD3IXNZs0yp00D+iqaJIse+ Eo9VPZsFg9yIOBvD9ai8QQKBgD2ysremLqulHMcJ2j80YWmRZZhYmoZEsWIVwjCB /Sm169Ymms/Xpw/RnQXra8lW6ukltNiarnC2krMXABUR2Fo19RDvF7w1COu5eavf 29MnkEVTF0Pmfx7fb8txGqZEGOufLQDUssBQZUFWo+dkKQ7Op6dwW/OQQh8+LW/t 8s99AoGAWsgONxn9p7gU1XVOIXKpEUD5fL0IC296ibYQ3QA1bc/GALkKe9rRFgTz L0WbHRw95qLp9KnV3m/zeUk6C8xqZZlKI6KRSJxQczVYoUJYqWo3Iduckub3oW29 uFXwuZLgnBU3W92hYS5a1CFZTu04UEzTe9zLR1fwDl9KwShyG1Q= -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 4194304 seed: 2NVWOXvtwcAFf0J/g0fDC+ypQbnCU7awZoCxcnhpkVM= zfec: required: 71 segmentSize: 131072 total: 255 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:CHK:r7sv6u3ihzys754twjltuzzpza:tpyoxinubkl4kvg6hipls6ezun7t3yyqj6ix3vp6tvnzqirzzl4a:71:255:8388607 format: kind: chk params: null sample: length: 8388607 seed: w6uP8Tcg6K2QR905Rms8iXTlksL6OD1KOWBxTK7wxPI= zfec: required: 71 segmentSize: 131072 total: 255 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:SSK:y7cqgstkbqw5xg64xzpveq2xle:2rt4ydyudlifphjplkxroltqcbayzsnfgvreu7qufhtk6or6r32a format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEoQIBAAKCAQEAumqCuPxXTpGEF+uFm+cidAyGqbwWz/TCmGRlr5bEPrerLg/j CfLcq0IeLayQrhhUdGf41+wn+Y88qs4Cp5jjdcNVn18k6mkEnxh7cHjkCqucSojf cTlMY0C/NnVBIjub+OKwkoMWEDlzY170kRgtyHuXzHfuxCwij7huovRTRgktqxF4 +IQGCyhxRJqgApY1oTgXG1Zt2fKFz0CcZ5RHbatjwxtOCANZyowwfGDNlLjTOZLX F6I6sVx+77tfMJTmNEbk4+ZSA7BrGdXX9w6M9LdQLzLWRgf+cxjB3TEC+6WU0MK0 CrieNwcuNvV2oQDBEyxmiiAM6jsXH81dNPkkCwIDAQABAoIBAB/IpT0xGRm2SdVi PMeWIxOyRwuNnD4ct0kQZR4JELC41CDoaId7txAkF80lzQ1B7LRkPdNi2nX8bBWb RmyY7r9XbLPdnwewnC8cF3/XvNns5Jr4t1Awust5cKCyYUaa7z8CN6TjYNGnWfsp Z32Np9C08e7UzAr6k3H5ujNigQhBoP64Fi3ZpsGbZCH8MWQKY6TBLwsvlBMeiHfg 0gliCDdMc2AQREKEV8fAptVGJg6nX5lC0SyNP4e4SFWPbWJaF8Mb6822bU6sb81M Gp5rrEdFnom1Z3/fCAIsFvWEZfsRluazFq38gkZyDnLGZj3JH83mVtW4eCsJVq+m buO8RYECgYEAu2czdC9YfrH6CNNnsbdg9P6qVTUKL8pQE/Kpj/nY5GOzQI3zB82G qf3Maed+gzrfDCOMeMphD9yET4u6J5s+qZF0JVusO02PQOfBoM2DPYWdxDQoGHA+ P8nH95p1eTeZjuEfsREt0Mhi4hnoiuXKlxNcuD/Xzl/p3llFDNiXOSsCgYEA/qbQ qg/3giRNBarJvaw3I2vHc0NMq/gF4pIxjsijekkMjt/Y84zQHovPtTedzcrEDYst w6AuJl1ZNwAfBlTJnFPtZRcunDUmeBjJwkMDhxWdSRkOSbJD0FISIJiGB/fViwnY 
roxqfi+48SPzMTDwevDe1KQD1iwTUw3NxAIikKECfyMjNoKSXgVjWX5OJSMtPwCw vz86sq5DQMB8v04/imtIRlPUSb0szBMTg0BYJ2BzqV6dS7laONjAgA5qJH1Inncs zpoylhiIclO5IJUF85WVd/9RyDLM2N8c9mF2lJAl3KTtkQOiNPTwnZnHQdLJQzMQ blIdplkLos4N7uR5t+ECgYBzz4/EV+CbekDhG+wF68VjwYeCnw/GgdTDVvNc2Vin q4MfkyQKl3aq/bCn3LRSvC1vb2WPu1BhuEBzqAV0DqlmBDFJsUJMXkuxgKx5QZrg G29dqBx8XatDmZ+O3W7PPuIKCp9VupxP6Qo1+MCIFZa4gsUEddcc1wyuz+9Nfh8U IQKBgQC3YjitqxP1dxzZwTtwJ/c8R/lLWAasI6EBjUd7pFZuh/9igtV/urvpuqog Zmk7gs37xciTlmcDqK9pBy1nBg5dUyJbPjd0TkNPi7+9cGZD68GfvCHfDykuHKV2 zuSE7JqM0hDEC+qXRcw8lg0lODM5X/SsUqmbxj7yWrdqf3u4rg== -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 8388607 seed: w6uP8Tcg6K2QR905Rms8iXTlksL6OD1KOWBxTK7wxPI= zfec: required: 71 segmentSize: 131072 total: 255 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:MDMF:nn7eh7thqv5fcx66toeltgxtfe:gf643o6uocn3hvmapcu57cd5teoulsyrqlouysq72qegisvc4p7a format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEogIBAAKCAQEAxWHP0UAVKpUHireN8BgAJjXIQiynm9HdoIfvGrkdnP44Tghe 9MzIU50xT+ysUmTrXar5oBRDx98mv8YuJ5dpUoHl6Q87uVQhS1Qgpdvwh9MWY1gA NbOqNpveTvRvHGSLyxrysuUwwjerumpPWTVvhgLmxOryytbwsDb2RmS98yq9pX5d wsyuOf2RZ4yRn3y/QmrbAtui5iCvpzkmeqf+kuXJFYcbd9S2/pNXpLL+J6UzTovJ rySjXAGqu0amNNbkQxvL6Ofd+ZtOmZ2zGHQiFIus0AkbjaM60mDiFd5QwY39lXUK V3VTejHWrS16wbq+Av033vq7jn7Z7nOGzGg+yQIDAQABAoIBAFXeUomG5m5q/Sf0 6LPdzRrSZPec86HPMCqpWHT4uZBV7GrOK3k2KaRui0ho/yKtMtPCEOz6Q+6M/w+J CQVCUpiJWFsGvIXIut9JjxZ403BTfbbkTtsN+WvebV1N65SfjU1jwNfg61Bi5buo ijKWE5lqY7ihOdTSo00V7Bf9tcE2P8NTXlM7rhZ47QvFkEwNoxHlSFJRMp+5GN4Y CTjIjBUvBjda78Xl8eOLmYg2Ct+SbwW714K0hNwVzz6//k1oh9GtCtD+n/ST7h52 13mg77iq9jAVqj9NmRWdcd31mX6nFXQ3mK98py6sOuF3NxiFMK6Ginx2rrux7c/G Nmfkp70CgYEA8fV45HO0S9bC5Rk9TbFYhg52XhXue7Mj1s9FI38Ft+ZXeT7Fa63j 6WfOhiGyJmRR4cWdAJr/RZ9xqNyCZLcUdh3pBvpYR9dUyVpTxUVv3MIFHWpkc433 g1ZEAPWzdHQG8yVx6/OyOzsUg/IsPGwc2sG1h4OWf6SC3bGVic3L+8sCgYEA0NYb 7KpnDiwbZ1BlcV4MGIYL+Z7N85/jK7oXBJU0Icpv4OQdryu3JFSgDHp8bcvhMvyj IqFXH2YkiOi/xlvWnzj9QAP7FViuIb0Oz9bWiaNZLn7QB+Bs3oqgiLS2LvhbSQJq Bx2LGIEGjnPWJfV3lytqCI69qbuOlvdlf2CmxTsCgYBazLXLdahJdZS6CNi6mT0R Qcgl0rEmdrmSWUIm6fopYyWceHP5zs3iv3P/XhHO2oLn6RLcMU5uwEEVD3tXdGUX Vm4mkjgi7aoBzgX11/L8s0rcGRsNSk+CWBM5EPuBTjF1ea3g0BkopSkzwuPa4O+L IHqRGk6WJBSAQa5Ogo50NQKBgCo9pJhSP1YWhdR35ozvwPKU6ocrH+1PQdvuYAmF RG4xTD/o5DgyV3D5zQW5IMH0ozB0+WpfyAeJ2Yn3yhKNMPQzysXQCFFhBpe8beqM QgjFCZzl+Z4ePuckkyQTqWYGxjAWVOvrhd8G+hSGSaKT7ASfu2rPtH1Ieqb+k4EY Q6NRAoGAUl7vKlquxaJmvieV9q34/2NCFUu8aBlLAK8w3sirAVM3UU/ISeEYdJfN OCumo5DKFR5COKu6hkeFhqRI3ieukX9k3FMUiNemnquubGvTQUlkwEJjUv0IRfTB +gWW9npE1Z6LUoQ6su4iVRid9Goq7nLj3lfFCT99Y9upLbYcGEI= -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 8388607 seed: w6uP8Tcg6K2QR905Rms8iXTlksL6OD1KOWBxTK7wxPI= zfec: required: 71 segmentSize: 131072 total: 255 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:CHK:3esfsjn7csgnyqmq5afbgtinay:xsrhdomrbrtzftg6u4hgipm5tkaomumbdlxi5hpvpvawe3bjikua:71:255:8388609 format: kind: chk params: null sample: length: 8388609 seed: yPi3JHKKbWaEEG5eZOlM6BHJll0Z3UTdBzz4bPQ7wjg= zfec: required: 71 segmentSize: 131072 total: 255 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:SSK:irynodq3ps65ftyonzektbciby:asfulhlz36ydefcp6pjz2s4osrrs3eghnv7um6kqogaiqqcins3q format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEogIBAAKCAQEAtBL2bv+jmSUrBy9rEW+f3y7FS5qfLTP/jryKOd18rpIELuj9 U63C8bVHvJj5ppuDRy1BGA91tVLgr0bLE9GeXWxI0pNzILiTpADn1B7xjpAzLAJ5 UuEPDoawcq6wIM0t5uNj7jrMGm5jW5C3Bq4vlYz/Jxfxz7UjbJtTyswhzRbxvYET 7Q5OhIOuosSdgrlUb2UK/rR54hDmAen4eaPi4Z4N/FuUlsL9rAURL4bp5LBV0IHs 
gkXUhlgXMc+u8DgfSCCGaWTk6bH9rXDhMBbKGKVhY0uU2lX2B4r0JFql6LjAvzrC I7vrbquJXCKTOVRE2aCSvqb3+83slsfkt298MwIDAQABAoIBAD3G+aNX8XTDMxFK e8VmEadcINSQrcYwvh5mYVd7vGAZePTs+qfAB46rvfoeUxuM44tCI8BQ4XV9Atcv BtF8SwPK9+rCKi+SExijNOVpXj6mCuAw0xWTX3qAy7z0YDlImtRIs4pRwp7Yux55 NclFbc61Kf3r8YnsH0SNvz/mKzxz7NZyrWy+oN0M/VBr3IndorZ5ht34ovOr2zkV We5rbMVpLtR/tYHuwtjH0J0aLs0TUCmzygTO2QCcAGI/XE4rAsCPr9G/avQx5jis DdvW0paMIYPAr1ioYN0iUwvXQVnEeEWINepKlKul4myCdM/OdH4NYOMt4McI9Mbe WOBaR50CgYEA1uUGQgTiyuNXLRkhf2Bm035eoY2NAngefWadNlCJaR9FwGkJYeM+ DAABxnjrXLtMTboKuZN5CMSetIWTFA2HgLnLLUvAx2VPaVTaHQCA0Mdpwqxaq5uN Mhk6QPeDVEWqZnfqBpT6wBGvus9Y1CKaaoj74WAsbjwwwhyxcqjIyc8CgYEA1oTX wUXNngPQRL4QBsdOJz2GlOWuBGee4ZW+GIVw3CjyI2S7+xWHLaAG8TsNqg6GHP4V MLp+XtTzGJprnK8nF8n5gNZIEUr3bPMBJBHWZzNb4EzPBl334YMORuE72EcTRWEa 6q3ZogbepWm/piOG0oyx+yxcDtMpR7IZrp4mFF0CgYBZ7eoagrTuNwlqZBPynEMr yryLWxNhrycDT4gHDNkUVvP3u30jq9dxaidUCZJlcjRSasLGOoLyOmY4IZYVVDwa kKYIRKVeTHVZHRtR+73soScPQtWG70e9aXVJbstU3vqaeyBCtOHiswQZZ2BDFmAM qVrPTFILp8C32w4fb6bnXQKBgDZmwgB1n0tvVCXavV26tYsmAzdHd/YOATDcNLUr Qg/TInTvWuy17O4ZIymR/EkgHcrEdMNCyEFsZ6nZn2jA0n0p72hI70XTaSPsDGIF VAYf9DDRyb6nnfFGtxwqim6yt6Rkl9rj88kvTM9OHhgX8lz66Tf1a/MmgdV4ySKL YMTRAoGAG3JKI3n4MM22AykwVmCkhPDoZk7/ilGkdE7NSrweNoHvePdO21NBiTfU NTIg6eKHJR5EufHTLNvW54L40jiGM6cSJAXUEh1BA9ODsPCsi8YVw7FRPyZBP506 AqEaxdVFEL/GB35RpJPJJIvExR8PqyDFxHmDZ3WEGGPPvXb+Llo= -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 8388609 seed: yPi3JHKKbWaEEG5eZOlM6BHJll0Z3UTdBzz4bPQ7wjg= zfec: required: 71 segmentSize: 131072 total: 255 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:MDMF:umz5bassl5ndo65j6syf4dekqy:jw2rkz5kts253bmupj4vdbxw65tdtahvmtmp2nzzcgglyduus3bq format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEA3JDtfIt+04LLAf9VfMIMYrbtFIzKMlpLx0K7B/JpCtHWhQvp ozW2ttJfwHv+5dB4ykoqerNpdhxJnOxxrCdyHxf6AH7mI5gA8lFsH/0UsOSXS6Hw 8fMJxtn4pH6VF+qmd6qsHYyb6IDb1C9EvxndZuBl2GOE30+WjHcaMJ0oCbzgrF2V mUPTuc4dreqA6Mx8/SUIWRQxE6b2ozrW+eEH6VH3kRndHWtsqdrl9d7vUdqh/ye2 asady6qLCKCwdJ/R9/NxeoREpAm4pnQzSUGPxNHS8EQcgLk/AvrGH6EcP/rB+tZ7 /0P4fraqe781k2pPGrqXTD10zZZgtW664zxaDwIDAQABAoIBABePA07CN5Gv7q8P 7rmcoGYK09fWEeK+8kkeP4vhwIZ/U0Jyu0nLevCcF84fcGJrmftBYLgqYaFT9Cjm uF2C+RWJIhLbewliOvem6r2f8o3SXLafXXT6WJj8vyoSuyoKzi0J9chSNHTpDpHj Wpxuzs8mOLqcJp0TiykFr65xms0vRJGsZsqyA8JDPUBKRckdn6xtuaBgkYCBRQLj iHgSRXCgI+TTL/TkJzYEkSFvA9TRPKYOzuiTjdRyZWlZCK5r/aVHYVv8BZXoRMZ6 zjHqKNoBn0/JWBnxi3YNO6bTnM/I3IOSDoCW7pd2LQB65J48Hj2glnQJanDJ+M0q D9h0wQECgYEA/ZLlcPEphcFk1/8nQf6IZsd0ZnB8TCYSIj+ahDyzlVLUqPlRwnMm kY/Dk8nF3f9b3XTuD3AOYtMeDpzH1Ji0pCpuDrBJDj7bVMeqO3bvG9I4D82DYWrQ eBsPYLn1sKdQ3E4hVt+82kha6dJp4RjkINiQ+1wM9ehBPQbf3px7gtECgYEA3q0u sNTWMSRVZSa8AHk1bef2CwLYMPUWPK9h4feXcxEqWBDwmMKx1cGYqy/lYe6LAIr5 +44V6VfrNhE08dJisR729BMtZ3oIQfpOVPqvGM47NqZi0ckyA8aKfh4EOpzYQkH4 pzh9fjwJFY5rq+qNVNBlJPwwjSa1C24Wbi1sht8CgYEAxfZUoaPk4sNk0ywjneX0 3yh/uym+IEToi0xUeUBagw0zcOeT6Na1GZa+/TXc/79IHNAYunyk/ooLQSUs7NB6 1l85pMYDgteXq8xlHh9v9KxdkBjFpNwa/GlDzCPhp5Q4EIX+iTAK4+7w6vKWLmGc V/g618G5bJFxvQ8M32ITGsECgYAnbjlHXNj06L8qYzqFRvFcHegmuQE5Yhzm8BOA JQyvdomuAInqMwe0l0yGe7u9pLT+ip2LmvRsVoIzF8btT1jkjlwiikbO/P/7VuyK Bb39wX8gxUPYbC0sF/ssK/qJun5c9TunuMwYD194brjIP4d5TlGqw/GA/Sqv9HWK WwbNtwKBgQCHGgwM8CeTCarJGPf+Z5+VJyrjAtybE31yhlLpPd3OgCxxGcczItsA ipKu1boyc+nkLpOcpm7DvRK0RzrFu7XXcX03mdlxokuVuuxOSbcCUTTkWaBUyXhq PUgqs6k7wjKRmpvttA1MC3clLHEYdZ8uDPVyusNKdeRizqxyY8ic7g== -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 8388609 seed: yPi3JHKKbWaEEG5eZOlM6BHJll0Z3UTdBzz4bPQ7wjg= zfec: required: 71 segmentSize: 131072 total: 255 - convergence: 
ZOyIygCyaOW6GjVnihtTFg== expected: URI:CHK:hz3x6hgz6osbyo5he664ntvxiu:7hpheae4wou6b2davtrizoumqqh2k3vo25erhpgrq5w45txmjeka:71:255:56 format: kind: chk params: null sample: length: 56 seed: YQ== zfec: required: 71 segmentSize: 131072 total: 255 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:SSK:m726xdiezljlbxgy6ud6622etm:wycmmsogqcaot6j2xh7fdfl7y3csgogszo5bc6pfaoivwnmlvsma format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEogIBAAKCAQEAirky56Wj85ffM32v5KlYNEzn4iV1CfBQCeBhkRUBqdvIN+93 MdiwR8yeuJRn1A0xJsEA5j96+xIKBg/ID4PsMtskerfMU2LtQgD8LdtKUA25kx9w 9UZWqap/90DNf+yfjh6SolFr/riRtv0pdRLNz2e9pykBAfhnyX/ergYV2OMCeH+I dwoUDB9Cqu9/6lLYkPx+GTiw2G58Sk06/YHrQmtfDKDUxbUmmnM0zSqb9JJg5Pve wEXEGqXeLgr/BSizd8R3ChJVYEp76uh2vNooNJl7Jp0QPvyKbg4b4D3CePFoEoi1 /Wub3M+Z421Q+YNb7fuX5AoX5y0AJpKuv0vn7QIDAQABAoIBABX6NL8fzhtttGIN N8RXct6oT3VTv1jRfnCuIG7yf8a4B96avMzEGltppryx4EgnReHwolKX/IURzM48 ilKw8Qb2km8xtriAO/vguZMPQpTvs7aD/OqS1/B9z1Ot69CoPXfvzoXSNfSPK6Cs t3hxf/MrqY91zs/P2auB61axJp1rT9bIQr9zVpcfXb5dr468l/ea+pISQ3hQDskZ 9AURZ7a5uuUSvqInlKI6NFHBAQevt2oC3tdcYn98scU1h4Sewgw6571Me5vZiomJ cglaSJjf15FtFaS+7j8yM2Xpi2sUk9O+T71uNDsoB5lfuNF9VanMADCnXk0qDZ8W lj0HtHkCgYEAu2XdRh1G6H7Rs0rnEGwFjrMwt0o7tilo51KV4pui26AR6BAYiPP3 TJ7JNebvZF+dTCIqqFK2gcb8Xa5c5b+9nxI5VFTbaoez4ad9f5kWSBCD6I7RKJib +U0ATHVG65n6k+k1Zi2ITyf/W0f7jZIUnB+26B9W4Wzfl8WS9rEuM6kCgYEAvYHF AVuN00PUDREvFHhvp4SzrUm+W4A3o00k8hIPkLsyvsvrSs4BzzaHW8+35AkYPQzH cKXn7TKvjgy8yDdTmk4iLkKuPNBWShqWY7M7bc5OexIGuIFPkp+1WOq9mp00uwqF KTgRduvaJ//xDxZiNsNMYsZ64vp7TGDSOFbBPKUCgYAI7JvyB9jln4x6/lkspghJ uGzcfbOEREqToZIzvXeu/9t6crHIa93eDz3DzGCgJhGGm6XuaCn62jAQggo4gr4U Ajkqs/PTCe1eFKzcU70E54xwmcSKK2JaJ/mYqokbFTUisBtz5z0zj9MQVMg9ALTs jnIWcc+gYp/vSWBrURrDKQKBgHLL7Z9I6r0T1Zyk0DRCUMDVrlJG3b1oCkwuKzdI oY03GSJjPQFvkcEIcy62wdqtd7VjzFz842XY0mfmZ2WRvl82/ZWwZwQH4H27ZWa3 6EQ4OWpsHQ4fpyhW/vACIyFKIes8EDZL9KhpbxnT/R76nDw4Skl7mm1s9svpyu69 /wjRAoGAesaYVXpU7npkNSmhZrlx4kUETa1lVvj4TmT4LM9Cp8gKpVPeFc6p6khb QBUSwuYYgtcvf5oGk09R+q/KWwe9ppMmj5rUfgBkEVZ/ayC+7GQWEdtZHg6TAppZ VsDT5FKUq/VvHFHciSCvQKosCyQzAn107htm3epmi5sP2i6HXZU= -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 56 seed: YQ== zfec: required: 71 segmentSize: 131072 total: 255 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:MDMF:3zidms5g7qzx5flfxcadclukpu:egrqrou64kwdseq7ezt66hkyjqbbjpalocp53yuiqjnh5ot5nmfq format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEA2Hz1ojFKwlGap0pdBpbwIR1nUr2lpVkCrPvXJv0hD8PEGa2c FDXahwSgTqempSiDmxvG/HHl4LRuvFQmEhdTsg0jEolie2h/33b02GQj0CfP7vw2 AxQmtfVWxf8sIP5TPQ1tUaqYWKww36IObzAqA4FnKLHuy2YQf5c3f83jhLVlDOYP wxvSucWuCjrlBOfXT0/NWvmDbPvkiNWhMTt576+0XiXcsKeTGuhZfRuj6sFjIvAi 4hCDepVXy6shquZO6MKqQ0tJUeXgrcpBWrvkr23pkQoi146x4D7mxYdVRzUgyzdU iQrRYJ2XyIuj+T3POB4ymE3gs3ffQAOmY08t+QIDAQABAoIBAAH669NBNxFKcxfv 7yRlI1dzAA1kPmLiCstF9wlizz3bheYEB48YXZvJ2ZggLyVh44kUarat6Wyey3sa nCOJUsk1Sg2vs7bA9X4RhdSB3y4wL9YPhZ3z5GInMMhyvx0ivmR3t9K2qDPHCKAC PAND2HdVcTn3A+H/GIw8nmcWnSvLHNiOI6waEn2cvNvIBKaHxuRaT2bphJ0at9wm BkCyM/uS7d96LwZ6RJ8YSnF3uYCro6UgySA1yECalrDgHRb87v+EVOXrqO+Qij0U ccEBi+TW/6zzrCUinG25uxAafi2UJGSNMOf2AsTGwqlj5cuRrySlLWCCJS3ZuOM1 1wLNKGECgYEA3VVnMVrcQuf6TYWfBTfT/+so9zbvNjUsS/HPdlZtrZHxMylAHsih 0zeHdo8iDyxUKQYy+Li/1R8Cu8psoQ/iZD7eMl4f+aJtzuGywlBlsF2GQDfRvTX5 dSnuoiGM9A7ph9O6muTPUdb89BlNlVvgw6noZgTd32oBKonGUNlO8rECgYEA+mVF lqpLkLkTtjhugc5jwXhvoGLPKhU6GSYNcr29eO3JKUBRgkNJoX9bMLbw3EHAIjt5 WKCL3bP6QjW6R1YZd7iBqXnIMDjdLJYu14bS8rVTw8MGWNvfsFgJVrY7eKf71dNC G7F2yFXKpUkmcm6x4AEfbuKwpUd0uabgCMH68ckCgYBNUX5BAYqcXMlVt237tqr6 
Zb3jzm72MtEMnqZoonyh+6+Uvb5GgrP1QxqxUgMF5ehohF/d/zwUSUb9LxOPmCrv 9f5M/hCRdiqB8Novg0Jiv+kcGePNA0PnqARS4wGIaIUwC8jOP0wlPMMUypoNqRD1 iS9EJEMVvsQ1hfefWqp3oQKBgQCpi9lK45S3Mhq+0AdDrdSuNDahi0ZrYGQuky2X /BJHx/rmC78lTRqWV/4PRlBhU8QdadgIwuzx+eQC4Q55LzufbTee4e9Dd72La0Xc elZsMYu+ilfJ41fbuEDajhpG4LgNWTbyOYAMtsq4kIeQBJQ88YWvN6AUygWnj+8y /uZEwQKBgQDVQWWlaEbJofp/s1B7ncYdVQFfnr8PuVpRPDqVHc3OcalyTYaznWiq 4lHor+uy5Vd3RLzrI4Lm6GeibnSSejYVZkh0UddDA7e2CRJD4ni1f2oAalFPZd7k WW/9ijHdHpKKp4Jw52d7Z/tfrDNukBnCZ/t9z/iYXMBeFf/XYnMe9A== -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 56 seed: YQ== zfec: required: 71 segmentSize: 131072 total: 255 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:CHK:wxmzfzvgaqm3xfzgaqrlndgolm:6t3tqcphsutigxnnzyp2xx3afklcpeytnh543mpqptquvqqahwea:71:255:1024 format: kind: chk params: null sample: length: 1024 seed: YQ== zfec: required: 71 segmentSize: 131072 total: 255 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:SSK:bjfs7kysomxsga74nyejdwojji:qhxtmljd5h5t5azmpoboidjf7ti5yerpokkmwtmo2dw4ppt42zuq format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEAwek+eTfmmunqMaNTfyfWdEnbjjeVm0QMhLYY8gC0q5Pid1v1 fjMuAspOREl88Dn//5VrVQRTgLG9UvAIbG1J052p3HP6AXKP+1GiLaXnHEycgvLj U8uZEzW2JS9xjY8DPegTAC2WdAjcgk2iys4OUWzTTMkRlM8gJ+04GAMWldTbVicr T1TWG+QmmhHIGSVWPBw+uTxsywDBvHns9asJKSCrnc8B56nHtnDe+QbiP0GDSK8i L8HidT7nULimgpwj70QYLRMS8RqbaYzfEy1kx6+tpiVKbsVYg03lw9R1RWHtA4CI LW2JJOXAn9YIFqUJm/9MujpCSVptrBpnIMskGQIDAQABAoIBAAlYGhlWcwTwiYdK UaCilLrmVeToyKC32wA909O86nfhKLbWkNd7vmIb3xZ7JxY0E/dm5cpDQDCHAB1M xqR48LzjOldOpGkTnPIVTo1zaEC3cAxUXtV/zbjSAVy8iEo0GR8k0t49lCQzysfM V3BSlSM3LV3Oy6DXiVBl5eqBouyQhMH6oKPhGR4OuycW0BPYtzNft9xzDeuHYFg/ XODl5cbzmRMHfvtcROw95U1lKKHig+44oZdLp3ghrP/QfAxsMniYy3Sp2cMbHIRL 2ovNPSEugMKN67PM2NnkeE3pWBaH5zUrSu9M5I5jaVXmeZovGuoLr3ss5Sfb2z5V 8NwCBQ0CgYEA+kRQECGtYw8E8b5XHKyc1rlj2KPV6bF/cOUEtcU8r763YtkzHFdd OI7ErgQI8I+tbNgAsBPLhEWAct9aH6HHep6PlwwA2XGJHKtVMY3FATN4U7C5BkIA IbvfSrIux3mBhfS12PN9x+KWvcysQmQYEUeibAOdH2priY/q4QYVLCUCgYEAxlpv BRaj5zntcxZ9AHjPgSGg2f0I4f1AV2r5PBzhKL5XNJLDhYRc1GT2m/LTpB6ie/3P 0xzF/H9SR9+G3cHMQoS+2VrX64+WmwVVLUTKurMqEZl/ZzIp6g4Lbk/pMJ67jCGG E9Y18GaUZTOuwQRKnOuMbNVORR5/h+S5cr+L2+UCgYAERuaXX/v2lWsgNoCGnOyR PtnV+fbN55ql80QBVz2SQ1AfAFc/RL7zGH2D+82rTslH8ukQGUaBHC71x5tirwEZ t1v82Neq36XYN5VdI28adia4R2ziDn6yFOPcAu+JuSndgDEbZA3iPJ0W4UiQWeWP ZgoAjo9A2jC8SRlafyAdgQKBgFywa8rD7qmhry0lqBotWkIslb7n+FuqfYOcMIV/ tVPVxmiB4K4m0T5LQ9ZSHcZGroUkcRZlDrvUP33onVxJMIsw/wIQ6m9gdO9SCaCS 0e12xcTdpuRxU5bVI1BUNVMMCfYMwFvKsP/634N/KD14JOm5RLCi6OVxwASfxG0z x0ZlAoGBAPEC1P9iODm4dxw7bCewFciF5XxT4TCSMHc9WE6okZuv3X8husTtZ1K6 OPLQqVDytIq/i98vp8YF5MMxuLgZGaOLpEPmHSc7w+00gXr2xxt33VWUQRzqcL4K RKEZQGP0jaVDX26z+qfM9ZCuOkiD+tr/vnhzcvZiaSBM6vnhdzFf -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 1024 seed: YQ== zfec: required: 71 segmentSize: 131072 total: 255 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:MDMF:lrzl2tr6irxtqw27psdcusdg7y:6koh7cij6mugtzogurcttf6ed7ul2km56hqu72hy33j2tsn6c54q format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEA6cNZgrmKTqX0wV/DLvTE6vn+qllrAJWP3WehDE6oWhbLxpFi eyBNA4vtkIKpJAdmzP18k2iSJZZIv96WaYLkD9L3AbJrAhp4oBh8JHb34EpfuUjl 61L5XbEJPqgMKWx1hn96HVCM5he4ZekYKT0tNKIwkIMN3Mz6bxIJR4yfQgPtfiqU 364CJnZjFhHoQzzzGV2ObsiDnEzlketbz/x928canoDNwR934pRHrwBYYvx64VO9 g97Ox/YcKqFHpXYInps87WlUbNBDXE26/e5mrOpAxPLFXmnbGKozRWHL6w4Kit6S gDCCka9QSosvILbpjegPd4Nvxv9sB7qcscX+swIDAQABAoIBAF74XAnFmoCoYL8d Sj1t+QCj70hDCrtSh//B5caLwE7VexVhpHp0XYWG2E3BH7mA/k1i4LU8oz99BnJZ 
Go+kO0aIhYydcWcJ3R7hw7HG2Z64aJpsmOhZrfDYB3L6r/I2W6r4aGK3gn7KfUJ+ CDBc59w91nAnpj6h0k7Eq5tzcJJPImS8S2fePcM2zxcMvm0o6iHUJsKPzN5VJJpc GHTZq8/hVu1jyuzLEqCL00XHc3AQteiQoHE9gICeb2fn2jnW17ogWMCqXFq6qBL1 MgPXipuJ36Ngh9kX2OaeJs6KLiElxT8C/y1N0aetnLJlxwzV4Vh4kx4bnsrFBBK/ dRYkS50CgYEA8HGT3kascanqYZAABSglY1ByOWCD7dJHgk2cVdCWcQlZ+rDmvmaj LIJWXHOTCWIgIhYpbnV9EZp+mZ9gmcsobqh+viAX0P3an+GtRdBikWNpsbUvX9OK Me9APdVi0nDwmmfSRhzjQA0b+5gJ0SUEgbKXOl8T3g/yxa/lnkNvTc0CgYEA+OMf dr04EJwKJRwO5uZZ8c0Pf2SEbGzAjQifZUp3HWg25hto25iPGNbU8OJ5HkyZqJbf 5kPpuChRlX3VKOz6s5v4sNjf1kE8bMnnRDk8waD5MJQziEtMZ5utGs78Z93etP7m EWXfsbzTF8cNcQPCdfLc/ynXWRRx7eJ1YXLR/n8CgYAU+YNxr26Zl733ds1Zpc/l Iv5j3PSFSYOtbUHHBqQpBizQPqBSWbfASTppZDeeaO3uq0o/9YXMhFKo6gtOPzeu t4oe4cPSGmL48YHhBjWjAy4UL38Ld/OlOX68JiIxw2JpxcbFEP4N91bKks/Aa99B xSeGEwczpuaBsj9wl+dcvQKBgFJ1ZUpAvJ98IzxSRHmpneknyFerpNgLW+weDDlR 5479pRqtwBrpO5e+LYS1c+1e8ZXSjtHKdFfIO+dsbkAF67WwGj/1SovAx1U/u3h2 AjQgsg6vOzePwvucr0hvhV6gOpX60Zy4BNntNn7tOv3Tggzz7tY3NZrU1D49RiiI ExzfAoGBANEXl4L7qCTan3hhM8z2/RS6bVC+8Dp4E1Kyd1bxzpGmjuHrClf4aRiI kvD8iSd5tI3weoS7Nv4QP5FtLNKlI7zgAfWrli8uvCW7FoPlkF7JbWKfDn9gwSy+ 1WL/kD/fdr6kSg40dBbb2zqC/M90DVvLZKmoTOQWk+7D5EsBNyUP -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 1024 seed: YQ== zfec: required: 71 segmentSize: 131072 total: 255 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:CHK:r3q3q4f4zbsipxauq6knb5dfoq:lkae5mflammzxkoc7gllt6qqyg5gqdxeejmxtgcscblqh5tv5obq:71:255:4096 format: kind: chk params: null sample: length: 4096 seed: Yw== zfec: required: 71 segmentSize: 131072 total: 255 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:SSK:37oycc3shngu67rijcu22qy7ge:rq65qf43dfsbraa6axxhtmm6xhzxeirrwyfjvqa3wmta7rbuw4ja format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEAmpUbuxF3CDJJ0hTgu2qnv/tyPJ/VzSwgewi0bI4e/g2dplol eHW3ss7agvO6XPbI6TWqBM1OhDrlvuGdiN1KDtHg4aaJSQJaq/0f9N5t/f/kYnT4 ipaX3kmkTrsFsPJJkFYYWSwGeFfGTYBUr39cfJELTdoFDTXAd4D6AnYkZHvUXGke 8f+lk7pxR2UVx8udWyK3yO34tQmaBi9uWNACfzv9QMpaYEJVd/wadtxMb1OC2PR0 W6fmtBIFIREzkHsnpdrUp5jYw1yHKBUAgmdtGBQmOvRAyOeUwc9IoItzSlVvh+z6 q2It8DphEixauzfEzhCJoZgdasmhBoO3bdwAMQIDAQABAoIBAC8xZo3t/xEZiUAB 77pIDX6nHXE0uukwl5n4Rlz95qhZL9AhpV7pUXPdgwiHsFXBYgUQxR9CLr5f3NQx vQ6TwJBVsvoxBaisd1IarS7s2Ve6T9dfLqHg5+yNPwRqRIqI7byLDFPtBOyon3n5 u+D4WRwOjAzwiqpFxsS1M56cwu+KBQtQ8/cqWbQeWeMWKHi3b89WhoB8QfLV7DS+ jRes7EB9nnW5wtUXLEtnd6LRD/LW+RfC5QEqeeSnVVVAhGgD54R9wEgS5+TsC0Px UQMaz+4FJjV54yJeB9WueHbpxgOJ9Fq/khINMug8y2KZvtnEUQ5zEPBzhN7AAie4 oWqxN6ECgYEAyFXYTEIfUtxoMGPuySPEntVOo9GRNmJAmVC75iDH1xlXZH//gGov hKA0fbGuU55cZE7TZulDfIarN820ButlprVmPBebSiR0/Zdkal7ogjv0StqziUkH tWB/+aPprCi4jKOVaR1+Li8RRZTgMjvDaZIlmaz2GOb72xFJ2wp6498CgYEAxYjK xHUkykqDTxkZATds7+aub//s2XfXPjFFPgf/7jhr7cVLGaSsIcajPGvPqfMqwqMn 87Pon56yhbfOQor6fKKYXE0BwRX//7AOLn6mFQVpmoySstbDLxIA0qdwyYUbMuBm 9o/YREP6sT+cmAY4iuXiJGu4xo8AOYZV5IjPHe8CgYEApL0S9QKax4S/mKtUvMpQ 8VvvIv8+Lj51aJ3fJcpnCxanqtkmve6TzLgA8iuectySlVnMtZ+0Az6qpWTeWaJR INmijF/NLxbzrWVFCcOp5w5uQO+/G3GWiSwlkJ+dlBiYSe5q+tlp3YiO520ZP7Wt Z67qhIiahrfK+8YnuZvQmnkCgYAXhBNvk+qPUpOTRQ+e/3Quky3NE5CkywmK097E ZbtoJrtikQxBv0LmunkQZl1QhCxhA39sGczlw8TI+nrJnTX4xHqS8m/1BqN1UwY9 LsKi2gQabAXC2KJf5irG6TwaIYh9ZA2d6L38UoNzunjv+D2e+4MShuh2auvB7WYo UMknbQKBgCaeMDyOJEYdMPS9Bw7JYAd/vNS9xOjE2Icme4HZUl+JdL1EDCQj7/vE EldLMt1NT1/msmPH0oOYi9XtCiO2l5jGtHA+gVVNP+LuxsHsrcnFVix3mjpDLbxn v8ZOIynGqz8Rif96TV/668SwVoUm4kG+EBLynIsPtAGJEYeNKIHB -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 4096 seed: Yw== zfec: required: 71 segmentSize: 131072 total: 255 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: 
URI:MDMF:67btu4c4e5b4dictm7lug7rgp4:6ir3a7vofnm3lkackuqxn7bngupp7dhrcezxofoasrwcozx3japa format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEAgsuk1pcCXme4e/8m2RTs86cJmsijUmOrLHmj6Tl+G4Wq4hJ0 SjqO0Q0aoFIhL8iLxKaSm+haRNASv9iwp1QJvc2yL1Pa6PUFzSdwLkW4AYGSL9nx 9q3md7ctNyZhvf3PFM8xSnSCsI0R590M4QdWwS6CaTBB7c4Noc+g2pPhktcbPFZf EZX6X3GrfjwRMGuXHZq4UNoWntddAcoaaMA/lwgiDJvwriwRe946YY3gXPOc7wca ea+WEBQn8HehwPLIYGkPW4Edzyv6f69CbWoghgT6qzUMNuAQ5jcA9QGY5xlvNzGU X2omztyxcdgslXZryV5VFuFBer67M+hB2Y+VDQIDAQABAoIBADWyFs0GF7HcEO/O 0wsBvTlWFOpXfj0/r7FFitYfhTcVTA8dlmI24hTOtWSl8vvj8AVegQfCfvSLG3dp JTS8mncyb/lgCpnipWwQycwlUSJFKFe+uMgVomz5ZXWjqzLNdOtNGCZB6LlEYNp4 dGYZljMevekjJ53SHuSUEaxKU6vtTv+TldyJcCkde+V2STeK9R/aihABHQBVK3cn JrWOcJxGJJ5ZUE0kxVjf8r9QZx7eUODT9Bn1FgmIlUJtcJvUgzQEibdCce/zUE+T Ov+7z7UoAlynuEshFDeK8IEEDWSOqHqyvc0cg1GwRL7SGZo73CBHi7DYS0odNuUz tJArr30CgYEAuMrCS5otqSL4uHp04bPFSKepYB+/7QQgW/X8NL6Xa6gCRBdGN0q9 E/GINWagSkihBw1lbjFBdEKeq3sz+c/UTiV0j75zU57oN9kDDM56tlKhAvwaz8/K gcm6u0eePioWSGhX8bWUsJrxNVq9/CbJEJ6wjI7zV0KZBoMCvhmmiOMCgYEAtTJF FCQNuACFptHI5EhEo3qKq1tdsKyq/4VJEdEfMAgdwCceJo8HyVXNFXBGTyIdebJa cdVYUHVCROP+I65tc4lGcaDgmfQUho4+HawKA7S2BQSEewDGnDA8rHozq8zMEbNP znPhiqh2iD/gGKkpRol14hp9CaSy3DPiHrsB/U8CgYEAihK16ldhFqeSwAR/oMT5 +7eKzs/qT+ZtZ0j9EUv3R/FZABeD13x4mpY19/Ceg+KQrvxLdXJIPd4pQGfmBhpL v7gsx9q9wRVS3afAp6j/94r1040bW3sfDKr2Y0i37Cr2S1ProibS2sJqyDrtCaLR SSHJOLz3BZQ1UrBBNFlmHZECgYAtBwb/kE4QcaDE6dEAWa0k6ujW2GeZ5e7AfMDB urQDXaD3BUGK13RZ5gaG01XFiHbGrTmonBnMNLd5IyceetQcJ/rnddEasPsAzQxG l2ANt7Sb3pmFb5XrbllFi0CX6tazd1nXthhQOrjp9uWbez4Ul0hCHc8AvHruGb6R YGuIJQKBgAwXJb3Tg+jZ8Eq5fvjWc5s2fxrX4Z1BTtY68CjY7rAaZHoereCB5V4V Cbdw2vEKYpYJFlFsS631wrF/7ZTHdkt0Cm+MVo67lQg2uSKJpGLXw5YcHIag1JhT sIvtzyOnufvRg/SA/sbqapMukycDhFXEHQrAi1gIRadkMJfsRZed -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 4096 seed: Yw== zfec: required: 71 segmentSize: 131072 total: 255 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:CHK:rdrzetasw4w7tuweqpev65d5vq:gzzb2v5fzrduk6kx5xx27mvo6dgnd65ym5lm7re53in7xuudhsxa:71:255:131071 format: kind: chk params: null sample: length: 131071 seed: LCa0a2j/xo/5m0U8HTBBNBNCLXBkg7+g+YpeiGJm564= zfec: required: 71 segmentSize: 131072 total: 255 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:SSK:ohn5yz4as4vgezj2r7ohcudtzu:onvxdfapx32vsz457rp3mqeap7q3hm2arhlje5x4ctndu2jfsv7a format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEogIBAAKCAQEAmiJHD0uN13uoQUJJFUQATJKvFtFBXoHEe83NDAc+cUkNym9E c0bZmuT+HH5GPAqkhH1PKcSXRq5/W4vZiK8safWJHzREdi+gI4pHIz0BILyd5dmx /bAdUMXsQMZq/QSzaN3HdxK4lJ1JA6Ncct9K5w0QPC+EpL21mFqgvLmnKmEjUBV7 93G5vw+EFCkg5JqKqLJu17KBpTQYJ74DOWuGQ5kgx9zX9Kk5errD8EDSoVBRnfjb PSLZwoCRdibxem31dDmYp7niBjP7NKPdNSa4bpF+mQE/4T8PCvTNGiAqjUZkz+AD OQ7di4pZPThcqold+3DLCQgg2cNJHMxzsllltQIDAQABAoIBABrFb2A5uEZAJSZI lBcMe5zRMXYeHGOE2JLEWSQIshDNJocNsm7vVGZx9a0PRbWyB4c3mKNhkQDm2BoX fU4fVvCEhC+WTXnVpdPmdZqqQuLjv+0nVaIBj+XyqqlJjVWrFlpVgwqshsDRXNgz 7J/LJuBgxXweqMRQaxUuUJLXEDDs7wW6SR90mI2mwEWi0G59CWekRlg882KKhhTv ZTIqQ8J1VN5IVOFuWW6rRiGKLN0FYuX6nXYcTPneQi8uLGJ6lK3hP+UAiA+G8hAK 9w/EMZXAdBa8eVqlpsSXB1cQTeZNbQxo0FZnOY3IqasLo4oLvgmHI3zmce2xgXyV IcjB0xUCgYEA2Qz1AeAdhXDKtrAIem9+1g+H3jguNsSnNL0cFuy9S7AKcH3BlAHF 64j52VbKk5lNbmysR3cYmeh7CUqiuPW2PmOCVqbG9KKxttBSffFAOHB6UKblfzjI THy7oge01FFGtt2GLrtoetpbdHWwGog2paMeF+uWarjz6/1USgP1b9MCgYEAtcsA qEpuih7rz/f+x3zskcqg1qT5oS505ny/m0Gx/H0WkFzhgh2XM9VbEK29npAivBMm wx7bbIjrJnzJQFpHx8mJoZ/IsLXSKOMi83pun0EhpAjmePVPnl0+0JJH9h9RLJb6 esCLSVLViDIr4Uz1DHkKpHNseaZ6mJsr0xkz51cCgYAA+hz0ODUJz7sp3Vr8ahoR 
DprW9jvHBVWXWC6TL9eeSpmRbg98AhIJAGHXh5t71JnToGuaGsAimThMj2hyGrEK UNpaV3/XxA+2ufNVG8vlNSRnzoiD7RaBuaIClbRLrF38Hr0m4rMSsn7s5Ea5p9lP H4/YHbhcnJ6Edmx1tNTa/wKBgDCTlhFiEjeGG6zur33Ou8gZRPEWFD4lk8ci/nAW FeFJ64WXzApgrc6D7FmAk3KTQTTQSUNKM4fE7lDSd1Riy1tvVv+BGrddXlLenrBA vt5/IOYcGrmnkybV87r325LAu4gWr8etO4rUP4qtHVyOm8xBa76VuR6ohYnRrNwz l9LxAoGACGlXnDUpI90vU+AJ03iIqDDtTklsTXEVtzz7cBb/3iEXtqQhUghhkagv IMrFHv91YW/KqmjWIpsEBSQdkkxzvf83C4xm1C4aUtKa8y3IHA6WwkMWvkn8EM3f s/4FM9Bo7/jtQKGqiLDj28Z5kfTa/CXORqG1kiNtqej2zXXHIMM= -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 131071 seed: LCa0a2j/xo/5m0U8HTBBNBNCLXBkg7+g+YpeiGJm564= zfec: required: 71 segmentSize: 131072 total: 255 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:MDMF:nstlxuewinqw4ugsceyyzo25iy:niiwkb23khhxzlsze3hkphtqke5w34gb3xau2yl4kyevxyjnhlqq format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEAoNBCT+MDYgWIm49IgY5NRDrTwPy7aG0OQ39aw8oSPplMVECX RfZqDzP9Dpc3ngKthBz+7RW4FrS8GH1mResg9m6U89aNh2HURZGPTzQRGMju1REw U0y4n2ZSfo+t2GgTeU6qwYCsKhVNwz2pJNfkDp3bcwW1MsYr8KHm4iydt2QEapu5 eyaalBQ6uPVn9IwWH3ObYYVdfaFDN+V1Ztkgc9t6dk9sLU5r4115k1PKPhvPBnh8 Kz70AJauI7d3OFItJqmHYd+qhrDgXNlQ3pyweBRvHMURi8AqWQ1ZuHr76hB3WPhc BsQrS/dRpIupucBu9Gs12xPtvvlGykRk7CsRFwIDAQABAoIBABHNrKG6gLvf3/VU 4hKRxgUZPCs/76GKfUtEtLA7VVS/1P743aZ9ttUzDL+KRzqDkmEvcpudzXEaFj7h 1ypDczVFHdF2/dkwn/cJu+NpYMEtMZ++FOsL8d6Xzec8EeOE9i72YholHCpWjHLi hzDQg+uIV2y/A4X5AZFU40JD7TwJthat2H0i3ReHycp9s5mSkInTtqKDtE3su2gR TB3RZ03BA3YddUjv5YlkFHfVKfWdf2XiR6kJt+tqPo4iNyDKp4G/NdNeppgRkfPZ BnvTZjJIT8w1PQS6Q2vafodQa4DJYQo/EAdxsBvba+RMAxPwhx0ReN5gqt9g9W89 zSAV8OUCgYEAvNUtjEtvS4V5iK5TLFLgE0jH3mvHbn5wPiMwvomralNwxD/oZ4kf Mu3No/qGxz8JfLFn75FJC5JWJ3YU4LJhNrlNyYONKry0PPxEO6lkSZB4gk9fXO47 38rswz0Xz+cRhDlBAVPXF8WHkt7riqTuhvtCcNFPsXmB9hmtgnOyMl0CgYEA2gOx of5J+X+EFcK+YtJpDnxoFU1NxgKqZrw5o4HCExfieWoT+1SGf1J6H06u2gEwqj7Y 7FCFp5wq+Afzlv/Rg7A/lTOIbnB7+dMvJ4+D321Q378ouj6F/0qLabtFJuxvq1FA MSJ3qxWLrlb6xPN9qiuzMHtg3jiqAbNQx+fywgMCgYEAlsmwRoCSTf82rnNuDU3c iumqWK0+IriqjqPxL6WlkREyUjQqNEsl87g7Zv8OExr+S2kq8v3UE352d6puP4OR 524PdKQs3Py0/KIBJpc8cxX/dSdGomHGxA06BSnK0wTUUv6ZLyMw9lWQzjJeamcL 5hPL2WT7O2Ao7ElS6YHTwS0CgYA0YjzVQqd9ppETNXbPgeUyUNwleiyczlkpVEK5 Md1y/wMwzzc75YRnpWaojRxgT3blATLYHUTwEAsXC7oQ5yjtbnToobg/aRGw5nhn FgnGrpqHGIRts8Y4oC29WvzzrE3sqRo2dCSy2/tzCX05w5PHRrbIiGyvGIho3jAj yGzBBwKBgBrogWqCWfccQPYbPla6ba4gZtLGZgdmzZo9IvpKjv9zzWnvSyAd0JiZ dMazfQuBoPlntxBBlpu9waZaw5K0eT2j6OmczoOflow1YT/PseEm9K+JgzpHejfJ Mj3ne9iBD8PfFtuKsMJKEpofYRiQL6e0lr7ogTAycog6N7vWVk6D -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 131071 seed: LCa0a2j/xo/5m0U8HTBBNBNCLXBkg7+g+YpeiGJm564= zfec: required: 71 segmentSize: 131072 total: 255 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:CHK:e2bjefibvz4tgu6jgf66gw74bq:mugtu5bx7h5gcivevmh2gmwoc2kkhmobrzshkuj2dgrtm3siysfq:71:255:131073 format: kind: chk params: null sample: length: 131073 seed: /N4rLtula/QIYB+3If6bXDONEO5CnqBPrlURto+/j7k= zfec: required: 71 segmentSize: 131072 total: 255 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:SSK:qrpf526nw3z773vbald5la25oq:db3krhancbpchrqxt4nc2vyvlwts6yoqnksnt5ayzwq5zlcmgfoa format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpQIBAAKCAQEAov9sWVeoF7MGLbhaZtLS+hM6QhOWvKBvrk+zs0y0tsjwWxdw Ojc8PflqtV+xUiJh/4lHDhS4oYhNbjoPZjQNubB11cxOxOmhCObyYvM/DuYXev6s xTGv8dARhZUlQU5D5JvqoO1cWKalZoYeCw1N4yovpuxXQWHvUrq3ch0M+MBs04c6 OM/nbyLyMCq2FHUKqVe8aVqwYxN7I5yIhOjmyzjIkMibDEmfrBZAzvOqJ0SMUjNL IXN11MVpln54wwrNz7UZQfFgHEMzEYb9oBzWSzJu1C9MaE3GSZasQz6TkdcrOFRP 
XozQXPSRp2IcFTPnQHg29ZB/UaZ5eWUrVfkhEQIDAQABAoIBAASOHdGLXRO4eZI6 hjA8cQ/zDJw/HuXLmANnj86RdLVs/SaWi5jc5U6YE07ZS0PP2SxCgl1W3+gHvp43 eimxh7aqQ0jDymm/W7Q7fAee46K/dGWIC30BS/j2hx7UEbP3A3e2kcKIj52cnp+0 XM+TQht4mNdR4Ihfu8f0lt7WCABFmPm/MTRvOKplxVd3rMA8oAnzLHOIRddYDHen 4b1qw/QjlUzPqtH3jmDVSMu+QLzKKX0d+wPEeGvVzZojs+D6iuztjvzR/om0pUM7 TXJu0FvJ3XjpzxuJrIFlQXplX3bW8WBUM+aNX1K4BlTJQkRbMHJ1PnJmsVpDBQ+c yc8IbWECgYEAz4//F9IaIUpe8m16Ev4ou1WaOnDrVuitTJ03/r27v6XZMEeQSGBT xhiaPPxd0mFHVj2SSGAhp3ChgSVc7CojC3lpI/vKmyX/k6RYsaMlwTpD6CtBVCDl DuzIP947JtfVu6A7ucRGxSscHFQK+DxDRKFet9wBIXEKwBvkfJkH8TkCgYEAyQkU 9zFTbXBktUvgwmjIiEaAcHg6mgnTKeI/KaPh1OatvP30g11sZtXa+YuSfa6FUHWR aESSSVn5omwOd5sDTvS8NcwzBQV5XwHA94fL2clnrOtsdt4eR4grmhSF7N8hEKXs Pkbb5rNMrNfV6NiG2ilzho6+KAABgYGWAmtHppkCgYEAhZxQ/jl+Ho3sPqwgV9eJ ysWY6SPFKoXPALF32SCzmfOdsolupFh1tOAjcTyW/JUoQaiS4MoY/9rt699sSI37 TyiReNtdma/FLHovqfG2nQLvsaUegZRHPutHIG3ir+diK0xDBhsF32gXyViEUzUf rC8gdMRHagqFfBK4a0hrJ2kCgYEAsqeogxjJvEsSpG7/GaXG+Bw3Tjv6UCQFDYar fRTPv8UUhwzku81NZYINbJEVqS1r+hnRE+lEW31jNG102ePfJ86kZ+bFPGQl/UFw vElo5m1u/iPlqykvnYAsx2wPrHaKSuI5NQsBp16V+FCDH7808DHAIcc+xAtlSzLf Hd06upkCgYEAilmq8p9cUry8Qk+h9BWGskzWNH/tmxDtPA+Q1jcB2axiukrol1J1 tHwT2fQBsDr73oDUHBMZTYxRCHFOApff4k/bTBeCNv08xzhU0RXo+F3Npjj4b20n aXOwLcX3b5ihtM0ebzioMdx6PAZSqQbs0lP7mDvCm/Jpl8+DLANJ9jI= -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 131073 seed: /N4rLtula/QIYB+3If6bXDONEO5CnqBPrlURto+/j7k= zfec: required: 71 segmentSize: 131072 total: 255 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:MDMF:lndkjnbiakpgufkyoq3zqiafx4:g3lynxko4n7fhsnts5knddchlym2hsb5tcbbuyhanz7fzjfxq3cq format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEA8JxtwE2NOlHhy50hjDK70rexn83ald2xwqtK3828FbKQk+UY +chzZkUHohC3hWMt9W4mQz6quedx7vtV45IXCDVk7q461Po+68Xb+Hu3fDTV0tke Jmncz/OvKriqT3eLGS2Lk7zLVcOX9wdh6XC6cD/qIQCs+cf5uENZKcBcu+bjLu8l ViHCA91Rs4mXoqkwXjvRq2xfR9DsjXCwCzhEv6Q9fswGrnAEj3K3Aog/szTEu9Ik 0sSC3NtmljeynQyLfVqmPTKuTxv6utSRZ8+sP+14yB77WHnnhBVDIiKaIdCFL0Rf jg+RZp+jwRQI9tRBVf0eWa0jhQKp4kbms15wmQIDAQABAoIBAAF3njEgWKsvQDYq B6AgV/BZUvz1OXwpf0KFMxfQOVpGSuhXyvopyIEBjKT4E1UmnhnkGk34jyqrh6fe WEr0TYIKHQr3P7upxiNLXVmMUyJs2L0vkZpUKS+Q3KmJ1n84OEzT26Yv1vEutSEb rwqz8b7ta/EpgtApccNjRO7eWSvgHAerVuRAmtSAuk6SVPjk3nc8IyvVhAEx4Zp/ wczllb1AMQyMx9WQQAoFreRobXBwN6IQucAXZEvV5BoIerp15B6fFKwqpToG3KmS D0K/2mq59UsjlOyaEFu9vy1CV0M8XmxkQCatCwPShH88/x6Wbkf1ktAfq67OPv+7 QX7PFjECgYEA9VZBp9vOQ6Uc3sVIwN4oucBQ5hRZ/lmk/dyPNQD7HrXB+YY5uXDR BE/JzsYmKg5XP4iEGAEl49px0OY9D+kXY2Vr9rqUjStfB3UHO+PKXw7z+/NNDBYt 99DQ+vkjCKT2OJfJ/HI4lptJK9QWHBdN7VMl3Fzqn/PHV7A18nu7YhECgYEA+xGW 78+JvGRoMAD3YtzbitoQim6TIEuKmICmtNFf+r0GJ/sY6LEjTm1Gy6kNarp6jRPN 0KoxLbHwJpOH+1X5FGcWoI+CWum5jbEdUOvQUb9+JPm0CtiGF9ESrRbAtM7Tg9OO zLQLwx10agEZ2hFzAY7U7cIDW06ZsHIXJ/WqHgkCgYAMh4o022nuVHlj+ylbCD2G NwcqqPFrpwJhIKmDqHgqulecubkq+lMCaFzDHaWHUlIsYXl1jGF2AIr9gzStIlda cSyRXjgF+agRxm1HJrwIHMhjHqrZqixQ0q5Jkv2yDFKy0zWymdbAAlA7V8qFRr9p Fm0BkxE8eAO/O7WVm7IXMQKBgQCUKHMbnSs4oz/gZBGYo6BitgBg0JO90RY+nFzE A3JSMs25NjIizrV5CH9om6AxRU4ghnlEE8rlnkWLXjA2nytXYOY3ZbiVEavP857L K/1I4Gn+Q+R9Kf0nfNc6kVyy1gJ3npZ8MhtmzrDuBSxORVHKr5DzpTP5485KE2ma yRvUkQKBgQDuY5L4hsSoel7/1gDI7Y7LFDO/3YDHUjdezsy0ZcbAk68bC74ZZYPq TPa6D/RU1MzZKhcQCTlx2jpW01z7oqwnTDo4DnprLTzPq2Fz+3UP543/Zh99T/yi 35EmfCANEuaWUWftgpA/3Ed5q4ymuzGBh7R7WfQ1ZeQ/vfuPpCSzpw== -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 131073 seed: /N4rLtula/QIYB+3If6bXDONEO5CnqBPrlURto+/j7k= zfec: required: 71 segmentSize: 131072 total: 255 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: 
URI:CHK:gy7bci6yvllxzvhh45khcwp3u4:qsfjfmcl64zey4k4o5cfnh2i3mzboahf7bhtggceszrulsv537vq:71:255:2097151 format: kind: chk params: null sample: length: 2097151 seed: uqWglk0zIPvAxqkiFARTyFE+okq4/QV3A0gEqWckgJY= zfec: required: 71 segmentSize: 131072 total: 255 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:SSK:amhnz7udmtjsa2duvirkppu7ay:cocikvbx7m6op3rjflwmx73qin2q4cvm25kqjble2nuf7ei4y2fq format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEAvQZiPNC4SwC7jB2W8gNAM2Guvl6mLO6xiNi0LCvdAzncfRmi IUXVNDbkxqETHRPQzAXz5k9hVuo5+iBqcrIfkHjokPhDfXP0Y4QN8/e4Sz0f8E0R 3XD3FPZWwSLhRG9HIMuHf1ciebYZRSDI2ku8ul3RDZXebfEZ2PhWlOMztCaE6nVf X7+IYBg+qSND924gIbQyIZlKbYZjeAomPzKfSWpZzbjcGLH0DqFjMjdxDIe/3pb4 tpVubg1ssWT8dzisY+K1e7ok/qwgggeqRhy1+jjMWqma5EXPQqBA8my1uFZ/YxLz v2PMHqe+X/rnQ+qia+ul3F0/+XQj1hfLLHO0swIDAQABAoIBAAFIP8IS+QxWKzk3 Wpei3DidAZwZUB0rs7MdDJnFf+pJs8CpEVK9Nvgxx61pzPCO35CrtEXMRENcUmOK eOcb4UY8kNHYoG5kcOzKAw/51J1nni6HCnXv+vnxG5llN/qb6GPHB9ieLr4YZ6lE GKbCEeK+iiTflZSiKJ1ZNqJabwvSnk2QW1+Hg0XBiM4MBlHjvj1IPJ9GFOsgt2Et uSMJT4wzQVAevIEfa0Dg7uB6ahDMz/RZ2PgUDseVMdwKYuiuuT3Uog3Quqa97pYR Pnws1X9MXVHPm85XAOKGtwWV7y4Hgp6YsVT3SbJWLbHs/4EAyw/ExbAvuvy5j1df TcB9F9ECgYEAwhyO/KDviIy8UP1ilcZ3+oHQP9Du5jY9/30ATRluOWrSnl5fIg81 +y3KVlZtnpCwFAPivY2P/a7tsaarJY1+JhwtUomTR3T/m6DBQ+rTujQeDZVEQ243 ucyGvTDq9UiONkrP4SgIIEookMC9+rBjUzTsNZ4FWDxvAfYayn/DBBECgYEA+Uqn Xc+55BCh645lLfl4HCbw2oHaZiv+WJOUUvtwAUpLTWf5xHLWzQVXukQ5gMgZcSnJ tgHW3qKtgpPsqCIwtGU4Cjmaghci9r3TvZ49fUgA0z5ZIQv/wAkIOp2cf3uJ9uVx Pi+Xs5Wt0AD+uq+k1RucGmMhlFO3lxK8kR0+oIMCgYEAp8iXr7ZMVfOQM1FSDbRn sJjUsNSgK01neZdK01nP9MFpHIrmIEKVnm+OHeLHDfBywlo5ey8J73Vs78no1aTg DYD9jAJu061F4/eoFlS8fo7eC0+imcaDVI59SLsn3KzCgBtaZHx9yatQNQ7lJ/Of ZySvqAjXBdX2/fMEZVTZ9IECgYAdtyxkHlLGQMVMUtj0tfv/PxUOttPVwgC7hjvz +EzNmpGHVJGNPTMllTFz3pYMJ84Akz6cF3QJbdLI8eEP2aN8nWQks+EbCK7+Qnpu 6+HggSi4BYKSUd/WgD0e35K8D3nOmGL7SqkGmxzw4m16y10WmgftjUt/ZstHktAv bBD/CQKBgQC2pgs1s9B4Gen0kW8l6nVLWCeQ8ualP1wzG9ZVtayiLKUY2pfGDoDU t1Y+irOwSnXoF/KIixgBjXCSqwVphZjl91TD8TF14Z0KV1DRjRLcms721gaNwN/A C7WY/9mu+FBjue6lFtCnsHbbONilHqXd/GqKpUscosOMJHWdc/r5TA== -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 2097151 seed: uqWglk0zIPvAxqkiFARTyFE+okq4/QV3A0gEqWckgJY= zfec: required: 71 segmentSize: 131072 total: 255 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:MDMF:hqicjls7azttyinzxclbtyduli:cc7zh7zdovyqe4bd6k4q45uz37qg36swsd6yfegamoszoqiewoiq format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEAkAdhqn9YOrOyEP1wO6nmbagEawIgoX1+XA4cglBMRg2VaxK6 /RLYYCIqGPkuAZyX5UTfkOJXCrzQmywCq9XvSScpX7L/JGX6sla16F+FXtPaQPkx 5PTVjDvkyYxR0gr/RsVqzdpe1jjuwCIU4K0mgLbnaGnatNMHofBTeV3n3eW1fb4W sf8fOC+vsYtVJgMqk64hPAMtUqR8IoMgTPDpy/oIZ0I9Os/abCvRUb6QQJDtJRr/ b4guTqQmXKsuESwfC+oZQ6VUMglT/rXnndNV8ixrQtfSExFxfk28uvz1FKAmrUZ2 ULTewSN4GF20x7NrsjzwL92dgHh38keK4IKWIQIDAQABAoIBACFGY00FsLeXLmt4 cgaGwSLSb3rdefZ1TM0twW5l6MlCeCPNpv+y6+SB4CH256cdq4YffFs3v45Ogw9m gpN6kJbhAlEGxKV/HgU3vT0bXG/FGCZsrBdObUvBxqC912Vkfwe1snAupDxv2NDw zsv9lOil2R6pXgrquleyc0aV6Gy+PYtCQo72Rhn8ik2Qblp3VWDqsJ7UwiPV416D 9Hz/9H7FsCW7VLgL52IWgBh3/X5HlGlbjvDI2fsz/idwzu6ZJyRYn6SLCTqQEFjK TYd0xhYCDKwdKBbvS5Cu1j+v4rwM7TnvUn1RE9a5k718odC8wNP2/W0BocMbb1jl X1p3KvECgYEAvHoNB7rUjXZjY/wioqR3o5ETVr9WMF6VJQlv1MFH6Lc+vvRmc8XR ViQjrE2BQIEzyQL+bOXe43GCUfg/5uMosj9l9ZDjL9/BSJUVmlzNXeXlXc+/5uPs Mp8JFjnCPy2eVm5mlmgujEKEcF9neNtSZI61uWa7RrGxrVuVkxNtfn0CgYEAw6DV 3qOyFXJsGk1uT+KMzE+M84wOLJsxQPEmljX7LzFnbQhH8CcFqVOMRoLk1YACYroL hj1x2x6iFe+r+kbIIkuDnZD/wezY07iTBhfqXckVfAX9n6w0mzSlOoGZWlkbL/Ia 
p9kuBwWWr9FwHefn39mX/y0bzLoHBCMRZBkOk3UCgYEAgVBoQkZwcUKp/L7QcLDR GRt/nkQW+YbbY5bu8JVQJh4b4d5DsOknsKeJBj4DEWPUSPVR5RtuarTFikH+bgar NGkFJpArH/ywW4FWWhuUF/mU/mF8tAjrVOwCywoD+V7uRTToFAgU78zvmz4J+0TX agD0M+mFUoK2ek/c9xUcSe0CgYEAnh2e/wY7583FxjSTVon71x7tA+RNiIwe8Sh4 UaxryycZOy0YR+iiUMuwc0VUg6OlSfqpWeTL45kM1MIUtIMFO4LhbDdIIIu4bNeg LaqiyQ7ACLAm4Cmlk1Snv3QEaNvgd44tMUD+TLqdopmbDvDjnzAWBC+Hap9pEFTv t4HxIAkCgYAcPS0TVpKiTqOek0oBoLxT4VZfqp2E5OlI3cVPBP7qEJTIaxAn/T7O /U1/zJppJIf2svpZLWp+smkUV2VxWWl4k9so46KLBYVG5n5R4x+g+YbgRKioMRGm cbzUaxovQsg4RLRUoKUzqE7fxPCxSm4aZkGjnDb7JvFsxhSeafnmKw== -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 2097151 seed: uqWglk0zIPvAxqkiFARTyFE+okq4/QV3A0gEqWckgJY= zfec: required: 71 segmentSize: 131072 total: 255 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:CHK:pcta7uk5cpioxzv5nxxqsogw3q:gnu7fx56k3j72bbevirn4kdu27yfpvttzl3qk2zypwqt7tw4rijq:71:255:2097153 format: kind: chk params: null sample: length: 2097153 seed: BTBX/ampNfLU+ox7xipBGiaSbgC0kcB8Gy7BkJB4oKI= zfec: required: 71 segmentSize: 131072 total: 255 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:SSK:ljw6sfa5f5n2x4u5asgq2dvhoi:wmjel2jkzzwwbizyoqj44czwger4xrjus7r52ujhd5mvlnkt4cja format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEApjugYoc3XSJhbpTGOoIpqlTOTKHVBBfMsvXXlYdFIHmQw+cu 6HorcanqnJzGf3L0oOV2BAOxv1cee/G3c1xVyzqzEXR66esHHA7W1C+9z+H6zTc/ FNjEixwRa7IpToVm056kFmnsNnAW1Xy/OJsh7n8YMZ0dh7GGjb+q07FeldpYJvzm 18gEovb9N/TZUab1MscTf/ne6scWGJxAN2njmWG/mhyoPWOha07WMPvqIRTc4Knj 9YD967WM4Oycml9Mag8HOpbBZ1DH6boKKHqROe7v0SjeRdt9NCfRWJ983E+SOJMV OEtD08tj3rkweCbG4i5lEkL5ORI56Y+9zjXM8wIDAQABAoIBABQJcOIfcU1xFPRq y2AHC3WkBj/Xa+Ez6zERD/zOkscA0DHE3nMYMr9fH0/kV8rJ9PGl5u1B8r1hB2Qi NR4bHZ5DA42RkDU85pz7ruphnMv/bacpxxlAraQk7HaiQXdc/hF4+EdZWicPqLjv 8e6lSFhCioyEZyhRfin81d7xbLi8KFd6+LualDFVrOjFmpAyXneQseUfc40kGC6q a1IIiNTleQeyFjIhvWRV6lxjZ3jbq/79s3UtO5uVcsh9r1xThTIQZTW9NdKAJdCl wdPg8OwuGASuyZkbE15ZTESCbUN2ERoC5ZTflO6qI3wcxSq4LrGA+uq1N2Ez/onl x/bur1kCgYEA42DJos4fx6Q9hkUcuaVMdF07zgw1lQ08QVj5/VndgY9IEoHJ9DqX F1ZdMWzHKKdPanGWiVOlG3m1xaThxkFvhri+hFb+PDQAoAJ2uN2Pc5trRP/lxE0u 9e6gY/bspLxYWiCZwtzZ10Qy6m5KSRO3REC++zNVCd/E3F7GvyqEGwkCgYEAuyhy zyi1lJP6zkFNKtgOhPwBitd9CS2EshSqtCViKS70GjyZWqxzyq7V2SBhKHT8j14/ +vRHvg7dK/lZelwE5qXfMODkcrzdt1Pjpw29xLTTesPRx+dhDZXYOwsd4Edk+t/D igH2M8q4V4iA4Xp2CwrVbwid60B/p0WV6MpFGxsCgYBfSQg2ubqHp0RBKGVJRwQr H4cYafVqaQl/ORJKIYa57Jl/Z/SB7Ku0k/Sp6bPsTXDyYnd7RRpD0VVjZh1XP8TE 6FaujuYrxH8ejunBvteG0vK5D6PyB4ZOeZmtSqUQw/0ih9bn2jVQCLxtkZp/1UtP xvJBwtk4MhYFY5JWOjLyQQKBgC54x7E6qYPADsnCGzglN827iWKBSVHLFKTnTs+2 bJ5PQ1t0apvCMGpGaWElkhpqmf+7ZmWY3GuL4000+AvS54Ch9T58yRzYWrFXyjJD zjgWsmBMWT2q7UVjTLK0evGiqKdGgpY1EH4huw45Hc9fCgqJ4R9V42hztn7BX4zT FL+7AoGBAJfX4rjrlxziDdVLEdJZQmJYOOVVzXKLJ2MF4zsimeNa9FjEiAgiEhBD /rPgnX4ktS6EmhBV+Y6jibMRNWsadW3Ax8rtH0mNuTK9x07RBdPg3aagGJdNBjty Dt09KtA9KOPgwTHhKP438g7Me6ymoZ4e0/1vrvEh+Buejulsahmj -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 2097153 seed: BTBX/ampNfLU+ox7xipBGiaSbgC0kcB8Gy7BkJB4oKI= zfec: required: 71 segmentSize: 131072 total: 255 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:MDMF:xznz7n64qdxjwcqxpo3mufyxca:l2yaiaxgcyx5wch6k2f7x7dindcddjjnx5odka6rbng3gsdkzp5a format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEAkCrFEf2l8ZyPTx29aApnYaAK4d8BpZ7kA3dmGshLdNhl3e9V duskK4G9nuBSvk/SVq5v2cky6jx/LaAr2+3cXdpCvFJzqWpvsiGP5e8jkR03ED5A qH82iwztFpNu8Bb2DwhMr8LFXB18ijPYBB6GPFYsWFXjddbdkMDY4yG2OtofQsUP TwQiaRRZOkvdzSxLKZmsao9wrbE0oJanlWk4B09KL1htIRDv8SCuTGRHqFZ/ve4M 
5h+Lp9ykUhxvS1IMymyExm8grH9jLz+kNO0Wgj5UTCa6l9iD9maNW4+2YKeYSako QcwEKEFln+7cBQQ1AQNt0+vPkLgXcoiWWGRIkwIDAQABAoIBAA1w52G82nazHYtc nvbvGAy6jU6Ev6m6LXut60eY9VawrEKrreCsnfkucJ2PlYynyC5hDLhAhsOJng6E w0IPJFSzD4tw9oFM6pqwKrM0f1uSs+UK9h/03a12KDlKx/TmIc0XUtWvh5NbLmwm LEAqEQ2EDUCzypqxzIN9RLCxyRnQbhY1B5Vsq/8anmZPcDIRuoAa8Ed8CcJlbLAQ qmpOjzvbDEplAs61HaIvVBM7H8tzZ3h7HY6zipPDhP3fJ+F8o5gGOZzLGYz39pHB lNrvrk4g5CFj6O9h2BBN+sW3hSE+KFl7jSt1p9GGewO2hnZsjU2NAbX5Rz98TNPb TJSkNIUCgYEAto9/b8ZzJ4OOHYzWoVgOMkhNuE0hNcvx9n5lFrxWNvx79Q4CgrQT Bk6e07o7udcapjj1eWPj+PfVFALSmrO1lbmAg3kYMzVdZrzOjgYKWCPFNjp/Np/L i5iJYJz6lY5rGrzcNSjDwRl4W/iVualEoYFfH/cYt16gUn5I7whELqcCgYEAyils uOIgEBzvkB/tq4GV0OhGcSIMBPES4ydhjZhWO9PvZANfklPM/0MfWWC7as1LSQGV jNVxmhrJeZzlqgaG1e+WDclo5rBn6UikjYxNKOwM1G3z3ekPhzdvbaf+nH3F6PR+ aBTwzT3hIIWFd9+CDeeMXqoNadJZLAk6GQn4YDUCgYA0Fo9qyfmTPaLv5X5bvK8Y Q68BNeiS2+Tmyrt3GDeVKscHbX7j4hNHimkgyhM+fBRbdwb7IrgqEjRWqFOE1l+q H6p+WK/B9Kj4pkhdF3YeHd6oEVq4sDE4XEZeLYwF3gPLNjWyaTYpQ2Ym/69gsN4n Iq2MhkkkELi3sNaIdRhXIwKBgQCnVeinlIzjqX/mdXc+WlIPDOSZ6ou2X3G50rQe BzWB6iiiSWSHc5QgyoedbMNVYT7q3EPUwix5WajhYCx+M07SsLEtEkUhhm1MnROQ Es0fjVwFTknoqmxvCUTTqJXJJRZ3gEFNl9/Gk2zQhZT3p2s4ZSw8g1f8+t9S4wRT C3yq7QKBgQCNp7VRwjdpSliGZzPCcKjPAy3fN8ap1XGRtw7I4AKCpXqS0Q5ee58B yKWnt+B8SZYzZUyDmOr2FwGC2xUG5gM9WLQ0S6bDJvrh1qHfPCtFZWH4zeUWmuPi B9vtQibh5X25jiluU+wr/pyFCoHb4ULo2ddBVB4JalLsrhjxYq0utQ== -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 2097153 seed: BTBX/ampNfLU+ox7xipBGiaSbgC0kcB8Gy7BkJB4oKI= zfec: required: 71 segmentSize: 131072 total: 255 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:CHK:dov2az3yktkdiinlcvvmhsjwfe:dddl24n4cj4ttibyu6j27ujcstssqkfzu6lp353iuezfvdu363ca:71:255:4194304 format: kind: chk params: null sample: length: 4194304 seed: 2NVWOXvtwcAFf0J/g0fDC+ypQbnCU7awZoCxcnhpkVM= zfec: required: 71 segmentSize: 131072 total: 255 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:SSK:67grzglua2vdwco27f6euk6ybm:cn6owpmbn2q43vfd2vdsq7ptpz73aibiboslvzbd4bdpl3frym5a format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEAzetleq8DMBrg1fbfhl3aQZH6aWLrtcBK9/TrvK66KhxfXfdk FU1I35e6O92+oaOMNIkQzVrGNTcQ43qqDzuJEBLxLZLIH3DP/dN8c2mbKlqF903s 7vFLyTTCzB5UV5IL1OhoWLrrqkdqtvSRO/gnAD+IjBUuCvqqDW6vWlLQPTzIkgrm 4GobyH8MOHX304CX/zll0zUbJkcukeb/XogSOg86CtTXy/yCbfU7FUf9QdauGGmb ikUAID9L6sXeb7lqOlJPj16cDv3aGKNsQaN+0QZStXHElpcYSppPrqqOuH48cCH1 EB4oVJOplz+SIeOXRVtJScF8jAUHwYq8I/1OZQIDAQABAoIBAAoNr8MBSmw4fmOp OEPiJHwbuW3DGPOjCMX5sjdgZycT2DVz6kdT96EwUjUGtX5EyDKZwoqUH6XiI16d KDJdhhU/0itBxagrT4GlUqfRLx8uk5MoaHnzU++Qi4uAVj5+8I7w2ISvKOmuWkUb xFTONRvrqNSrWrfJ1zBCstL7xL5IjqpZRUF2xvtZu1QPAxKz6MxuGhXxUI6OVTTC F2/hZin7Qb+mRb6S9RJlfWhmMxmne1DNxKNh/miz8h69KDeMKh7+EhvIqOgtxJQp XjCI0rFVtx4ghmPKJdvbOizXnSBx+IbJWQ90IPkNsplIt45eaVrSb5Y9DCJxDWoY xdm6aEECgYEA7XsMax7QwWox1PDqu2s8YA9175s48hhc2alBS/yYsf8adCHvKXIL 5WvmZL9PApMFSCG6K4er9Z7nEdoF3kZBuYRm763k7qaO2Nj7JMDMX9TPZHTb5MYD TPiMvNCkqaV/S9ETPe1aeZU3k/aeX8RekYYtDgQ6DWVaZYg7IHZOvZECgYEA3fpG tZH2+vz7JzAZ07L+5CQEVX3hWCLi6PGOxqqz0cj2xKntz3XcKNWU65LWRNy/4P5c Ab8gfDHXX80e67QXadZcXWmCDLX2BmbJIneclxCp//JdqsVOdILZ6Zc7h5EATWWl 8YEh5fcgDX6nehKUwLv4aBNdzM06m1uem3Og6ZUCgYBgT94ad5XsSzhIhyh7uCL1 Rm/rLAWtUaoecGFWAuyei7pbzQNkyKcAdYEr7NaLUbr7pQoO62gXJknKWKS2n8G6 DnN80wacrxoR4fYA0txQJUuzDx27K39dMRRK40dUshTtV665F9DwrE6tCID0j/xW gpc1LwuoMSm3McfhA3otsQKBgQCOrE2EaJQRUEbxMiZ3fiX8ZvXuKSGMr6eex5vY L2GypfOOBhaW8I8YI+c63r8fta8SowpqCPmNOc/PgJyuLKub2C63z5fKKa4/AROo Nq8MHabWnmX73COIGY6MaCrYAKfsFzhomHI8R/FvGwf0GztHAcowwrnYZ9SShHnW OqSjXQKBgGdHwIpNiKsM52SevnkVcjN5Dz6rrHb3t9VNOby2LkdYnWdOG16NKPH4 
IoQ62YPDmcJomTQ2AjEkM93ha1e1SZuQnnMv0IMbE80LF4qHuWXJ/gc24Ruy0xOi mQRQ1tKPZ02L1BTeEfbwSR//OA7trVsBmrck0a7Qjqb/ogpSnORH -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 4194304 seed: 2NVWOXvtwcAFf0J/g0fDC+ypQbnCU7awZoCxcnhpkVM= zfec: required: 71 segmentSize: 131072 total: 255 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:MDMF:pe3cd5dbpwdrm7lmeepipeh26y:3mstlvoiza65p7zol4eenbiz6k4uvdgmmpnfxklaqdvwdz52oq6q format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEArSoxW/rzJSR0lE73K7ZigVVYf+cxvOJfNpmSGPZs13+5Vm6p krmH0fUIzmEfCE05m2dN+6y0C9JzfsdWQyqsuSeZxCD70ND/UUNzGMrF4EVGoEH1 vxkyK95Rjbg4q0oeiN9uVua07ry6C9paxjTa/27sFn8DybxJTdofCHy3qaijfHiK t1814hXv1rU1hzpKScDqvU4Y9jG402I5bCdccofKIrs2YX+SSW7CpqQTCoGWPOIs bNJK6hBGXmmmPc4aleACYOzOUsdWZUi/80OOAsMMtYMkQPVbwazHHupKpabIsJ+r J9brBqe6QGiZnd7QdlnW9SmKB/PWhrHrXd4gaQIDAQABAoIBABR97zxmzJpDH6aj V52tJigO/PuZ1Ol40nKoJsFcfBHedATV8KxD115RxHqDxMPbO6t3xKM5U08o1vEU TtGA/dKlbI1op9QUv3oS5M50xIjfOdXiKF42cZj+ZKFEQTSH/2gMJMcU4ylzXQLl EqPtAlODAV5CJqUbaoNTgiOjeqqRb+yfbTZZAN2sEL45XSexlSM76Piv5hJt34Hk 4w/eXRHnr+L9WGvSMnxAHfLr5Yr1PDLyFqRyE+rHQ5vIoD3t7oWySO0+OgRTUdE8 w1gnM2fYHLpDZu0PohpHo/51e3N427IWUFQU41m1Py7EDb6vyNLb6s3PnFSp82RB oaRiMJkCgYEA3fAXRXqiNCX/pX7iq/vH7vaLP6skvUKgjRe4s47E46xnkiwqaWBM FRjTQ169Ojs/L00afUKkuox9UG1Cp/cra311KHiChNqBqba5Eos+sk3wEBpql0FK gwdeKBtt25mJqMjvKGMaM2MYpP9gKn85JFTo9KGVAFeyPqhLCoMg86UCgYEAx73Q y9aV2euWy+0Z8HsZeDKDSB+u1l4xCOIna4gkTTn2PP28xYGVx3pcvaZYWVWUZA8U U+1eUo+s7Qctj6lXuPX2aJI1IlpFxNNyKi620srfnDlirq94b8upZySUBYUD0VzB SDbbqlVKsTyijUxhQuCSFelo/pjC2p9OAnDAznUCgYEAszRINivdiWodUM5xzRkS yVt9+L0Cf2erKAI9e48OYCA3yQmsfUXqaSaQf9ehx8FLNbB2cSo8xPznuudeaS3l e3fj//e+u/OLuzP1oIma6HKSIw6RfuyTc9WhK5VqUWVaiFUm919+KnwbzC8AwY/U 3gdJyy4lmA83t+xAG47iLpkCgYA7ITUTctXvqi989RbNuxNiIsn8auyuJzoq4BA9 ZBMjDXqYuaDNczwszktwFTNoVs5UBKbG5akblc7iaFKTidUfOykT8dxq7ABlcRcF 58hVhJtHuzE8d1OW/NqMXya2r5bevq+1OhAzT4aKC2IvpCHS03pLpEphvEVKxQgp 7skVFQKBgCSmyb/MDbRe1X24pJ9yc6EHNH2XifO8B5/9vI5lTMnHzaeva9YM0MGH OrsTFL5TrTTMpb4nQiQafyOT75HJCCoeA8E6tnNyhcwMuBNOCIoD0CpA30HzKVi4 eB+5h1t3mPf0kWVhvNcdidGMcgyr8ekP8GYtE9GnNT16U3PX86Xu -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 4194304 seed: 2NVWOXvtwcAFf0J/g0fDC+ypQbnCU7awZoCxcnhpkVM= zfec: required: 71 segmentSize: 131072 total: 255 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:CHK:znxyqo2qglpkhjafnuesu4foge:trau477bj5u5wjfpy4oq5e3aziajgbpyfjdv4svyf27m7atpa34a:71:255:8388607 format: kind: chk params: null sample: length: 8388607 seed: w6uP8Tcg6K2QR905Rms8iXTlksL6OD1KOWBxTK7wxPI= zfec: required: 71 segmentSize: 131072 total: 255 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:SSK:5viwo3rrpmhruutemkyjhkna7y:c3pe6gxnospoucmcbujn3mgnlky3vs5dx2bbvqm4uwjreu4vd63q format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEogIBAAKCAQEAwRdN0XCfwI8rQ+l/QsT3MEwTmvLlKgs0mYInN/KH9uMT2KpI t1fjzryeOi6pPRgnrAFMRXGaUo5i+LkKVS3ULBH6UTlUvJmUVpQjToZFvbLAwg+b GJjM5lDYXeT4LimqlXHxOLCTwjoQljsM2whYgI7Yjhm5LesBgiCf55GU/lw2gup3 zfnpeqOtwmQfwzZoKIpEQ4ZnCzSLldlSUmR62haEBGiEfnZDVCIvdf+oC0Qqgua6 L9AofDFZYRemNZUGIRumAPoJMk/hPQZo5/g8USz7JiJKA0DI39nILF6RLpgAyPxF xX+X7bwcxn+ycSIeEi1lx2LxaJttyUXTg82nOQIDAQABAoIBAASx5dz9UY0TjhhB IayEcIQ2nVVrqYHLsvQ2k3CLT54DqHRgs5LtqqbYtDoy7z+Cilhm0a1wlTGDr8lf am5mxl1p9H2sGLDbRR2TzYX3wtNZeNFfIsTG1liVR6WEzzoEHlcy5Ywc0wLqeYPF nMromYpKrt5JptSEfc1lsK2nPwmuNm4YycfqYa+pXKaO2QKvmYjGVZ3Di3ZBKl2f pf2MRrf87aJj7admBk6PKG2dhs5FI0HLPjNmvk39EKpLFyJiKFWoDFwizIY200xB jFy86Ic0PGVz+jSsTWYH1+f2w1PcZ2Jjss2EF4iZrOXdGCm2xgXuf1R0FxFMs40M 
714HU0ECgYEA+H8lLs106NG8OePE+1uxtPKGz8OxGWYZoxT223JMJzq7pxfrr3TK qRswldZ/LlzuZ9tcEyVATZqUUFcRD3kTn4/gQb8S7spEHTTdZm+fhAS+EyKBhVOw DInkl6HBAO7O2wvp/Wy1yz9kcrerQpJLWxxKMVyOLO5nX3Ae7gj1PUkCgYEAxuvg 7+MsoJUi2AymHp2Pbasmfq9cS6tFD2pPh3isFZFvU5hLqlT83BOZAI64jkb2z2/0 ryGLUtoA2zIRSKnSp+cKEZJxzQzRxKlRy+HupXvxqVRLwe439q304OOCclkNJPfv 888gOQKmyTfguP+KvBmMkiqAqZXz+snaHh2iynECgYB1/2oYn1c0duN6Wb3f3dq0 obWCUtp1xRXHat0Nt2iR+EHDRoiT+FGDm3WmsQQTb+2FQ5SlQrsWHqDuxWlEf6nh yuAiWCkVWtadR80aJ0cH2XiofWojdWnTimcR2a1cVAnF2hJyVHy+1otMLgsUwYMm 8HgKmHiqvUo493S4c2iAgQKBgEIhCm9VS3G7ApFmaxdEc/kWa76z13AEaPn98qBr unGVHrhgqc7fYAxdq4Cm8a3C46wEYQiTkzig5qX4GAza///3a755u8FaIKZLT7kC zA5RjP4o2uKGqi4kmILmv2f6OMdwcWHRGro5Km88V0XJFjsAF15EKO+3vRtDXXKj kYnRAoGANztb3ObR6BF8O5x4SWcwwQL2BGQ62zWv4EWESwjlKnRzxcbuuuo4zLjP ++3ZcCv6IXd73pFgq9GNziTJs6xS1Nfa1O1BcJmrnHVuLjfmrTP7UV8xciDFvN+y z876LSrjUehLzbPQbkLbhlX81cIPK0NjXLe0mea3+b1xvtTZlKc= -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 8388607 seed: w6uP8Tcg6K2QR905Rms8iXTlksL6OD1KOWBxTK7wxPI= zfec: required: 71 segmentSize: 131072 total: 255 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:MDMF:prou2lgkp3l47jduvnely4pbtu:styymkrx5ja65ke2bv3w3oywyreq5mmdr4ujovm6p3pskoubodtq format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEogIBAAKCAQEA3jkvQeQAoJhf+j5aF4Oszl1IwFfW3SKPNisjSo+8Yfixq79K 6mPF7c5zZBioF2fcTpNmEX1hjpkc4BZ2NwFaxvstAtjahLiHDQkGh2tGhuRi/jo+ x8lBS0Jtxh1FfdxvnfrJIk5fhY1/+3bFZcOQILIfROwHGpYM2K5MHOV9a87LBiTM 3Q71jEbs4HGgHAcShOVcy+sCUMpnSv46vvuHKRMEl//JsydjK0rZQo34NX7GwBs5 4wnpCzC3CZ5mH66CE2aYy1owI8LEQKdI+eiU/ka1yb54yS7hjJ/0I3wX0X2mJ+Qc YMIG9YbIfQd0UKZeT0kNmWtN+3JJdkni3G7nUQIDAQABAoIBAGukJXj0OT2RMYRk qX0UYiM/2lqY8dIByH8DnD+kqiqGrYE00tQAakKLqydELj/QJk3FZj54jkXlcrA1 ESQJuvABgMcNRaPeQkSVJ5124B29CRp+GiTqHn+W+NdrHFsf6M0MSlscvXZSmTi0 Sl/Fv82mmjDnZ3WAU06t9t5UZ1FNXaTex8v0+AY5p3m7dvM1DWfJyTC3F9T6a3XK xkupwTTUnZh/y5kbedaXS6Dc6+C3/BG+7MBR3Hix3DcbQFYU/S03tP8DzaQ2aWtb iHbxI1Z8q2QpPd0rOMOuquW0PxlJSQFe9+ZGKThhVfBoUhi4Wm5qjVibnhd5+pnh VzkNozUCgYEA9r9eXVvEgOu4NhKw+k3L2Jdj4ibdfVyMUg60i4NXozEDx9FW8C4r jxgdVYkdnMFKoToPn9dFBxX8CmSf0n0+C2Vj9c1Bjfm0COEFFHq5y1cpLjg7ULZF +F3utDLtnpgRhDOlVXl6E2Y7LD6L98nwOd2apyKgekYIwJh1RwZ5pKMCgYEA5o5m A40Gt0bYW/0rvl86DVFKmhlxKC6mYfDoxosQkMgJzHS/aacef0M1G/wDVJw6TSMv ggVDq7Bd1cnkiqwlZCurJ2LDlywtkpguO/4kQvdtYgO2mzhcC30QwhoYKhG8LRf2 SP6BchNZ6evOMo97VidvCH96UNlexrqGj91/z3sCgYAAgSeqPTPLp6+6vJMMD/io uraDkdzGEthempUX6+7T8Je3YuAwoYeJRV1Z/WvIFEUYy0uY4hHMD+lyA/6nqYXk 9BIeQIsvxSDvG7as8gtLNSRqaccFRTojZd3FFI2T02/Fu21NHXB4da8NShtzKECL fb6BNPrrBRWjfyxONt8szQKBgBIXxKusqoVBewMlCATFhlG7OmaDbpzfpFD1Td8e 1Kr398TiuI66/aqxBH7wtPYz2GNrSnQio5/alFKNqHC1d623u5O4rW60mdLyPFaa 6A+VSTEy52ag8qA4LVN+Jr1ObP0A72PlDRV9rUWtKp5PIjetmooJLvkfRc/EnYC2 uiv3AoGAU5U7ZTY/JnisBTemkVKlw8Vrxx9weT8VGO5VDW2HbGbOZkxORT+njc7S YmsnNnr69IbUVMCGERomyhicNe8c+ePulCkKoKtOqni5eCqmUA7tMLUl4rrvKT4Z DCZns5bcsoRDJXKyz36qFgSTQ2p04ycrm8HumkzH/x/cboQqFD8= -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 8388607 seed: w6uP8Tcg6K2QR905Rms8iXTlksL6OD1KOWBxTK7wxPI= zfec: required: 71 segmentSize: 131072 total: 255 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:CHK:mlzs2hbztak5fxjkrkuuvnpdpe:3myvisewm5uklimp2xucrwep75sm2rizfi2sq5drhlqjszkpdyeq:71:255:8388609 format: kind: chk params: null sample: length: 8388609 seed: yPi3JHKKbWaEEG5eZOlM6BHJll0Z3UTdBzz4bPQ7wjg= zfec: required: 71 segmentSize: 131072 total: 255 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:SSK:keqm4ealssgmiq3acynkwtilna:ro4e4c7ueczcitmppcr6cnlj6lk5cv6tqeb55e3yztg64qwy3wba format: kind: ssk params: format: sdmf 
key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEAqXifYzGbGnv/uvDk3WVV3V5T/jlDSMj7hYdrpZkRZNIadtl2 jdsE/pO+l1E5UE4Ov6VlLD7Xqg1BCnisJ+oInJI1iDuZwjvSRXAFl6wfc/F0pVXz kU51t1iyBXviZcx/Cc6P7JxEH2LOiRXhTUdOcTUx2lJKGf0QNVSUgTzfofmrOHiN ZMOU4sZRTYa54LXhv7PSltyJQlxUvuLnFCXzYY/9PZTxzpGzP9JtpFbccX2rYyG8 +mcT8FMQLk2IiHTK2AgfPDzJEQH4riMzxBCEjI19nT3c/spP1Hqy9UvZRanNsMKg SDTC0WQy7hIrmY6I5iraa2xuR/iGyjkYYejeIwIDAQABAoIBAATLnYr5J7DXe7CQ DZHSylRaZBkqNe+h0Agv+bLHjgBqTiSif2nMPAZTwaBtYnOGpxfF8F1Fhz9fF6PL ICu4nHNb2rVH4rIxlH/9B3VdVK8ZdnvVMZX7gFgbTSFULt9hUEvDmOCu5rTO4tni tpUWt2tOWpc3eB26M4dqjmvnw/gXlXE/kknYLbtks0cgLuFeXIax4TasHXItI7KO L6/NNtp5QARbC35v4c5MfLUsAKBptrx0SjIw+9B66r1Jsg2JXw5+rsTNHGKK8I0f CZJ0XX7TG8gy6RTXQxEi5FTE2XXJl2TRWRS7l9pbyM4vJ63Fn52Hdquh+sbUuyxc jMxlf3kCgYEAvdyUEqOf8MoLtXPyShODkVUwwQzMxWxzYRbXL2ZFBysvPgKoekeP 6Nq/a79aRb5mgKxMeh3RgDd+JQD+X+mIBS7dxWlcoheKam5pvw8uqF8sCzop62YY 0qtvp4CkYBNdUWczsT3nMhjS9QJUQCyUIx1tTLpi+DAKJqHinUl62YsCgYEA5IGr uZXnD3BAf7RIu72UgSF5SKx7ucqATRmneFd4ho4zvaw2C+GzX/OuVW56EIzFojL2 TXxaWJHe2y0FlNFNXRvTOBHEv6Mlm5UVAt0ULliJ2O4DCO6rYxAtDrnu0wOAectz lw5/XCoKDAFGSMenTxIRrHZ1wFkqVxcsROg+MMkCgYBJzG15+UP4EnEOrOzmwkMH wLdcsp79tjP67yfhcr0uFikcz2excBOODUkOlqh+J44sQczQQrrmPau4snQtz9Zh PWBSlau+DaxtxlEwRLR8GdJC4u7cYykO6jhSQXyjI6PIOncrU8aEAIYvWiJpd2p1 Y8DSbDiABBxN++rb/G3WFQKBgQDQaay0whJStHE/iLFl+o1+EYfLTvYyCI5ow+NJ EY6uOvjaID3TLHIsK9dvuCnA+oQvYgffuHG2oqT+htu2VggXyg8l7p7iouzkMF9P k1CazMo9fyhpdzX+TnyqF8/JykHd1ECDIAftibJMLMVsEB17MuHHyOuxGiJR+KK5 3pEKcQKBgBYbOkjHTSnijJf9xkqvnk6M9bJu6LXvc2JjnixB2J2YxqlblJWw4L7n 4Om6dx0vvhEx5E7XvNn/3ZyuouzGk5z6iKxCNFq3YzIOhe8VS6nozs9l5X385qUn oV6h6h8pvwzuimK0pm4i1V0eJSagW5kwNVKiYekaplU8We0O4yPU -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 8388609 seed: yPi3JHKKbWaEEG5eZOlM6BHJll0Z3UTdBzz4bPQ7wjg= zfec: required: 71 segmentSize: 131072 total: 255 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:MDMF:fojgru3tcvv25rlf54gmpurhqe:czcchpdw3g5tablqxfl7vezf6de33un56mrhpkswdaclo7ekmeea format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEA1YgD17SLc99hZfEdDFeozhbsXLe2bbF2oCY4mx5SzH75Dn36 Pa5XwUYhgq7Ejv+nHhIn9w1byoY/pbaqs0WUxV9481OCX1BjCVQwS18ZfKvuZx2N SQs0I/lEZVoLgdH2bnabZG6qxPGXrzYgCYKL2D0rKadCCRbDfIqs8EgosJ7wE8pi 57CkHQZePKyECdpBivXzOhAnBVOIIBh98LbEENZfzfuG3e3Q+LvsWQQ5PUOVobE1 cxvzWbHN6QLBmNqu3suf23gu13ZfwtXKZMxfsKVVOoF9qYDJ2SGrNfQdoMIU8MuK kFl/ur79Ld9Bu9NBxA0TwjGAUtMniQcz7FkwOwIDAQABAoIBABirV6JOmgPfjV2d LxlzcS2qKVGG6f0fURCsicKmDLPSgYyikkwY/ct3Aj0aWtwYfiKzv0lEElRCEU1g XrVKdyccYhlejwPbAi4cO14h1Qx5wpfIKsADGtmDHVtGPWkYrEtTyZ0fSfxp2vfj nWzr16M6Yee0iqUJK7mSPeuespDA/e0/zrJjMSE4hjblphHJsPS71Mx89zEJywJb dZBvMr07LL/YT/I+B58nEPu0VdS3ekJXGWW2wnBRLG6n7pX/X1cTZGO9DRw/mXIc MDtpE8O8Wn7Fpzt1wN5DfSs+SX54YQmVHMpnS/jM5DXWQP8pgVUH8nHlqVDlZQjH OWL/+x0CgYEA9cwUxl6MI/PFq6TtWnHl92/dIWipAoW17z9iIlXVS2RGDgOGXLan xpf/R+877+BxEcfmE11IGpu3BpBSD7gX8glqH/VfhXQjzrVYj/33s08KSPrf9J6a /JJvgwLjrIO4L1X4geYTjDNHHpNLiX4Gg+OkXrtcQHN+CIXOM0n22y0CgYEA3mUQ /M5S2tyzDHK6cmyzf9WFZwyHhwYMdju7jOwErf8VGJCn2rrCDAdIlNWG+ru8fOz4 F/++Xxq8fYr0caOzDghK2O+IF0fPaDxYwFuHBT4vRHjVjau8sYidhIeK4V/JLrBF uZ1NxA4kX3d7f+6Y8YWMpie16ple08b6yN9VOgcCgYEA5HoyqX5DeDvl97pUI2mS YWHrRF3cFIsj5eOeHdp5bR4lfGtMXywuYozxb/VyWnTfxa5yMHfaSVmLVR+cGB6A q6ySqGhWxV+C1Wd+jkJ+GIAVSGdi/CjeWn7oBvkNl1PNRrr8SAsNCpqztjkm0wSB m+Fj7ebtRr/UXKm8VbKgM3UCgYEAgwUj1uxu38X6LjFBKrxjm8Jdj3JQPfoQSW+z dLhvoVqQQSKn4TL5s0BvQE/z76++whKRrwHaVAlaVtQQYwrAKFo2Tkv/70c9J/m1 h83kY/BYxIwzs/0jc6w6sKNx7IkT60+qJEpKUGDMiPnJZntY26GEVTc783Rb64Rk pwb8HO0CgYAFAmDbWNKQynEFHE0brggHalBZiAoBa1hmOqwYto4/phUIFdqjt9Pm 
dmiA72AuK0YnKfMP481tM1IcCwzUIYaxbFMDUN2yOuzGVvQppa5TrePm2KM52fp6 6vZZcaQ0TXRp9Mo9BroG0CkaTQ/XTm+zfPpoDWPE9cq7L39g8meABA== -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 8388609 seed: yPi3JHKKbWaEEG5eZOlM6BHJll0Z3UTdBzz4bPQ7wjg= zfec: required: 71 segmentSize: 131072 total: 255 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:CHK:ljiodj4tijkfzej5nqdk5h7cnu:3fb7y2osytliwli3oez3y7ece6rjwfrpa2zn7uwrfmn22cisk4aa:101:256:56 format: kind: chk params: null sample: length: 56 seed: YQ== zfec: required: 101 segmentSize: 131072 total: 256 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:SSK:pijpudpxiovhh4mwyatsoo6rhe:j4zu6olizd7znzfvxqxtjkdr4vwvmvjncqafvjqkfi6ppk47gopq format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEAn64dg0fS5XbYmqhhNZVejtWlUnL5oqHiVo7tMYh17LRrq+lL rJ9ra9j+FATvN6rYY4FdkN614veLud1eys/hvt4bgJ0gWxOFCkCfvlak7U8CPw3T Jmg0xA6bwJPI6QUBysodMZQTNn7ZkFOglSAxQKsssPw9FrOiH7LwYDG+KsbrFdcO DN5q3zKELzVts4tlZSwuzKZe/DNiwgaLYousjbqInS6vxYJWfuXL4wP0igjWt9ON 17pfF2+/K/ys4VxjUoopbxRyp27R9HZq2dbmpv3DolTXDH8sQtUK0u3EvD3qemBt BkYDyGBQ6uIU/Rc6RNXUwT8r64amQc1+NpG4mwIDAQABAoIBAEkpsgQQyKSyy5Qx SjkO84Bmi5U3cQH/QoF+g1eKut113U+rWS0C7mk/x0rM5/6NnRAamhBiutv/qFnF AEXU8g5OHjPTyptwWijUa1z+vhqtdM4HO5QBcwvR1bNrA0chMC0GZlHtEtCJVo42 gwFQ+sAyrgt5x1O7grEbf9/TatqWCF7pOF6fJMkYfODAWbvdoOoG3icQxDj82Ya2 UakJFH5ZDKKc4B0fV9CSiT7HCFI6ZGBjJfFkRphU+h0+qdgQB/qWSwBZ2VhK1rKQ zMKjluwn0I+nrQUzmshKe5StHLwTezkWkaZ1iMvQaHl4fnw32d5c7SIh/hmy0s7j cqHoukECgYEA1TVlxFZKCPTJxJmHACU/tMq7HZqESDojplOlSv4apkWC6qJkJYUn 5/VwYmiVbw2Owk4borWN1qaCI0EqbVCkVi1g/pk1jAkRdG0yz7IBVGlronAvttWk n8aw7sBXkQcaNybz1k2KVfREFHb9kQfZ3e6RpiKQ5YiMnrFVUku32ZMCgYEAv7pw nK76qG9/GiXRZRjJjUbtzxczwnG1jN0BqW40jK5Wjku8Om/TwceJlamgNwrCYwb/ pe/a9lhYOT4f57sOmcHn86L76CKKY1BL93khF8AFFGmO1bHrs7PaJQOtr5aVKYRP ehFNOwgJYCQWPF9yKoen1Lx+MJkwsKAAHwyvadkCgYBDDYcS51xjUrD9/pbBifVu I3ATkFvX50j870OFwUKaVjQlHKtITYdOYRdWK7QLeAUUwMHaOyT/g+BbvAve00TL wXvGtmJrxxJRPmKDhWT7qifqr0OiSbB7e157x8wCVWx+Oebn1/0QqUCb+wwmB4US UgxGZoqRVY97/SNrPVr1twKBgQCDjEJT3uLwyn9ky2neeaFgo7frDTpgQXCVk9Xe EFVR6RROUbx2Q+AA5w2JeHcLDQDOvTCPBAEyYO83Z16wunGMIbUqPzujzH8zIRbe V2fTSdayaLKuAIN+KvqTxvBWt3TkpXl6gYCB7kOwiVIQXlSQxb7rgeD7K0BzD3TF 2QhKEQKBgQCNQVMGBWjXXo+pvyumYHBg8GxO9ZsfI5bzDyNgJqG65MCQDB47hrZg Ng0UwIF99v0tjfDWmzx/ZPIOJFhHYv8rSKyh8j/u9U1GbquH7bpsq7ICNl4DZ54u AoI31FyAYBP3wOIBPLe/SxA9MZhoIQVBh7d//CKn5+rnCZyRDqMCBw== -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 56 seed: YQ== zfec: required: 101 segmentSize: 131072 total: 255 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:MDMF:uqzfer3za2hbweacbjh5w4hvjm:ujlv7l7tyq25e5l2ctgjunsutsjx7nxv563675sshijvc7rpd5la format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEAyW1VZvVCO0ryuRbM14typdFJ5+iZtIiILWrHcR8W+AKXxOdk vP1XFbyTzJiigYY2V4z6yIYqq27XNcLpPZq2RuxJ3bl0VSaeRC5DzTkaKtfwlL+W R3qE4Et2YpgGusuG8w9MNOsBZQ+BbhvrOzU3aro5aCBt74C7cdok1GXut/+JPNMi AkAP6ZWfVaUYwFzTEwmhwLFotyvIXrx8p9Y62XrtWNTKIvr2WUIbW67lyPkPcvU3 OMs0iPRP8TnJpo5GU5pg75uuMpsugl83w9ixt1Kwziulo6uflBkXbeB40nbrudmH ZBExZmvcyHs+MM0hPI1s5NHwG6/1qb6fg72oXwIDAQABAoIBABjLnDU54MbSwZFW RK4N7PWLj2j8YZtvKTBKEjYTKS0riIpFH8oB96vl1F3dtjdykZLyeFah6XPEB7sG /NZICsMtSCSCtVbcE6R5+3+yVU3L2kI9WV8ALoY7091sMHvjHQAjtHJZMYlCwOCQ kELwGJvLQ9DVGSNf+fMYcuswhxPgKVv09VCjOw5T0Bmi/acM1diimO+YFx1HA8uQ gm1Ent/v2z11zDPZiAA+Z9tCxALdLOmGqyz/GWvfO4p4S26YD7JbF1bfpBk+2EXZ tHrVM/iK8BJdp6fOgjgOGCxungdU+YUfv5c65oZwQCM+qzyFzaIsICyXWptvX6HR reF25nUCgYEAzw+xZ0D3AjFSAC/WqkpwaWwpZRGgAcGRQpIp3VYNooNZhVJ+jWhq 
UEIEKuF7DEQq/u/y48QVQz07mOuWpFNa8/t2D3kt2RHWU80kLVxvBnkbsvnInjcB cNQSVGT7zFGqdQJI/GxYpQN4m7PYcgZSkhQo0CuwP37TEX7Nf/DQYIsCgYEA+Qi9 mwkcIWRWI8Y1xjcDwUwbitmm3tD7AJWmgLyTtW7f03XLk/TBLBJPf/9AhKGAN2zL P431+xoab+tiUeWAlJqYkCqrPD7eagEut9HOI7Ttl1c2NN3E6o4yYu7IxD2O1BoZ wx5a7hxywIR7f2pWsmGhOcZtMMUp1wI0IrHrnf0CgYAf2oguHD5jpfa5dKKPe/gj H7KWi8mTu5V/KkEqfayHTbGd4vz5ABEq250Mg7eMQYhjw8IX6/hhabAbbFK2YORj GFInOzskY6wXJD3mhIvH8SWjuO11+XxNQTK4rPhXjFCuw3U67+gLKqeJPHeVwwc8 1cEZlT795aLO1DUE86T61QKBgH+jgTrTIn3i5VuUnb8oN159WaiDAco2JlAYY6yb +sEFQOcq+tqsmc2y3NhnxXO1Kvg9ZLcAVdELgf1XEZ+UF6ES05sgo39PYcPHM2C3 wgX/F793zaqu99yYYS7f4Drkqi3/6rBdAJIGNrKBtKKLqD/pVi88in5yr40p7frS YkcpAoGBAJ/hMr8iXNI7cOPDx38heBeB4AKSPFBDJhmxNfhk6NbZvnZgyUmbZqUJ T3ewhYYrOhAJpGd0sn6O0ReL/R3atMIPKxdMp9lbxItASmdxLtJSFUx2Q285ml/T sgmrKsiGM+UAVULBB5kVr57LSmCDzGkNqup+k6RDt2u7zWPbz0K5 -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 56 seed: YQ== zfec: required: 101 segmentSize: 131072 total: 255 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:CHK:mftnvh5l3brshwoupvu6kz35cy:sncfokby2tjykbr3zgi3i6lhgerg4kz7fsrslrlxdy7upwkzq6lq:101:256:1024 format: kind: chk params: null sample: length: 1024 seed: YQ== zfec: required: 101 segmentSize: 131072 total: 256 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:SSK:24fiitrvlsely7grnfn3ghrsgq:2kgylma3t7u24nj5tn3p4p7vfyokvyryp7mrymngl64yib5zmfxq format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpQIBAAKCAQEA4ofaPstvgkxaUAL20n5dlDVSnjxTiNPDctdvy7WoZ6fGx4wF m9F48+0Pa6bnelrJgQg977Yv+Ne65eiryX1QS8IomGJq+8xSWYT2wfiOKd6RthPU dp+XhuVOdJNGjfoYrcKDRiCDhAfnSF2rT0ECVDDbW6B1rMoQihbC6nxPMKAgroMn 0cJfq+Z3/9txo88BCGXYqNTPJ0tB4U5kQikoylT3KehNcqcu3wBHf3i1JwAAQugi JmZPYb6KWV8w22MBuvijSW1l3GZeHrwbhBo2QfNMeEfHHuzuy7jmfXHvsMM3vqUV kB5o4Cvxs/zVAX5MkteyRZITudQ3UTV0D0SWkQIDAQABAoIBAAm6xFzFF8LUwl5E ZP3bz+t2h4K6BuvODfYn8FUjYMUkTM0zo5FdOQuKXOAGo6jlhFkb6wPEOKGWASDb J6G4Vai4q/iw5XUaLKv2ojM+QzuOI9uvqgT29tGkQK87ciDVTeu3T98aX2ZeK94M /ozMDIG5is+6f8NjobvknWDn5O4q0HsT2eeDbDf/FIp/dIRDjtp6BJaDy3Opql7L 56RWuBj9b3on5dC4zuzKJSM6p0wVsXyiQxI2U6qxDdH6fdzolpeyrpzjpM2lK9I7 rhSTgm9E1D3nqgC1mCC+5MVuZNXuTc19Ma5pEP+Ntc7LonOKbFfe8ysC48G8Gn14 oJS5Y1cCgYEA9wmAZuzANhyraO2rltuBvWSVny8YiYrVNAatPzNiGTtQEUCdTN3m MJ4yprzslUtofLqADBmCTN7eUpYBVuTFSSKK7DJAaDHzIy4h7o12rS7ZD6tkj6D9 H3HbmUJLylU/qzx7eHX25WJwEkU9cK/ufoTc799noJYlBhSltNLGIO8CgYEA6r/i ugpXuN5fZ2Nu3T+WPQNGEgwQdhrWD4J6HnjlEFfPuj+gxIe42vEU/1slDI4ujgHn bFx2lN+LsmbkDsweOpAJV6KqLUALcqdIrInX2BEfhs9KgqfifGROZHf+FwTb9UxG 4g+FeE8W7FDz4ENa6i0GvuLRery+zLAoymApwH8CgYEAgf4X+REf29meQSq/njSH wteI/CjWKppJsoTI6XbqagiSC2IK5AXoOTElyiOkArOZmfixpKxPqo+kQaT5s3XS cregjsWqqqmOHbcK1/LMvjjms54m3oWCbOeG/NCr/R560GqVNkAs2WvBOXwB5qhN QXo8oGTYrOIVPWvj/pDi/TUCgYEAjO9a/XqMI+9Ns9KckrRETKkUfm1DzMRb07/v 9S97xo4Rpq3gpV0efEPU6WIdIiaSiKtX91Sj1MlJI3hmXwPo+hvToAuGw9f5h4Ir PXscXRoapWL6RuroLOpDrknkAInoTKLYw4uyBALnrkUDxZZqlMEnlZ6zSU7b9iOk uat2JZ0CgYEA8sEwTSjLOBpLUxnFSm2oVA2lsn3cNr40aHFfqA7Uc4C/Q/znSfV7 MfmYuykvFAemOzdHqsveZakeOwJxe9gQQ2Ar73kiHtUt/vVQxCoAFnumW9SBvjIs 3/8J8VeUUgbLada2ki0hVIWWYmIG5NmF3+QB1c+/Jaerh1qeM0wUnl0= -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 1024 seed: YQ== zfec: required: 101 segmentSize: 131072 total: 255 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:MDMF:gdvwak2g3obbqzs25y62axubiq:c4j6xu4nazb4axrd3xpouuhhjso4oedqyulo7w643rimve53rkba format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEAsc4qFPbdyhiuLD7dSe70KFACIY7EvzKOzcWH3X4Dx0LZ45TH 92euaqW27FGBM2T+z2SiYaTAyPEDeqoOiulRZxZTk2HFv+VVUmjNROlbh4gQ8GwG 
/WwCq7eYTWW/N59N/5cSlyO81KqF2BTtVEjdt4Hqx+5D8nkwLoDDd4hHy2EJAL64 +TqdS9030116IVSHYuMdW5bn83UQak3+ihLX3AB1xASfv5h1Nwqk9xiJ8PevugC4 /gbcwJEqAzxMEXE2jBXrzLaHs4qeCl8wgzg7R/pHlQMOYBzUCDna2ovT7gHHC2Bs zmBj7uhHm02uu7dYK0S3WtzYDtbbA7qZrGMLvwIDAQABAoIBACTdxtru7sCrCl4R MMfWHFjJcg+sLv4nyPVAajHSIY1svonR/P4+yKrDLmDka2IRJEYzKvoM844Wbu69 mONTijXSKsUJxjtKHT1HjpwluH0rCLwY4gAkp48cM5+Eo7ewN7dxhwDAf7QmoTbS 6/yIWTRl4xzOOddqKkPSHfVIU/6GiR0taBb44yz7VCp7iMSqa1VHSHcXBxU+lspp f53a8mZ8+nupsLGTsne70JGe0ipq9YqE370Rz4J0Onj1EtzOgQJN6fQdZoD1M/Jq ldCIs7D94XZ2J8Xp1WXkiSE4dRwNyfJiANFvYbXt9UOCx1XXGm2tVYTUhFbLDzlW R9YFYNECgYEA0L0QDQHCJexuuH4AS4BmGcLj1tSrHA7BViSrnhPrQTjaWeqWdiZh Kv01tWEeIdODCmSIJPVMS6aywoII2bgQAFvzOX3tAl//Kc8wdFSiz7WASyatVW3r uv7dtTGSsBWN7fXYqvjr0eB/mvyqVBlYzk8qgSpaWEhkkudn/IG18jECgYEA2hAl hl2LDt0D8En7LxiKIIB+0s7cFMJfkjNRHyXcEzbmTNHhwTQ6puYE5TwbSRw4JMiB oDIvIAMgA51Hwk5aC/aGoo6z60wvHTzb0XpMlSWvytuhN5fRH6xXY8TYM9I39ROI xU0qvGbKgQQulwgEzRVGc9RAk6BJ+9dN5DH48O8CgYEAvqTuk/KXL6vRNA9glZSf q8ej8AIshWO0kMjNNYNbyiXyx0zKPv6uoGTDOPWKX7qeZE+NSLQBCtclTSEWlELX 2nwgmNG6NgEXO0hQKO9kA/DxS7H3fZ73PcKpG2Q7ZTdKeZugWAcg2n8ADL3XkxfT VBpZ576W5Sq5MLLI9oZBdwECgYBMQk9NMRN7bDF/a+/q5XMQsL8pa+wtWlhf4ZBi CzRuh8l8Xf3MOj60tUZLAH0uUS8VNgWXB1XRpSYh/XPl8M6u1lT5LlyfUfI8EFdz Z4i2tApJMAuuTGp24CdjnahaXw7wpxcyoKzsXCo/ej3s1YIQUntj8Htw5SJab04v GobLgwKBgGzt/D8Kilx8RmDCBSYnUhYGqpw2m09knujK1O61S5yLODcUVgzp1CL+ eTJQGAZxtGH//P1FHSl1VE3nGCuwCv44a3xZ94+BooL0NHFyLbLSMbekz4Imnsva dgkZ0y5hxm3MzWWZex+5YaGOLTzHCZbv+ec2SLnpYT9h9BMlZE84 -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 1024 seed: YQ== zfec: required: 101 segmentSize: 131072 total: 255 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:CHK:sshu77h6opnto3jnngot3lwzl4:4cjhp6u3i2uzdzrjitje3inlhs6gnlgwc6wgi6vtedc2grgvcpza:101:256:4096 format: kind: chk params: null sample: length: 4096 seed: Yw== zfec: required: 101 segmentSize: 131072 total: 256 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:SSK:vfgmoktxuv5e5ajwayx7qn4h2m:n2se3m5k26k474losb3nhwevksf7iewruqwcwvk52dyfe6ihieqa format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEAxsCouaG6eszb6f1uywMum/M3wvq8N1j5JlamB6q5AJp8WzG2 ABOiemaykaAf2p47/yA0V3JTYS4+lUye8jZhMaN2iNKk7Z4aSaHNc+uZEJQxVeVW CblpobGIgtSdVHf+bsZBWh/rS3Eo7msxtmlx8EZ9H3ww3gJMwuneIfy5n28R1+mF Mxq0iFO46PaCP85kbvFwjH9AUioImx8uP8L6eUNdx/L2IqAd/cQhT/3eVXF/n+fI 0rydpLT3F5cKSdKqta03A3mFD/BLjbwbx4dCFYaH9W5IRWx8ytx5sRcalBe8g06N kXA2WQeAqCpMVTs9KmIvYIU4QeblU2d9SXchNwIDAQABAoIBAEYeWChNb229cmRl vb2vnLT2JJkMPnTEVfn5nc+conIdDnxZ2FzEkJDgRGVt+W72XjJO2Uh0lAf3+apQ gs7u8nFBuyLgNcGDAsExbTtVRgX8Uj98jlMV77dU29VUT0EqqD/Kf+nc0vUlsgwT E1HId6MOKzx9YvwgEZa+TVjuQUqGlIhu/inRXtJryNKXoK3p9X2vCxr9bGcj2urt 3PWIj1EhcDagoBFporOFcvknjEI6FdYFxHUMHABDricPmsPlyWgR0P0T0BfALDSq 9n21DNn5/dJ7SUt2oCsHGOjVLw6aNZ3hVrpiFnkWYo4HsUBrDr3ChUj38SC/1tHc iM6oRrkCgYEA7RmTZvzXxZCMNBwFQWsTrlfl8HEdyKZNhT9VR3F1zES34Db1FeVI Z4VyQC+oBIVef4A8HNEmdA00SRDrxEGVuxBMwQZu15d0HlZT1NMutYZQq7E7h3z5 2YIgVRQ9lG07/jDaEwjfcNLo/yCYFzfhx2O7xCKKzdtlYIwU5AlWxSMCgYEA1piK jJ1kmM5zhb+jxF+lHbUd2W/d4ZqQUyQ98Hetk5uV2aoBbCLiJaTiO6r/ZQafgX1s VWx7dLC3sV9qUKI++ymkhV5vUmgg5eQkg31hXg5SIUNVWypN/jOg33e5P/TdUDJ3 yycy4YuwptaXL9T9Qp0Js+RyK9toC9zSJ2ShZt0CgYBKUDLYG7WRca3QA1xOVb5U ba5fP0UDh8RSWDhlbRVr0boEJ5WHqFaaQ8Q8g/NYf2jP86Rjr9Yql5zkrc4HtDq2 5/P2qAqDvi+h9pLN6OcB9DhCqAktfSleWB/EKtTmOZqNIEipoKVP2ns2w8OHu3cj pInMfrscrIBI038UviyZGwKBgBLdK+i6eTpZg5wxQXMkuT4ISsxvYgDP9nnoiK1X x+Fe3uhYYnGgC2MlwGFgYbz+vQzD+r7zn1KdqjgkXBMkgAbSHU1ABOcokiPDT1Zj sihzd9LGuX0fFeYPocejHZy6qK3BEfjAxF9BSVERMg8ZWP3/EfhHT6X9ToMkcTDX 
TrzdAoGBAOahVk7AQlyZ2doxTPLA8+gcJevkLoEGmAHSKIZwvyWoAitfAlBNBEnV zfGePpIJF39DbRrqyTbYTVBrKp3/Z9KWM+QORxqWlwFd//lw3bTLMGL7o0hX/nAN X5HWJ0r2CwMbRMFl0Jgwml4ONor4WcbM4GGdHKX0m1h1c82EuGFc -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 4096 seed: Yw== zfec: required: 101 segmentSize: 131072 total: 255 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:MDMF:wosfoa5glh4le3cvced4iz75au:gmw5hlx2n5vmryjujpt6bfxntgfgbdcqwykgxaq7sqocsgbjtkiq format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEA3Z+dHpI1JFhLpqYkOBcmoiEbOS7m+DJBxmWsBpy3owBVcKHW k0kxM9+bq6oyiUBP4MI7OPcH5A+1dKaUaTBUKRGaFvJfPlsO1PHV5wyD4KdfOIeM ZrcZ01glAIlezAdINKLXUgnOE9cY969/q7hcMLUVRuCQoF23kSI3pEQDQxC5+lIQ /GjyNAL3mCgeYGsVeG4Ey+cNgrX3SaBjKMW6UJRNHnt5oszDPNi86ErsthDwmg6m lgj0qqXdOYYIc8JHq2e+3ERsota5ucBHnfS1UHpcFxgMkteymrzGmKd9Cv9alAtS dRyHlngdt4VK3Jw4EIlaLRPnVqT5xY4huxQPSwIDAQABAoIBAB07kwl3xpuzK9Nh AdEGOLnU/RbHWX7ufh+RxKWgoVZWUm7HYhrOYjeR7KIxknXpLkAazp3+c6OA8PHg kR3o2okKQdV6BdcfQq8S8SCHVZPZ7+TweDQKPdVTQJo4BHGMGlmbCyTOl0ilrCzr kL8RU9O25wYQ0/LbOb3ikg1QTU/Yk5SXqbquyK3c1Amz79NxuzeMTDEjCpRlTbmn Sq2PW2k7jNoULTuCsw0nEl6wtHADPx39xMYWgfXoK4caiTY+JbG3Ru3TJYHHytHF MTCvJWFX3ykjVPtl054syo/z0krqcacBmagocA2lG4Yael3AcxbOox7Z7zwAZjbm 2rxAeBkCgYEA98QXfpX/xuwezaDuABm+5cQqIXUjL2QG9cO/7yvdQFs2QNHDztqr /WpGxlJCOAK955HNWJ6AVQQaNsl5LZ2HaxiRNwSEiv+FKLUo6KknPDE7eppDu1en mpKrQa1W6+cG2Ufsg5BHr5VQr/0KbeIQ+izulsEfqxMq9PLhEYpnNkMCgYEA5P0c T7ptCa3eGJEDq/Q1zH0BMJMgczbljT559psTmHVcckW6R1x/SN6p9wH5vzUDqlVy qeD9Kz1aG4YvWJXWh9JgSqgkeJVvfRm8SUR8CDunKshQ8fSbqsFLDXaModIy6sUS dEgUpC1c/ufkrPeeGI2CZJy2Uwnuqa0jUtGm5lkCgYEAySTsAfuapA7LTxroPTKp lPU1UuY7A11MfTdG2c+dloK2P9dMBOHoIRqnjJf5ZGltbNMkh15eRybGdVYJR6wM 5TgTpDvJsuKQYyT3qjKxRJ+fbwBQHoah7c5GtFIaL/flyn4mmASI/hXVZJqkXeLa 74+MvtzYbdVo2WVYrRnUgusCgYAdG9vcertfrp18C/smgb3RB9b94MYQP1tA8D86 zQ3ZpJmi4SBD8AsyLTP39WVVHB0iKwiPdc1ZEMyCkTU1kp6Z13FsLCGuvnhUs8/O lIkb1tFyS9KWX1zmgPnUdUx9SaY1V+X3qC4PjMC0mq/kGPoc7ugzeARpW+rd4OeL oKERyQKBgE4p6k5qG2JtFc15uYeRX9Dg+zcGDbKDJIHK/NlF740a9Ffwao7ul69z qx5HycLgIIk+uVamRu6Lm/8if1E7bwf1Phr1GV5qLm5QeVQXhAjCFL3d7ieNrbQr 66YNSC7CyUg8oj5r9CPGrFrAQnSqT/OH8Q6i4RTMMaX6YxK7tHch -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 4096 seed: Yw== zfec: required: 101 segmentSize: 131072 total: 255 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:CHK:kl2jayrstlf5q3gax7o65ybivy:mnn5doti4wllssdg35ymojuiv4hd2ir7i6asurgms3ea67watm7a:101:256:131071 format: kind: chk params: null sample: length: 131071 seed: LCa0a2j/xo/5m0U8HTBBNBNCLXBkg7+g+YpeiGJm564= zfec: required: 101 segmentSize: 131072 total: 256 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:SSK:rems4oug5xkgjc24vouktsjheq:jkytq32l7ih2xsaosgrrjhsblmyjnxn23zacvefyg6lnh2s2fmxa format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEogIBAAKCAQEAqt7c+hoHCRc82m10tbn6HkGoPCP8ezDEw0jxvH6b1ads4+Fn tzavlbpSY6oXnSvISGLfOuxnDX0iBJ25m9qJaeZufP/uc94UPxxPIjyKrC/87ixo Fi8Fh4KMd3ZbJbIoJi5MSnONEoecIx5lgPf9ckRsq0UqsHHRacWiA1M1c5ZTRw7u LZuaB0udxBFuvsPSB2vejvqLMqmiLUYn22yrsbFPlufUw6hh3QLbjiW/qQB3KflG 70W/6Gog2aLErsu45RG+uxoM28HHuzinYmnoSAUWmA6D7P+gEKtyS508u/3EWH+l O5BC4s4CJRDDaoVjShWwcICwotnQGF8yCjwlRwIDAQABAoIBAAeTD+D8Wgx9SGLM ZayP8nZN5cseCJleEj2FPxtifL5OCNsvM52hDHaK3GUeS06vJqviyiTUCZtvonGU RlU81548q0WE7jPvsnO7vwb0VRdd8eHi+7g5XGQ4lXPO6NUfySeBd5CjVQEZxted siqIk7vG1w8JPA5h9UJcPZrVIPJJ3eF+wtJkNh5Vv1CUyR4yER0ZDulSJkrccxqW ELMkpLn38wc8iFsa2yDWJy/h/e9j5ElhXedqYC1Ii/WKp2jPebd+rZ94/GXu7/yC E8xABg1p6MwVkVgykYwYrlS56fJ3hTz+NdGpVA1e6DklkD9KvyL4JEm7Wmk86Og1 
A7cQJ7kCgYEAwSC34k+SpF3Gzci1Bb3DJFSwvX46ASHr9jRQBItP8NGgw7leIxbo 8ebQo1W/9peZiZsBGGXSDbL66+ZJ9BD16yF5hoKzE3w3AAEmYqqyl6PQfGMpatuI wzl4AmF+jQ7dMbGM4UDXrGZUbM6WgCcDcat9LEQafct53bcHPxK6oW8CgYEA4n85 iuvaH6HON4lik1yDMwlwgZ//pt1YpYWg25Sb7SbhzLCXhUP0r0XYdCrXROos0AtG 59c4uMY1bG2lXXpKEv+r6yHkmVSshag8gle2LBPewYmeULxoa0FsPHYW/Wwc8SEK ogkZzuTmPYXJbqVGqSqx5rdV5hEqzGBDf2giHakCgYBD+BjfbDPm5x4lpIKZL6zz J19AgaE2btLVxpl2z/Tlg1F6MM4BuXloUVySb4Zs6fPeaxAanxMrQRdwWI8kd6el BhX4Eh2mOOw+cykoRn0uQzgH3vpfoj3iv2IOLHPWfym36I31ZNXC1gzWcmqjVZev tLQMFTfhl/Ae6OCDATtvvwKBgGe8XV6DJyPVt903zy4u8OgvKpgz76M9PZyR11q6 da/oXwKg3sTqmuar1rdd57pohp3CjHci25fFMDK5BUQK/mI1N0g5/bk8Tsfohc4s 3gLSFvQNU7UmlayKCkimzWDEY30M5RHRmUBpFgqXe+pxSCuyokhJL85vjmqMrF1u FDIBAoGACVQqNIjcyMXkw8U2b2PhIIlUjTOp/l4rZh5xXdvzqnTPGCDZnVSKO3++ XNitZKlJUNWzJdUxIE7V+WgDTcNu8RNZVfBJetNnaLGol4hPtBE8QgD+qs6rOg0e rNFOU9pBTSeDNM4oovChqmOj6zMkO8qeUlOmlLg7ELkHGLfwxak= -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 131071 seed: LCa0a2j/xo/5m0U8HTBBNBNCLXBkg7+g+YpeiGJm564= zfec: required: 101 segmentSize: 131072 total: 255 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:MDMF:ynzckg6zuaoi4ttwllb2b2o23q:sbrcnxnuqnnefcqeg5vglctxu565gl73fubzmapouivrxjqz33zq format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEAmCloFzFxnh0SodP3jlmVdWPwHgx4mCHU/qAxxWYASfX1h910 uMZTsJZ+O8AsIFfkQtCy16C6VneL68JZ/Aq4qFGgajfWziGNrFbPoe5gxpl+06vM ViGLjYE4Pn4qM5tgIe9U1xby62nbYEQVhe/ApFD/DMhCfIhhpBp+4vUtSzXQaxjy FFyusFTNPHD2fD7U5CFB00vStQmvmSSXeKcdT0ciQmxJh1dSgPVMbKFO9tM9TeNq HwQzOR/YZn4Y7zxqsHJ8wQS83YBYqpvDfvK1ILrieboW8EJmZsiM7Tq3NCcOBOwa n4dYjNqgrfkcQ+R0eMqyhTBoH2U9dPe3AMZ3kwIDAQABAoIBAEsb9VOthmYD99SF 6ycLNWly4W4Tvdtqp9bggHDuPqpDjOV5/UnQLDN4tesMmzuD5xrMJdumbRSNgjXo A78UE76SPFryIUgy69nsKCXIo2ClGCOoI/9II7i/1mGSqYY75iIaH4jkvRhTcoR8 Vxt8E12I1b0bhSYvs/LrWULyv17l+GBW3QhLZRYpAVpXipySK2hvzNS13z6SPo7+ wwyYg5Si5+WhY7DJP1yGku7ihLGw6MbHv8Fqr+L7bceeCAZ2BAoAP+UlqQo1UumM d7BDpicjhbQycscf0cNVpq77uPtp6ULQjkur/GOEU9CkKr94uThABqVID+oQ59TY NuevEJECgYEAvEF1f151xSw4jZAsVw/ZV6F3xslnc8cJjsXAD3mb9bbdVUxKU0Ms VQBwIRpx+FXM/gv8dHPEITM9U4DM3DjAVZEKIPxmCQwiaf8NQyCMVtoD+2VOpVbX GSu4E43uDVg46ECdyrTz2J+cc6so87sWbX4MMXI1z7kJOKhvg2cDm+cCgYEAzurk DvXW2k9XvvEv5dYdKRqgUpjNqNqFij0nQ9W9JHvz4tR5eWgSXMgfHyeJKMrr3Cbh q1IgIyJtF+esFJZ3cPdMzf7ZwEZPF/ivngnTgdlOQ69lO10pVALmO2Od82HBYjxS m7mvRoK88E6j0PODjmcJqLXM5MN6eHKyvwlPMXUCgYBdE/RXNEoAYfvYKmdx6Fkq laAV/jCTMt7L44QxYow08eP/L4g0IKtDn8LQ6zVcdnezSBPbM/3N+Hqi1bT0UW7v H5YldwWwBXric4OIJAifTI3Zd15qK0SQomgR6wO/P1Zrpr8doVhLS6dcHU1TLLZL Dp5SuEhY2wDvLYBtNLq5EwKBgQCfXBSs2PXSSQ1BR6wmDVOEFrenJXwvMa1rnFGj UvhLIxPgfNfZgyexQYeGjQJ74lzovyFKuwN5S8hNguXrLT9sR2pltIOsK/o6chN/ Wf4FoYE/a9RBdiygQWNkFgLOMVmo+OB+gvHVElfFlCtigEmv4Pd1ch8NiOfH4D5+ FwNhCQKBgCJIoZu9Cef70VcRWjdsNxbI6Da6zulD8gIbsw2yyR+c/qjQMULIf0GB SFEFdQNNn6eWRgwK0hyQBi1UGpR+kLEf11T4bepWD6KNbVwEUuve2TRTCWf3GjKp MTMJv/OAU6uulvOsoBFhHnhgOj+IV9M8GgeDyVPXYL4+IOjTfxCb -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 131071 seed: LCa0a2j/xo/5m0U8HTBBNBNCLXBkg7+g+YpeiGJm564= zfec: required: 101 segmentSize: 131072 total: 255 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:CHK:a6wuekh5mynr4c74ma3b2gswfa:b6lovwnbv3523n5kaxga3ruxa2dvxef6h4a2jnl6lasn4fwvvdba:101:256:131073 format: kind: chk params: null sample: length: 131073 seed: /N4rLtula/QIYB+3If6bXDONEO5CnqBPrlURto+/j7k= zfec: required: 101 segmentSize: 131072 total: 256 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:SSK:n73xrpurczd5aoyiwcvpnjnmtq:zq6mwbtqsg3kutvuhwrlfhmiu5s4qmtbkccv67p226zowcylkaea format: kind: ssk params: format: sdmf 
key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEAsIX7cTKvU5wt4iW8FKUMN5i+uZCAfZrq/KdefeHFnYJStQTl ov8IrO7aArYUh8gK/tPVCz2Yl6JV9A1Y/8qyzBqRVAT4vIDNwqyVwlhUJQc1pwUr xiSsFroLCR4ljJxZkp2CqJSyrYHjVwAs0Sy6L9v16gV8wOAGS/Z879nb2UB2RZzF JrRAlAy5P54DvzLA2t5VMzV4TeZgnz/YlYHLBl6e1+Xj3JU9KB0YoyCbSPV8kgKn PxmKKCyVxfFSpqqBWN0M3s3yVZ42PffvErzcXjNzx5LFj/dCRBmU9aZnwxHpD1SK d6EVSEZI6Yq6Y5PhYW6bqTUImHN6+yobbPIkKwIDAQABAoIBADBicSK8dVEyGGOJ 200/ViNxEyoS4R1Mlsds6toPRdbgD2J9tqHgTNT13TzsAqGbI+RoVNdxaT966Btu gywNt8d5KseAW1tz5LJNEvmDs4C4wqyGntJ/X8oU8YxsvncVrfmhgdxKcdVcKl/A 9Qfavif7HyMnoOPPI/qzU9h8eyXHb/A+HBAUsofDpwcATFTCfgDDAQcY8k8yAD7r xI4VshbyVlIZrYgdVLKjkHNf3uuUISQ6Lam/pMyatutyQmGyaL/NSG4PbSYSLlg+ q1tEmHqDeGWsLupb2qD9Xsk/bpDD5XbZpernC6SVc1oBSNb9AV8w1qGT9JYBVcHe g9rEx5UCgYEA6gqEZVjgo0kbt+F018JjtDeC5Vd13nkZ5jVxYngRZhx2rIu5bysr hYfnW0iEWNcY2x0PJE0Z+mO78g8w/pLZFYl0FIgGFSqIHMHrEGG0oexos5ndDl5R BlToPxA7Z2tbsNK2YwlKgM+0JeMiecJXEdQ1uPmFKN30CNFLzHi53N8CgYEAwRXv SoDEYN5YZJ/rAPLFedrmm/ima6YCBT3gkVnaZDwX4ii3hgSpr6VHktukf+dIgi9O /bLId1DmGqFLLfaifjLBD2DO9UCzWP0WOz8POq2XJg7Kdsc9UUq4dfH7/rGdk4L+ zvWWx9ZZWNaACi/JXUpeAUyDIVWHn1KLWXXL1jUCgYEA1HxZ+d24jec5WDhEqgNe HGft2qUOab8PSZgp6lnSih+7iyqMYCcUq3ZZEeKD7ljTw1PdxHqP5GoaYEl0lRzk JQ6XqnBY/WyRCXLyJPxgUEbgRHekYIA3FgWOmnr1RA8Pvzl/x+jOkKaDC4btbRiJ jrFZWSiJwjHJdxv2spzFOocCgYB/V04HnsDk+f7l7in46COg5+NrPiPTnxp6BoMS mWXU8WT2/M98jZqzgpefnUfyKsDBSx4XZ0+akToQmguQ9rXX8PUuhTQ4v0EJEXEW BdKvakjjCqIwj9o6wMLC1qLRKKa54IzYRVP52731PxIWpclxw1gYFzPsShI12ySY DX4veQKBgEXU69EPIoJcr6VAkK8nBNR8IC3KyywyYXSPd2ClRxFn7voo7anlwbFx pgsC0o6FkIswGmo7sdDbdpWH/Th1w1dsZVX2ZsIvG9JATj7/OJ3trt8YxTRz73SE 0c4pi85sMLZXs4mKyxpP3U9rdIHi2+2FWzDoHenktW+pGsxRhc99 -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 131073 seed: /N4rLtula/QIYB+3If6bXDONEO5CnqBPrlURto+/j7k= zfec: required: 101 segmentSize: 131072 total: 255 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:MDMF:bhlcqlyropnz2rikjv4f6fcaua:tfd4e5zhktjok5wdltqic35h4yya4ohubpnymwu6w74qxopby7wa format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEAtt5BBPkmlQ3RzinuPNFTEEcVwo4ApkrWk+zeeAv45dR3LNpu um2wFXqHUc3t4dukG8bV7n2zIbvfJ2rjWzCqBXAPgzHqm+ks4fjtsazKTu00vU8G W2+zyhDvFiiNJpXl9C4E4Pb6emD5vAtUyc43v1ubBgmPUB2gu7ZBhZHdQY2jL1aF EQIbtV1tzjx4eV5MpLiv/NnaEz6WxRdd0nJ2kfecxgvTZWdXxn5HnKEgPqmF4Bw8 +6sn45Og1yh84TzqYAzemdi2eFozKLztgUH2803XdPzrf2dJ6a2QUOOxYtQCiTdj 8EM46IWvl761+XEnHxyjvNCYUwVSRWO2QYTrrQIDAQABAoIBAAdk8UgcMv0v893c QC/hXvR3i1+0kj4nJIoSt+Qux7+zWaZMptGPAeG8dKBQLWBGm1osLhZYqtegWyOi 5NKZIybZIydw60WmphP1FtdqXzvVx54oBd/IooJ3MNO6jAqVYRkAi131Xqd4KGD8 LE+EsShhseEKnerlZ8xvUDLwdPvjhsZZ+ia7u4mHTr3JGHCVwotiIjKt6OZUvmR+ CJ9G7QYbScTp2MgEfDYZwq7PnzapZHSuaIE+bLlZs8AO6IhVBfJLORTSxudU9SJu 2E/t10ubaI5j2BvOcv7WIca0SrWGmb0q54n0UUog5Qdc7vNnI9GYkVygAYLdixkQ IzuAtVkCgYEA0iyiXyNtrUloxrEKrDRjSG+Bvh94uUxm7MW4uZoIWvj5h251Wr/O s/WIEEwgS+Qr+CUWBjzgRRlrRzHB13B3+qcgkOZsJ2UKsikcnZLUQKetFK3MaTjR 44/EzlOrY+2gc4nmf0Ihxh1Sn3IB50x2xVW9Aa0SoiabPYBanoIbHLMCgYEA3r12 d2SbXFTh4rWmqq6lUP54rQEcxfInxnkMnSNmBXjj0bM0i2auc0O601724H+HGybA KHtF9lNLNuRSsFVgcQtPSUCsJooliGY4rlXHoOcXkVYf2ElthYbRUgqLcEC3NDZR Z5eKC/bYuS3bHFAJVYsflr1/J+/WqtVWu9DOxh8CgYEAz3leRFqd710zQEkOxxXk GGJzCnLY4trIE93PT/D9ZIi5Evd4g8Aq1b2Ats3fZ+tzeD9r8XZw0eWY4Cv/NaSB 2/7ViBTfGTiGiX9KD0cdnkGn+2ziB9EeaOzIlAFGhJvUM5oi3ucynfbeVCXgOStj Z8QOk7P9W/KOdvTY//Zhuz8CgYBWx1/pQiQZQ+TBi94EL4iu1oWzeXR5Vk/SzoRw kEMGLMQthfEZwoaC18do5F2wt16u4FkLLIPkZS0vlKL2mjy5rhtUwcKQPVBEJPc+ TKM69+3BrNk5TdpCpHTWzs7mjAAUcnkir/KTmLd05f2wuSn5zvseonNw3ss2wWlK QR7eJwKBgCzDobkvlnchBuDD+TPuhU2IiNLkLTCoLB8Ucifp1+Te7gB6nVwWPkqG 
FPafuOHUkE5HbZnQJr9YKbKdaRJRF4uq7Bm0pgvYiZrrLalAzuaR4XlxkHHGhQcL +/IKsYJQc/APe9+VNpT85NTulKtBx+1tLmSAq22opfBAfFbu0JrV -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 131073 seed: /N4rLtula/QIYB+3If6bXDONEO5CnqBPrlURto+/j7k= zfec: required: 101 segmentSize: 131072 total: 255 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:CHK:pmuo6vpcodol76sexq7s2ojej4:irp6rxnkgwbn3h4g5fzgjpqgsvhvycxobr3xrk3zr44r64wimgqa:101:256:2097151 format: kind: chk params: null sample: length: 2097151 seed: uqWglk0zIPvAxqkiFARTyFE+okq4/QV3A0gEqWckgJY= zfec: required: 101 segmentSize: 131072 total: 256 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:SSK:mz4aqmo7whthklijqffcfixkoi:bghtfcymccbxrij7odre2xmjc4boyyzxxqy6r5rjgo35k4xmpi6q format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEAqJwmyXuOk1WqyoiVygVY90sEyxvUgibDOxmwpbkztfTykTVo pD71+dXfqPuHZoPX2LCLZaHdXdQdxV/zmsL9D/DFmZs+DeIL9DEEzgo9ehOKThpo /2NTxdFhIe1f7oWsZBStDcIvgZkVeqNryHpt7i+jSt7j/LR3MyLNQ4hVdAoSB7Gq CzvpclFr5BGs9icArp1xcLu1BM8+Rjk0WVmRbVqUdW3S4ksagQl+X78betPl2fb/ QiOWPeNhKvHD978C+iwEX/mrTvTL3p7a8kBrxO9NIt+t9aEjf+pSGxUZi5YCOl4i kREf39CLrgCeTXdvnYJEQHVSAM4RaPX46Ce17QIDAQABAoIBAARrg4DIomYuWrnQ dszC8yAYcVm5swpuZbPI6p6NilN8xlcUJVgY5m3UM3bEkToYvrHJfv39DkaFZvpj l4k5D1U5pJRwQ2ItyM5v8oZMMmxe0sNVYec//VQ0Nu2iwV8JVgmRmS/BJWmqT6vV WN/6haM20HsH+MYJHQ7UHLlme4b9KZzBv87rD/UOfOU+oB6V3ydvMoxdQggm02UC b2f/Mz9k2e1Vn5C/79Q+V3PjPCp30QZNX1MQB8bIAXhMhXK6Dm5MeMcTpJPHsKkJ GW10zDfnn9+OsaHc9exFt2otx5l+55L0/xxoIx7qX9/gkQaIf+EsYmvBhQsgsgXi 9z0WnsECgYEA6+q0Da/bPCjwWQddvEAlGhmJDR7e+UWn0PLFPtzmIvCbv2FCbEtj MpPCoGoChco91gRnr51OpeA+eoiZ08FW+czbN6Z0L5wnJCIAMO42HarZzGKstCAX iB+HsETXwkSSdqbAU76KDMsRI6mjQFVXxIKns5iCvTCvbC3X094taUUCgYEAtvak Hspo5BrSVCsnXoARI3tlwRHRDTHDqFvSquqg1+XunerPxjLYUpdr3ez91+UKXZmZ M5CpmdGbU6oX5smz29MtxVdo7LwAjRrUiXLe167R4p+hXUpF/PGTQZd6K5OHtQWs ZpqcmjnHCYHEDL5g35r2o7VeDDkP3vXUXaem4IkCgYEAubUemSuWU4wSbrKaueZw jlQNi3OCqAyJ5rRESpDO5DAtGgCwrdjGNHkWGvp4E+M4u/Dpwdb9oxubcw92r7ch BTCaW/s+uH+eXBYbumi51q64FeiS9JPSkkfnovz+LqGV/aqT+RgjSaDMVBtkM+86 UKlc48YpHE5nuKt5mwDpFFUCgYAiqCmCY5jmzGXW5623as7UR8WIgtV0iF6lf28y TOtWugkvBJGC25K6YlBeY0vaH1qNTFEGwXo+1sNzX57TapWVKDVdUidf4GTCVbi4 qHepp0W+hbpNL4p+VUwteoH7yyBDm+WCMftEA3m+RURbnZw//tyFOg+shQqKk0o8 y0sC8QKBgQDTLI8FBom/UNpBTOwISJzNIQbDv6ws1iidoMXhlnB9G/Q+7DHDI14J ByAeiAPOSav0ogNxoMOdHu6KXXMACriTuFa8s/h4Sno4vXETLqJrukjs265TQXZU 1EB0u+QnZVYXUmEGIfjr8fhlOfEkUGh8DhEW+U2QyM+wlXChr3rr2w== -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 2097151 seed: uqWglk0zIPvAxqkiFARTyFE+okq4/QV3A0gEqWckgJY= zfec: required: 101 segmentSize: 131072 total: 255 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:MDMF:far7xpcwexy5c6rsnh2utklpfy:fun6nzy7kxfr2zsrxmkwy3rslurixuq5dgsvzw5tg7cl7mnffzma format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEAyK1QDzzJ+fA4Qk2K9m3DVSqHCiWtCNPWu4JQtn4Rz3ligo3C 1Duq335a1RVYMayuOma1miTKAS1rp01Xi37q/o5cMEL9Y9aRrehvomYoezDQKfP9 pO4GQJ+WHqFCDeCj2Y2/d8fCOzmc0A94yNAJNf7+Y1ymqLefS2aoL15ElG6amQT8 rHKi667pK5rcHA4q0GK5osaJRQd6TR7jmzcZeNh4fq0HkNb+8fCYq2ZXOwtUM9Hr U9E23/zpqZCvHNqXVv9p3H+psPW1ZaN9yIA8RCtGuWgkt7oRJ3K0ovzhe+oNOq9A zaMFMTTvjURAvLC6FloZ2atK7AKgm70fIBngVwIDAQABAoIBABSpP2pqNFNe57Pv 3uRTVb6hg8jIK0IS6XNhzeSUI0pMsZdGeC44vHWJQVnZ+jwXDtMlewIVUpT/c6uE e4R5u0EdMCGp7API9jPFECVUxks0seH35IAEH7GsnJyntrDOFaCTBwkSkI1fd6U2 SpXGUYR1LgTV57TMPwLY3W0PFBmCAQF3TU0RQFK36cOB+ufGEKds2U3xJZWQV9n6 gVt6Izd8D8s8bbPU1bUH0Q4TZvsl9D9yabAkSmh/3a2mta3l2Po0rhtjfNXTTib6 yZU5IU/aYO2fWBWXK4GzqjE1daWuc9zXQQQeRRh5OVU+LVJQxAbjEsTlA+wqOj7J 
aCgYIdUCgYEA7Rxx7iM2PBjPm6xKApLNWAyk+V/nd/ccnJoWx4uZJeiZrp257wlx XBnSQAFE0HHvQ32RROnF/dHaBC+98GUH7RQLCYcISnsphya2PdOZotkQRXG/GB0p 3pt+YJAHygnSiKxtyWY1Q+6PzA8WA6crkypq1NYrc9+4x/MHmWKm1u0CgYEA2KnY GO/ESa/dGlaY0z9Y5iIiwetnD/zJGkb76wsQfB+vjfqqGgSFisudzW3O2mdDz7Sd cjTtvdplDpVLx6r10jqyWU/2HvJpLVfpmu+x5jp/RQpqNnkXLyfrTX8K6ZvcKLX5 dja4z6Fee5c9VhY3/PRHE2Ovdi4uoLdKjYSHR9MCgYEAj/WLPphmX0p5Ef0i2jkj L2hN6ZJOyMlht7reRbz9+MQmOpxMvVKwXsjWnEGo9B2YtRNR1dNRgG+evJf37DKL A2f944T2hbINXp8kWplUWEkN1fvfl9ZtC1jA/AO2lvYruwtlhLfncx0udShbp1Ah 5rIENsDplOqqF8v4OypoPWkCgYB1jBA11z+DSuqGM51OXvv4P2TkGLcdsWPZ4dEj QCl9biNswCYxX2qkVrwSjBTB4Wyk77TMFXM2oZpaQx2OAm7D1ByW4A8D0zjE5QFU kd7OrcYGyxO84g12BA5hSR++hlT3sWLag+3YmBAOtYsNfZh6oH0/Q8IaOAwMHeVQ yiorCQKBgGFHKWecHohrZozSwg7DJoaQdWw6h9RWMA/oOXM+80CYdbbP7GksWV1o Ay9q1NuZGfe2z3xXYP3F2wGjIWehqGsYc53zXu3mGGYhvQJzxyRCwAYp1Vsn8R5s XWsmcm5jw+o0N5vCjq5vDqlpjrnib9Gdbz8/8hK6wXZlUWyeudSC -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 2097151 seed: uqWglk0zIPvAxqkiFARTyFE+okq4/QV3A0gEqWckgJY= zfec: required: 101 segmentSize: 131072 total: 255 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:CHK:fh4boug3wkciqhkgpusjsjyzbe:sy5lwooxchqu5mrzwr3mgc7qtwfjgmpz2srdmwpaip4iuj3tn4vq:101:256:2097153 format: kind: chk params: null sample: length: 2097153 seed: BTBX/ampNfLU+ox7xipBGiaSbgC0kcB8Gy7BkJB4oKI= zfec: required: 101 segmentSize: 131072 total: 256 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:SSK:vj2bo3rs7lkwz5lla46j5mfd7a:zfqfc377sixrjzajsk6kf27ouoz5fpdgszpg37k35q3ccoie2mxq format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpQIBAAKCAQEA1Ct7KgFNZeSkYGbFtOwIHbMXiIr9zYZLKsHNkg15XGBnG/ii 13p6Sv1XuqugHcTCk+xXjWL1GjB4xxD4Bxo3Vz30T6EpDVd+6FNXB2YO97NJVrn1 DNUYgQ0UihnEo2WThNGcRRmthyYBPYGll7TgbgNMdhazOtX124mJM1qA+OlRm6dJ fF7rAvfYUfXSmKKk1pWxhX0Fk2PhMGQi7HSMR42dgy9ZmpM6I5v+CRflPI9Q1Lwl gDjpZA/nTRhiALhHMEHmOC3aCSznM7Glf56MOvuvP6L+QUnK4DYDs2Bv4oVy1EVY dZFQ+2nh/OWjW9igPOhYzq/y3e0XCvV3rGXWiQIDAQABAoIBAAG8aHOS3dIkReMk 7QNqnaV+yPeel/V4iWDo2QiiAHqlrmQSMroBEBhtbHNyVHzUiGsIiR+96mzQNZr+ 6v3wVE6zGRIyKrTuy3LOS8Jw2VUrSF4cn9eNCXhKLRu8KOfxytGl1IRicZ95rwTq CRdDHvBOm33FoxwAy7e8y65dcMLt7mmzY3V7eaUruJx8fRob3dA7POIml10NbM2z 1TM65uqrVQgihMlQg3cArL95K/R84FZs23V1+EaobpC7gpvFn9J13pgXhT/pBP7s V+9tXPmITVDaaoO5Bw4Z5EqCQCEEzqeN6v0agl4b57k2eoE/mE8p+sZ2gyPUH0Wj llFujMkCgYEA4X7MtD80nEA+dg6uRhMnN9eLVmVLHXpQpuMdcdg/5EXPUCOUz4Xk I3N3Mx3EVWOc2vw8gvY3ZbS7vpMEEzCzyoYbEPa4fQ24UQ4aI1qL8gFuHsA5ZnUI FcvnTyZsJgoT8rRTm3+lsqmkzAhcHVMH8W/Q3uJhtCtJGNEzo2yyiPcCgYEA8N8z /s8gU8HA6hFQw2drjJCglJnrzK8jGq26bnnmLykXPnTuOwQLH2gKY4PRqPEOtgvp yEMXSGXktXxMNvdfY7FNDZvmdBBVR/wAPFbz2yZgmdAQdZHpmRAJPW4kJlXSTmg8 6QgW8bsYV4RNL5f94IOk7QinchiF3jGaK9GWPH8CgYEAmWNLlAC6pN7+ngf2fCxj LRUt7yMQKYkee6daTCqxq3HhR74sZ83IFmVg3CCPgRY1iLCz6NHbdQ+v9j7DMtqa MlVu6+coL8i5bEmPdiUNtR1L7xcK9Kr/SPRe7/RO9ME+OIZ5qPj3mcTUGQZGwpvM d2t8RWDw3UHkg0ErQyuZdpkCgYEAzKtsG/zVpDXDfWCfNpp/GV6fBAXSBgdfFcE+ 47ayr6oDtS9Yak8iQFqAUVTl5t6FuIxg5qiTdRIXh1qJzD7gD+7M4V5yMHbccCEh 9iOQa8utU6UnBy+nxUaKA2e+UdCktbj+4KfeDyMCKQMjLujAcXCKyFqNJXbO8SFG tisNtHMCgYEAiaub4VuVuJGuJKYSdlSL3G6NGZaXYqkeyx3AiBZw+qBoECNIjVmr afsUnv5ocfpBc+7zQGb/hmEpAeqxMwza0oMfJSt68kLm0ljL/kUxpRR6MSv1yPMQ DaI4cX1tEURu5Wqs+iXtKiASxFNCXcD+tSL6937wef2BDte/QFWEyus= -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 2097153 seed: BTBX/ampNfLU+ox7xipBGiaSbgC0kcB8Gy7BkJB4oKI= zfec: required: 101 segmentSize: 131072 total: 255 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:MDMF:avc56kylzakxlqqhpo52hd73oe:c7x52qxnbsjgdc5fgxlrc2as3rie6x6ro266qnu62dbsncn3l24a format: kind: ssk params: 
format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEAn2qG7SUv6kD+8MZ13j8G9Us+w8B3/j1QC4SdgV7qoQ8ziAFx mJe440QDfjFQNhPtoa17RRzzBeZfaMu83oBscl9oqBVf+WuxQJlHNh8zVNPZEBtw VkA82X7RHzNjH97YU3DV/DRypdMPCmuIUW++TI+t2c+W7KsKPAOJx6rrJHK+3RgC n2VuyQHhZkLqqNc3ZexwoKFGMHIB9XtHSCSAMakD7o7ITcqp9W55xMeLPWLtEvnr 0k2uSWjJ4AmKNf1NKLQ5eLcIzBi04SiOAUm3p1gtW+qPfIChbV72RB5ZWL1miiaS XpE+F424wm4pZNf4EwvRKKcX8QEcBMH/m2sVywIDAQABAoIBABv/E9oS9VLA/mTf nbSdwgWTJN8w7oHWV7fmHtkpB7CoYEbq5f3D64LyH2DqnSkaH9oMgwEUv/NRzYC0 gyNaT3FYoyMdueCuUo9DO/fby+KCX/UNtJFZL7aMqII/vpFKzBf/UX084sOPiO0u DF8s7jE47HG7nMhMk3wNrQVFVY1tWGlL1VUgt4ICm8Zz4+o+F+P7/Zp8fZB9iGNj Zq9z1q5VMDj2wdNEt+vvLcWrvBnphmqYj2axk5VWi0LfMMVycWGtsFV7JHurNYBa iEJa9Ew1LxZc5Vq0s6RBtNjT5rDqrlXZwZi3nyUHDls21auke/ivK4PhQWz6ThZP /u88Z3ECgYEA0aeS+9HC2wsBW7SH87N0Be/J7K7YSiAnWRE2gyi9Lc9pcB9zsZHC NkJykJeiocrDduAm/EwjwSgP56u9ESpIh9RK4XdoSUmlYlcxmCekWNZVyTNoWIY9 3wDOCeEK44bPPl5/dAWqlG3/t3BP3UddoZoosJXNNMtwb8aq8yJ1TZsCgYEAwqfw 0mxNAdk/GzT6yAYf8Kf+eDCYhzlnnG0v3h3Eq6wm5enuQ4TqRplem3m8bs97hYsM ofso6HjsmAQtm7JMM5gduBERIx2cH1uIYRhJhxgQOwq8yRTukzRGEzi+Nyfmf/jZ Ui8aPwRxtx0h6g9C8LhdORVxISR7Mj1mD0fw85ECgYAnxGBl8ZjDUagVS/4JpL1a LuyfP175WHX+N/yeDkkr+k6mnOCmCt7KyfnPIWQQylQfJU9fxdV2WvIBYJsBOYL+ eK8nay3V0OlU6PMYSFStISKugljFidkMhquORih5leWTj/se98AuXVsG4X/UmifR cltLe26sF/agzQ86BQw5BwKBgDc36tHWVRYEKamvIsDhM+hRz5cKugoKF8FBHAYX TbYhVLt929AdgVPbqAHUy8ZnZzPf2QqOM/GWdA8/iCyVrJYqPav8c28RtDsU/SAG Ar2m7tvA1QL5xB/QAVzsiNEeqX19+zAcGobr3NJEGl3KTIP62L8bvQbY0XXUAwKs tsZxAoGBANGJEzWLUx7m4ZLKvapAU1FPx0pqqZCyzrU02mg831Eeht9K3s7B4WBb HMNM5wdVuN0eT+hUEmazU6Hwg8JvPMCrWVLgj9SraBgyTWLEMAbKY0TYYjaOwhdy 5B4F4j+30c9o7l7jr2x3I/cZETNM0avvB+1sd8AgKk9px2oRYqEv -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 2097153 seed: BTBX/ampNfLU+ox7xipBGiaSbgC0kcB8Gy7BkJB4oKI= zfec: required: 101 segmentSize: 131072 total: 255 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:CHK:oakc4ghiol3loekapj7br572kq:exzvg47pa2b5v6tojc4wguux4pmerisstqacowbzwtbqybsxtxcq:101:256:4194304 format: kind: chk params: null sample: length: 4194304 seed: 2NVWOXvtwcAFf0J/g0fDC+ypQbnCU7awZoCxcnhpkVM= zfec: required: 101 segmentSize: 131072 total: 256 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:SSK:obw4p237di4oqr2qlt6gkodahi:4f7ql2uzoornpccbjnmaucgnsiie4mhoqq7lnir423nvfqfj75oq format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEogIBAAKCAQEAnP0nyUESoXrvtjUGHtMh6BCdCVa4SwMDktzM9p7CDXVjr1jz EH6I2Fq1EmhJJ1TbaXod3s4SRuzRw5OGyAaX2PIzkntzpdHKai4VEeRGJTGUAh1+ UjNoUHSTgoiPf+VuBfl8uHUWe1uIKPEEqPDddvzGR6iFWTfp+eQHz6w4piAeLlth CA8+Ti97iuoqFk7Q07aPSY/v80dhuyNL1nMwHSJWePYBODTNGc7RsUa6He/K5dK/ dptSZGFtHPZ8PsI+C4KqsKtGoyIcbefUQ1QlFJcxQbq8pr1FtCHZXGXzGcza8od0 uziCBzU0iVHyqrkCgDGcjSci+z82a+HEEtAEzQIDAQABAoIBADqIi3KXAC7USxd5 SrhoiW5g72RhgKJ7U7RI/mT/yaPB/rKM7EfcngJpQ7VCy+/NzGdWAFgoJplqSEXv NiRTjP93QvJddD/B6oJPf0yl+993TlPBkm1svHqvFKbpavPJZA33OWD/SywgczKs tsuUz0ZDtlxWga3D0sn5E06DzLVnkyGqDW55st/lYx4sTJ3luRd6tF+y0vz9v871 6N/t/j5LcvaOjuLTkOmvoCG2cOsHQGj7rlXKIkxiHaIbFONEYuhK0+txs3XfuWsY +2yfqQiavT/1UyfqZu7J1GXS8HrhtBxR3a8854uHUiaDP8ncGUEI9vJ6m3uM6rMz UlBhP4MCgYEAtn/vyTqGtzTIfh48aUfiGFOtshZ5MRBVtcG86GIoGSK1wN5Xwoku Vtwmz73HPbctd8WXF32OpKPwoobM1L816sb3yoDSTLjC+nl2vXlPOjY9eVFZEaht jQhF5V25CR9sb2RwhnriXz0rfW/BoADvYm3lh5a7l87enIQI3vcovs8CgYEA3DcA U+S8zq3o+PSk9ywiS5rJ5W9mfGzVg0v5UvCNTMBbFfkkh1206Oolr7TlAKBbgM52 Z8XVzosNjU02ULZFA8UM/g1/t4kFf1LmQ43rZm/TO4Bm6l5sOsx2+Qr8s0V0ekON 6F4dex4n2WQx0ODikEQkcwGKVwOejTz+ydySyaMCgYAJwTbg66btvf2FeDpEalo0 cKyVG0xpCfV63JsrVKvOBCPw5jGMrWZzsBrG+d7fdp4Qi9gyojxwom6nUUs7h+jq 
3q25/j6/aRTK7JkjMYvBkcqhZG69WeJZKnsJ8oOEcFCMd7LoDUNyUcO0VbfkxIgH G9ar86udRqpxdUFAIbfk0wKBgDqZDCZGxJL+pfKxLsBy5wFVRAogVZYgY8RXUBXo 2sCkotg6/qRipAQiYjraGOHMyeyBg/JjK1yVldqWxDBAACdbpPRpZSXSeTsDNTCe sBgHA59esIQG8ifHRpVIfiu5/J+YIEfH23JqeNIZHkRlwwP+jfBoZYZ3+RW+OFJA tnKfAoGAWrOXeeycShbIKjWS1XljZ3+Nxd7ZARX5pf7FvP+MwqVpW7zEgcG3Rc3I gkh1In8t7bzKhUVMLDmJIZnpMFNUHjIWAAshIQHivs6d1EnErqCHkTEofQnnIzgG HzUTEC2ppRiXZLg/5l+opBbah2afagIMbODMgc7KEmVfdTjkDwk= -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 4194304 seed: 2NVWOXvtwcAFf0J/g0fDC+ypQbnCU7awZoCxcnhpkVM= zfec: required: 101 segmentSize: 131072 total: 255 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:MDMF:ytzxo5777my4naxdqn6c2zwmpa:shrbjb27qpwlmcjimpuhnysfondpd54j44c73h76eudklkc2kb5a format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEogIBAAKCAQEAwrgs9CQIUW4BZ228HTF0MJ0xGU6AhvNcNwmSJ+Of5tPj2Yx5 g/MNsMJYyOxxnMoODLPAgSX/J1EqU/+/RqwmvcXOw3OBmLdxnMk+VYPBZxbup3xw fYCygPHGR/qcQvJKQ76XvkiFaiD2iRTtX0aEG7iZYeLgcQD6PQvgMKsdX03nKmyn l1zWTeCpUBaR0RUJK1kwh4wO95Ma6PLAkQles39N/A4I5pBlVpN5j5olIHyG8pLW hvbNU1yV+5JY1OCtpoAxoKzehQx2vvKeCwlqh5gViu+FN5j1/7trA2dz7RNAEGu9 X5lw3tvqZ3IxhX1/4Brswvs9BYvFfPyFzb8sVwIDAQABAoIBAAZJSlCHK9glufQF OEWTAO/sj83fa9vUDRBM85Y1qPz299U7zdTH6spNljfFzT4JzVS+XSduOvYPV9Nr H1lgYv07A2TFilznl8p2FaFzRdaEPzpSlKSOUyvT0BonN5at/svtz8eZqDNFkwSP gVPBCGc+IlvTAegYng3WDYdUtJxDpUnO+BDg2PoZ6lzRlzyNbxiPBA7221yyBk6U b4WZUtHl5mfCcfMby7vaWAZtwtFiYXDeu7bOZwBtYyjpI1T5l4yUVw2dF97eAsWd wwbSYbVs3bspN66VFfaZQJrZEeX2gYDizQo0iROySC7hcyXQrNmnufrsgGIwhrXl AKch9mECgYEA+sUKHB1nR+Zl7QzB8iS+aIK/XUgCYVavkg7eTreUzVe8/CPAEWGW yQnvDlWKWSnge+LtEHEWYmNedUhMW8zGx+b8/3aGMFr9VeWDrFKGe5yNnGms5pkm RJrpJxC1CXzullHoIQ9Gw4XQStaSqK/ShJowh9q00QznV0owbKMXW7cCgYEAxsfc eKvlwgpyddL3WNSXhggqOjwd8cBIku8GoVyfzrgsPYHPoZSUZCO8iZ/Gg04tbEAe oRLLYQ/5i5kcbFD5tbWji895Lwds87Gw5TAIAc7f4Za339Vqpl6dqOOe2pi/q7F2 LjLeaOfWPFsnCg0qZ3IzADiZ0cY3KqQMh/td9GECgYAaeYj6tOP9hEaIg0tKjDSK Bhu79mlB64v3qJgxyVHtZ/Ds0b1qWFo5+VGCuuczSKeJjMiobrgFRSZozWw6WOE3 o5xcQCAkpMaQNf3zyHaoQDv3InT9l3eh0JUC6dGjIcxylE0kiF9ZLxxxejvbkUxx cXHkNePXGjymS4/XOFSz+wKBgDVvKkvh4Xw8tLIJiOX/F9A2x6sp197RknC1AjJE JM074uCR0Y+c0hrtJFRWd9V6IWm0/sbLt5Ia6jjlaqePSODYt+LwXaIPu/DyNhwV wkFCLBqHGlx4ERgx3O22alBWuUddB+i5UeIfWA6XbjIcgeaW4zDPBkJGpzO2L4wq PQJhAoGAKGFp1XoDgr9k3y4yfUn8AsLauTsZn0JZk8lvYuZQqMxaV5F7riF6jYkQ SlkF8LATvS/0QUpLOuF3QiSafKlXpAodQoTG+vBkaR708U5qN6m7fFyH/S7seaKL m23LxcRu4bvsRAsPC29MnLdhECtTSr6SBrfqG0fY7bm2Bnpolls= -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 4194304 seed: 2NVWOXvtwcAFf0J/g0fDC+ypQbnCU7awZoCxcnhpkVM= zfec: required: 101 segmentSize: 131072 total: 255 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:CHK:asmjgajvwpwvylxdcf5rm2gyd4:imwtdi4eqq3wm4bvk6npho3ta2fewceqzl4f7wyyabg5i4s6h3mq:101:256:8388607 format: kind: chk params: null sample: length: 8388607 seed: w6uP8Tcg6K2QR905Rms8iXTlksL6OD1KOWBxTK7wxPI= zfec: required: 101 segmentSize: 131072 total: 256 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:SSK:vyjriuuydnhdjssca562b4qkue:en4dcdgixll72bqyhfbfyzp2xoood2ti6x6djtvaritkvgg7ktcq format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEArKoEUAfL/7SQTQnIgVEBkZ7WYFHMKO2DHSDnHT3rn4xn6D29 u224k2GNtOQ/WiwhGHyCXKAY9fhBReTkmMeR7NZZCTgafZhI8LlVgw4NTTZhHPxX HPaLl9AtWWqPqtSl5NB9pckMoFm/E/dWOlfVmPFAV0VtcWxKis6B6maFKDxjifTD cMu7T01bG0/WDDYZrok18CHbc9ci6YsxwheOo1O3BL9Id3ZzS7MkLjguT5pS7b/A O7+ll2v2QLdgwR8hXLR/OqYdQA6HyGi38DQ0YaIMwzKgqE9US6OYCpiD8SU69dcU y1IncnfkVYPivmfABsaxQNndd3OSWc6qoJnjwwIDAQABAoIBAEr6Fx5TjGuw0lqJ 
rYYuEXLVGP1Bd9ir7pv2/jUN/uPM+g/4w4uApT5mhbzvwmzbLdHuu0MSiFRDJcD+ mJ+ZRc4k9AvTT3mLZ90UdcQPlYoaW4hVMVTT4KEfVpn18oX5ikI2oOEdUTzOS/GV HV3/ZzLfTBO5g8FVh3cIHpUVSKxD+h8mbCTiFlgL6kDWEiUbl+LdON4c+uatBzUU LNtviE4166nWK9ruVTqGVL9TQ6kTEFUwZNKsVbAQdpyV509SWP29LtWevXyFBcWU e+RCZyPZG0cStomWPrmbktfgSV1ZsjgNdajQqUYzc4tsZRnQiRK2+0qcwc07r+kD ZsU3rTUCgYEA3HKc6riwJKRIAaSK1gIl9sqvTPtvU8zTLW24Yj/2wwFpNj0uGYnX EorA0yt5PY7bW55TXCgRIk8HpuYdQiH0atitjEd/0oVzpXW5hFDhLYh8kGQv2aly /t+y06ZIpw2QF32S+PsIfDztUuNv4qLivtu78LJHxEZPIW3UEdf1Hp8CgYEAyIKd YAOZ01wfgFhP6gezFhlHgn5ThMpYnNYF4QDwfwK6iQ7CagzyqW8/lfggYooE/QCN hqRGWsNh1/bndt7gXvF3Kvwhhx5FbHZZdfU4YNna2BVtYxxXUEgxqDNCDpe7Q/2w 5iOg8oXMLxt6fI8iObLNa6EInHI5rH9fbOlPvF0CgYEAmk90Pe2oBw4kBVpbgPCi CH3adeWvCRbgX/Vk0wl5PwmWz0vGIERXk3gi/+53gLqmHBzYtzKow75UWeTMaEWC ZORln0NRW1jlGdYtVUyUQx4+K4il4hP2FikacYL9akpZKchSAA0g5G51pcbkw91H IViI1zTEfcTFkV3iy9bCk3sCgYAHoJhV960ZUi7Mlg9sKqDQXWPP/fg1W/Ek/is5 FO0RF8x6vDn/CMEOWvIDRW4N8YwhB61aitM2TqphKb5CUlYcpnjPBMpNtoQTjSj4 CLz9Siw9/gqsM37KygRBjrmbjoAMJRFen8pWj2pl/FibdmJp6XhQ+M44DUxOWIYL wZBL7QKBgQDKYr2S186YX9X3Vfypoq6s5Jsvq//smGYzcar1xIfpLKx7+sY+Grym yVE9Af5tsvYQuzSmZb76E4ro8uoq0+5q09C0me8ZUhtDyotl9aBpK11eldCikWAr rCMfdanQQ7BakB0tj4GBiTYbH46DwS0lEZ8hcxQdBrL6Pi2TU4ydYA== -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 8388607 seed: w6uP8Tcg6K2QR905Rms8iXTlksL6OD1KOWBxTK7wxPI= zfec: required: 101 segmentSize: 131072 total: 255 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:MDMF:sdheilfncok34xxpgh52hgiyli:zq4duuz3gntecfnj6o5mu4rqkl3edfzg3ohpqdn4jjy37okcefta format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEAtNBH7DSJbQ2f1i93iJVTEOt55EDlz+BTI+E5bB++KTHobKLn RJFvxTX3wfwrqs8KpCNh/YNAgwOuHlzmEvE8rs6eWXWo2Ik/YvIeBaTFynYjYaoK acMBhGCMHyfc/1vQ80+PnRYMCjJjNvtfxIJKWcT4EmGxO/gtFYZ0OJVpHJMTiaQe XPl378CS/bTx84LzZNGkQ9VJ38Qq3ywx/zMm2jYtABggNv+ygMRdYC3lFgDY5duk DrbrninZG3ghBRo6LFxYbWxBd14XrnaoT6feip3lxkSMg1BKNJxFiZ67CGp9cLj+ EbAADOVwFN7u4g6vuRmfGJFpRfYsTmnLeEQCYQIDAQABAoIBABcHbBGmg1ZTZOcn toa71g+Snjy3E04NmSk5t1GRHWwrwhmMCf6Os3ifrgWT84/WvNk49HMQc3f8UQHm /RquhPcSs9JbDP0/RcZ5Zd98JADsWQdIW/kqcBgHH/Gb5ybS7+L9YCI2u6PU1RQc og7qNUQHtTzKGoOz0TIrpMPMK84Kqp3O1RQERX7iLef5d6N30FRguXByX+ixLXNz elAeTaTzexLdRwVBsCcYAEaCerqNznHbSKIpHsIApYYp3ZzPls5qRmFX81jTngwp 2i9WV44/B4GWH/ttHHlIrpiUijhPSYvvEf0dwKtELxP1h1BgHXL3hy2lJyGUHFHf 9sQ4hp0CgYEA0CygU/QTwP06IFUlyH7O2LIGcDcqeTS6TPuaxGpzATMqWDVhqNLw 49FTKzfLHqnUSB86c38215CW14dXU2DqQDG8zeqhdFeLnelcQw4Zp6BnUp9YyVuZ S8OScGFyt6k1CmDRiVYyaH8YGQqWBkORSFRcULIVGS13hFUiXTNrLEMCgYEA3lp8 cuIQBerGla2zRaSwD+WUxSxa0U9lEvV7Ov0KEX6mf21veDPaoCqD1O/Wo04CZQ0d 8GLKm7OL2A7DqpCs4BAew+fWteoHUsujii00kfxh41WcjkN/qzj4K6sJDkuA1wnf usa5QExLz7C9yFOsXVTSlimBCvlzGMcAr83ufosCgYBhzKVh66ggIZdeO0Jt6A07 Rp+5tmEQ4lGn+whhwHTZGnWJTULdMoSTMvM0uZiGhljBrVIjkp9sNHR5Ow8uj7hd gkBmKRXC96ITBOAgbI5m7ve7nDr1FkB1lKLGgzGG0Uqm3odyUvmJmDP1B8Elnjax 2VgpXRCGbJLaq7hiOtbdywKBgQCogaaCYbOG6G9qi2Kqyq3qvi/KZVzF9wdAIO0s vQreSz7enw6054ctjkquGrxssfe6oQApZpTo/l5idH3wSwfYHh9Sk+XxotO9+TmM w8ltQPjmEcE9RwX/uoLIhSutu6Z+UKtOnr9RbQCe+vA4WsDrUhbtWbLDoFuLUXTB xqyRFQKBgQDA0pWlsy6Al/7cokmPuxIWX6Un+o0DQb0Hia+eXlFvEcuQUQ0W2rja JIgZvmKVu1V4yI8M9NRfC+E6QDsKNAsDTKBR0T5xWwhSeofP8+BUqEpbsTwgup+L DIoa/pu9VGGp17UcNUhNy9pQ38jHHw9TBtQ57HINmKyFNQzaYJchYw== -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 8388607 seed: w6uP8Tcg6K2QR905Rms8iXTlksL6OD1KOWBxTK7wxPI= zfec: required: 101 segmentSize: 131072 total: 255 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: 
URI:CHK:yrznm2uuybfbda6jlj7tqs3yum:zy45lpc7ilfzgltxxqvy55wcokiay2fbbbv2wdvu6ubjluh4gf4a:101:256:8388609 format: kind: chk params: null sample: length: 8388609 seed: yPi3JHKKbWaEEG5eZOlM6BHJll0Z3UTdBzz4bPQ7wjg= zfec: required: 101 segmentSize: 131072 total: 256 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:SSK:vivq5nfnxznyjdxt47o56uef4m:t74laopuqbdodc46r2xm36sffbonespuirkipsudey2qyza4cdda format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEA3IYI6nlVJ1Jjks7Ovoh6Bn7aWEoZm+fZD2ArQQGdi7RWHGDD hG6T1e3m8cTx8uJdVDYT9FfMH/Q6ux9buLsk+CYii6ftGeXiie5wiIZRCN8hTFbg CcDv9SA6sF4CRo5V2MSnNN1tTu+WHVE0G5T9n1YSE4kPvufq9tBaWda0dqEAXMXc zv5nWMCyJHB+zrR8GbIyRFgQGFW5xmXzXgBIrNFAoEArrinzjje3AJTRGG18i25J HVAYGXFbhkZQIl1D+hBDq8Cncux8z9VkTPk9t0P4MH3DAYQcxyA3oSu7XykYwNml cMimZpdBAzKxozLD2Kw61jAJMTuJrfXRAMFw7QIDAQABAoIBACzebrj6h/twtYbc 4k79KMrii52UMiK5KT6KJDLdV8dhoXWzsIRlFVpdRfSiTFJNgYzNVgEMziEgScTi DAEJvutoovXEbKcs0YucArSck6dY8wb71CjX41r+PEK/VfoyIsBwvs6wUPnTILmF WLNV/MarNhFYfWYr/PBME14dI+nQ1oFNYm6XSZ1XHmWt+tyKt2quvtQUwCRoo+HV D2zymB/238OcxOEK9w4cWC7Rt/x9+C/Sij6eNSrDsJ9UqCeQFzWwd3QAcu1zaeAf r2GDG0eFQ9Ylepg5dempFTzzPs6Wg0UJ4z9ZDd5vazGHCoxg05/JpfKZ0khBkdbQ Z+8ANZECgYEA/fptrH82rIyYFFxJHtAQn3bO8o8lMwYxMVmvLVMw5EVSPMyqOoWK idf9s9e/FifdjzhtefIng9tkiL1eAWom++KrCQZfp9UOa0/Qo7jrWQunZd4HtLlv C5uGHPxqivLMorCqAbdx1v9CKMNpdeRojOFYhI3qm86BNXpCDxv9iKkCgYEA3kdu OVDtiW0zBGuBTJM+QWt39+tE7MWkPoCHGWaz11pksI3Wa3ShwNUUior8+a9zs7u3 TqR0VIYQ1BIYft1gEjS3rGzrJDD/MFT0oCYJ3Zfk4oFu8qHQbjGGVO3VV6r4aXB4 5RSEBxQ3pFwKnxQ0o/tJRfK6WOi+m7o/t86C/KUCgYEA0QkxA2yg31vIP9nFBOtT AyySH+nZQCm0i125ZKC3+OllSk1ZPllzMQjo8wB7cgzVum9DC79W7pvAHxtdJ+Tq uR5Sj1cDm+srtv82RcqJSfzhhmI8DW8iCney5mCKgFpeOvkUs9z8gWwOU+aiAjpA ItPGOzNjCWHpzs4VWMI85iECgYBDU+mcLNo2dUAtx457rmH+GNpW2wmemmMcl3vU gtpYkcXMALqBA+v259P3/w+PZcirGWH1zTR7Ybx5MB4BV3bBLPyxmrBC3yB8+E68 r6jvWRH4VfJQRhlHN3MUCJJFosDp1yqXYPZ42nPcMhD5jHpBbV0Nde9h/OW3b9vb Bg+BDQKBgGkKUXDFHqCT5w5I6MHReATnuENJmRTUGGCZa/tfN2GDDZA2/HRV3Rr+ pk48N7BZH8gst8hUwI+2iTuWnm1zenuKkiLbF3J9kzpyj4ujagXOWpGgiE+r2hPP ltoNSOSWFBnJEiLw4CbUInwfmwagHvThaHN97eoLDbGuVOFviM7f -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 8388609 seed: yPi3JHKKbWaEEG5eZOlM6BHJll0Z3UTdBzz4bPQ7wjg= zfec: required: 101 segmentSize: 131072 total: 255 - convergence: YWFhYWFhYWFhYWFhYWFhYQ== expected: URI:MDMF:f75y3bpbubssf334zpkgvuvbgi:nsiebimpy75qg57jqp7266brgjsrbloegjm6eepbnpb2bxltuf3q format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEAqsL1Nb01oNkFMGk4oZbitKIILdWtFu4Awk06LC6cLbZSJm/q drIYi/43UWHFpww0UI8E4BZiG1+AUmap2fsYK0vrsQQmiORhxwM7F6TeWvfa+3L7 1XXAnjSFnJXMNrozxYAprhapt4zKLS1xzDWXlqH9hS4dbEercfdui0XeC4xGpGpB Rw7CSY7JJf/RG6XCbeI2T1lwkUhNeajy/xsWihhTdG4zUsrvFzu3eJfH9kgYs/M7 eUK+cvF0K6YgoFwzg3MdbQXMSuGUbNz1VJjWZK8hDrbNbY6vufP0sqmmRWnd2a+S gSAXl53o/y626ILVy02rCG7Q5jtNLCgZpOW/HQIDAQABAoIBABbIfD73R7h2PCMB ZvToVMcU91JmN/nfN2q1MxXCAkR3Fu7Z78Z2bKABAxBwoxZuomw9KMFdOym7zDsy R2c2ATuFnaS4kQuirQkIfVHiRWiNuUHjTYZld5WkHE+QDPcgUNgBCY6Yp3w95Juy fIRqghcu9cxXIsXXqiBCixU59S5FCRsuoCtVvKrZrD+9NzwC+mOPkNf4uHnsYP/g 5ZjAqJNQWbRqaslq8R7vJ6lLzVgjGBLFlaJKepv5cOMMpxxlj12j3gg2DWNSHbZe s3Hv9NyWhje5sI+96Stowc5oiuzfbiDvNsD023Mz+9rfheZdxd4l2ELrU9BVdkA4 HxJtOWECgYEA3/evZTGPlmpPCoy4URtj7j0fpxYVxCPlEHjpdghEywqA8EMz0MZa 2JHxWZKmeA79wWCbb9qvupzMiys41bK4qXsWnjg7ESNTD+HhJtpNXNkpSs+zZcC9 pLjABEEsfvWs6/laclbrqI3IhXKQRCjBtfAga562PkqG/v9aypm0cSMCgYEAwy8y cmT1N5DVq5X1Fd1LPpFvSIDjoZNsSYaqP40FkWnIMSoUswHY8XWW/0LTXz8b9bgg CijCavAGFdcK26TlAo+k/zQ0MwinjH348VuDQPcp8P+LAjeEqUwYngXqBpi+uw8F 
CEUIvX/lV98y627XfEHjv/nk4H7bsgUIWjYpsr8CgYEAn6syzc/RcAiGJR1BYgFG 8td8s1/ZUKXObjnlJpKqiJ4KYj9mt1ZR+cfB6nvUVg9J9Qzsg4fCdCXI5QaBVEg/ wgPQkifAZG1skAwWud4z/ReMipscaFRKXx6fNelI0ZJQH0L7qjwxcU7zP7/2/cCY qR5x3oedoTb8mtptXbbKn0UCgYEAuoC4tXIeli/A26n4fCHuKiURrrfpypRxnngc 6Yi4z0/CyKerC7kyMNbpp5OVIafN8ac0hkCYNVKQngHTEDmp0h6rzGd3kWQtpSMh 4o5NBqCl5PBpRX8DNjnONAD2s8L0TQ13A4Xjah9xZ9uQbkKFiOf01ZXUy2asSphg eMLUaUUCgYA/J5+yMOFTta88jkSCFcdlSH5WqwSkU7qvkrHwO/rEPOITpP+UbkKr fWj3UwDW1MPx4HFrhuDpCaUX3dq9iAyXe/6FemPHvMR6YMC1CqimT/XoJtUSruIY SCXiDkDZcPwqOzGy/ZJJj6cXRXbzwMxnxbPSt/Se1RHJeY7aTobIhA== -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 8388609 seed: yPi3JHKKbWaEEG5eZOlM6BHJll0Z3UTdBzz4bPQ7wjg= zfec: required: 101 segmentSize: 131072 total: 255 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:CHK:td223psbzins2k6m4frmfw26xy:opgmb6zhnwsksydgjwzpfdoz7epm4ynzmvkjuw6s2jntioqk72ga:101:256:56 format: kind: chk params: null sample: length: 56 seed: YQ== zfec: required: 101 segmentSize: 131072 total: 256 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:SSK:trfwvi5t2nxr6ag7n37ziyiaui:gpom7gv2rn7pfsoqr2bdyr36au4sac46d4cbxn626jsc2ls2tboa format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEoQIBAAKCAQEA24TZYXTTI5iKyFpzzyXUWZgltXj+oYogvqnh1Q7z/Yn2qxno 7r+lssZobVMFPbLaKVHZwoY+zzzNu39/Ahif12f/c89Jk133m6xV0QNnwiuP7q7/ g4hXjAiRe99xnjf0lU1AsuIFrErhoCHthj+Q4ejI4kl6vorfOJRLMdeeWEDhP+bM O0r6ElNwg289MaedCJUl4xMHeclfTVxtZ5qJp16U6H6iey/YF/6vN4tN18mjYygY vOk1pOLRHuWjVy6K4iOyJpPDcXzARLazAQQy/5SFMk+g02EIvEGqjpoQY9F5tFCa 0bullE/52NzDoi/5lS2iV9E64nK342s2giEwpwIDAQABAoH/IJWXrt+od6As+ZBz oEv9OU9cSZOsOE5IjgSpgPa3QOs5siwmZ0oLTn4lAhVQsdfaikecC0PiDuD3qN1D /Quqrk7BnG8ofLd2CaWLF3tp62iL9OIFC4ExfZfIAJsqQlIL/B17fRIPxYNX+m0O 1N6aou3q46wEjEQQ3lUk1EEtP+wbIKfC+9el4jmImdroBiV+hQYgzYCKvC7ZysBF wVPWZ9w+BecTwqwFelkvNfN9FDaN0TawGuTRYg2SsDbTo8NfddmoluqOryGEkfJj xUWtpt9GGeERBXytKY4oyEMLN+xglSvUkTdgREHyKvJdIJUI0+nVD2FfXeoB7YfR uxLhAoGBAPDXyXTSgs85qUSn0EwUlvLkqXX4CSr3kPudAErRe2vnB6GzIWLoixxd coFvLgSCYTzlQ3TXfHpykSeNmadavsDFuaCES95OwZOl9xYHLJzdFwVv/0jv9Flo yzOyajThxBLV5k/vnoR5igK10I7YsT2tTHoPI/DA/+gq7VoA/IuZAoGBAOlVgyPy L7Ntu5IVokLFIWJI2dmBvW8fBUjo7+lH+XBkDzdczxlr6ZJMRFksgP8Gk0tXCah8 c/yzMEZB43Y71MHeR5Vog4mDJZpsdcdM9j8RTteEpssVlyQ29EwqMQduECAXWglG 2iSWgSoqgeNyR7/hfmUYFiomQSGgp/xtp0Y/AoGAaBD1rZLgjuYda9sPODCVYPLI /n5kh7pdXTtjyvBlYiR7ubULMg/FPEZsmd0Oh0hG9+cglLYfxVEHw4193UBquCU3 plJD7hUds8y8zTngXw9xSRoxtrRoYtHTK81l8t+yt2jRkay6VAeoSK+DJJYhT8M4 Dm3IW9kpOoqB8KgId9ECgYATNxSOoEInX5EDzb5IC13dbyxpihKklQRlZbFkH6Y+ CC9smrr/V/CrOJakVVLmLY9xs+A6vMz8cXE3R/PIZ9L0iC6S8kFq0J8HIYlteTwK I42/l8/4h3Wj6NajcxIIj5rKWcHzY59RRgerBkceCOo5tgMnph0lKXNRpp5O3mTZ FwKBgQDBC9od6eTL5KYL/PU0S3im4HA0Hy4530WHS5TEJVsuv0Jnc6vNayimMnGV jhauS2BivD4GSe2wdI0MUtGUW8XN8qf1jNBY07vzEn9Ag1k9Rd3F/K5blYU+B27W XZMNa+DYA2bZN0rDnX3Y6otwPRuWHS/Bnt34oORhcle8AD4kNQ== -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 56 seed: YQ== zfec: required: 101 segmentSize: 131072 total: 255 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:MDMF:civy35latqkoitmqun3f4vlg2a:5fxqze6lzmdznzaxvpk6dmopps4ixesnjkp6m2vpjeh2l5imgpea format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEogIBAAKCAQEAtzVd7X2GXo1jANrdYD8jCHkl1Zt7fO/TDWahvNbBHQYcm6Uv UI6l3ZLTW/nRLJ/ZvB9ymauAQ3cAs5C1Ok3S66YhlYqbvCmRHYbvBiKIun0nElxG IP5Ox0y++xM0MCWctRKi7WhidQqywivNBZglERI5L4/O1/TipcxEFedLTQY9UVl4 dEXrw2zYw76PMlReg88Q9jJcHQRcAJqwaRvkOTZ2qjtLnYk8tJfBEqHG44pKVzvL sLFFZLNgl9cCq/xHLPpCfpwpm98KR8uFfYyudX2fG7TY4i9mCS0SgcKBJiNAV6Aa 
IkQHrMs1PQJe4/ychz73SkjKRvoOreIVcESHYQIDAQABAoIBABiRSThq2/0raAKK FuQMa2H8PujTSf6xuUNDhz5HrQs7kdQEVWEv08mv4fRkPlrFz8CUlf1J3HAPkfJC Xi8ElxtfAoNnXCViDJHhUYWo1V1uoXHqmkPb3kwG/Fg2Vcn8DTTR3DPKSuOnjNuR XJOauKO+phj854+ZiNgTWXD3fGdGS9G8WVmGWy1yrJaL7TgjjrfSBFOAN0yyy1qO k6WUFy/tF74UiAT10vMnkfzMMtaAOt7BL5siGOfndpH40hM1231lf39oWp+DDxOu /yt7MysTvHIWB4L76k/s1XehAtEPCjrwF2ucqHlh4Dv4lCLIJTy6hbtd9VnDE/ZM RGSBZI0CgYEA7oQCFLKAUdcmpsmjF8Nb+CsuwhmbOLbfXdaz+gG1pfqMxf4kw7NG Af5Z6HgnheXRg6CX0QPv7qPe/V66Fm1WhD79Tja0OWJ742e9h+P6WoBVj92xKzVC +0CHh6HyQLVBfkulMVNu/EBchrKlW9V2y2ndE9Y+gl1DRQaCI1g4DrUCgYEAxKN2 XeI3a4oYtT53nGW0zQwALWBfVNWznjx88Ywlof31GFF8gRHgXYtiPzeN+mOjQMhG 6d1q/V5PJguMw+2cQIPw/+CWQA7s4dwRqk3Jc6Zr4V40Gpdn7vUnGbTAw3b7QLJR BW/Fp3/331v1LPbJziWvIJe51SZQOPDIUcq+lX0CgYAgiFrsTciY4RrBhyE6vYfO 2rz+9pUocDEZUI6t3AvVvs3yt452Lv6uiO3kencRmV4xcPckKEBSsYFZ19DT/Eff s+PDBk0gwqEZTG4amers6zJAdEGVHiers4qI4nrzfoWXX2QBzVqHB5RXPwi09PHG HwNrkD5oc6YYRSH9BixnPQKBgCPFYLj/d/l2K7x82qF21wceEcIvb+gs3/n/IvOF /SqU2ktMN4v7Rod93aeGYauVCJO2W0Ab6WSiDV/sZfUWeoA6AFNr9ak9jdYghI4o jGcfdSyQSIY12NBdhHlmqlJkiJxiU97bUGeCHgNh5R4C3v9DV7JkJ4gg6iMRmj9W 1Ii5AoGAUzrIstKLKfJBPXp4UP5LTglZSGCGqDj62/iBAdIZ4xz8703g/riq0h/d uAZipYubDV8U0WipfEhjUw2NTY74FzFRnl3Ya31Ms1gOXwuc+EagS/M+ohmb6eK3 HVaVQohUL//L0C+jqlxfjl6sQxUF0IizsR5XPm9VkFSJqEVYb/A= -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 56 seed: YQ== zfec: required: 101 segmentSize: 131072 total: 255 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:CHK:ihyeqq5peg2pdyw45ct5mmj4ay:2rzhndv6o4knlvo4po6rvbnknlcitasseea3mirqjor5c7quh5ra:101:256:1024 format: kind: chk params: null sample: length: 1024 seed: YQ== zfec: required: 101 segmentSize: 131072 total: 256 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:SSK:vwfthgrx5rep4ixdssp2psxjja:vs6adgwkfwnganksm6w42saotjlebku2jni4y23uuczi3q23i6iq format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEA8TipW7unNnPRZ6aycrHuDFicLcuI+QXAG9F393JiO87SnmZm zjI2FWGFDV3XpTb8+6QA44n9TTWiAT62b9kljU7s9Wf4dBfaJlYIrC5HIS209i19 tpIoQgdh31TYqXy+1o8oJDhTBD6mUJ3Lotj8oAKqUaVTKqJnfNSzPMdChYHYvE1c /Tr4c7gpxXMXEMTD311HtYZd3YnZ4dZryWfyrdf5/eQfrUFeY2MuPdl1ag57+2yN 4mXynTGHmENgG0isd2OKgyBmlHhNPq1lNsxUrQda9ht0dcwYPvLL5qNnVc2z3e5+ RH/2lBzIh9QTjpo+pdMxqlQR6od4jfZdrFZSDwIDAQABAoIBAFnzVaAZ+VAiXy+G J1wCwrCC6HZhRCoMPWeCNHinBD+mL78Wk3aHnchaTam+2TfIKg1SSmyPG9BLVCaf sptBv7GSgWU/yJPOAzCxe1lthmO2bhkwvIS0uuNoalRECOkm1ekfiAn9oONf01gT h4ip3oZyh/2bJ0iqN+oCTPY4nbM3j3Hx7Q7NI0AJ+rutEhM6Ina7RjE/VmDSmM1a ICGCXmae7Zg20bM8rg75BZ8Jf/td+5lvtfaEK/070X4AGPri1lAJ3vdlHjQbvJJw atSA9Ky1Qpe1t+rPjMBEAdUxp3bzinMGRwt/tSj3JNXKfyp6JMO5zM/dOknT4C99 WkM2O6UCgYEA83SKCn/lZhGBZD4thyRHRCcyVeO3GxlIO9mjt1f7JXiay3hOlqOu x/sT/w/Hdn+qSD8F3l+IFYP8X0hToN8oslu140qPS0RPuOyIFigm7jw++VqnZ85i OhZ/KuHsfnlxPmc+vS4Qh3MkDiqG3xfLVH9jUmCXDhQG5UhOMgOsgBUCgYEA/aan lBQA2jyoXiwnlE9P0HF6ZSbxHZaUQrw8xDuufjQcBR3mmAg7p8Yb/EOnQKTjage3 EDzNJnqPxtYjubxKtBu/7rjvaMkvavfH6JAN/Rl3fkibOcKbkoSa7mGmuUE6P2UR 6tV9vd1ITN359AE4VdUEapzanyuAjIhc8ehxLpMCgYAsMOM9tKl3NYY/I+ovta4Z +ONyI7uA973c30yQYy/7RUET3eql/WAkfLbMfZi/Mb0/D/GIw953yVVuFjrX4KoK dgs2Drqj9uphrs2k9/TZGaZ0rLfmZ9f8o0jCB/BdpL2hjiwdOtdVPtk0mROSO0d1 NwpYUaAZthjqVY2cFn6hYQKBgQC/hTJLML9kCSDn2lcYOLp/HO/ZqImuWaAgs5j+ YkHisN3nTyhp6u2ARKmk1EBZIydDTAgBjqcoQqqE6/OVroKJc9p8Gc9LQ302O1kK VJr7XFtJUvFBr5tgChghnkIQ5xtf+qSIuCJ1Vbvdrk2o27L5vBnVlhHM1T/+3Iex cFzlWQKBgQCsutY68bH2Wx5E2iiO/Eay9XQRky8HRvXAZDZSG0Bfn7dTcF5EddWk vhRSjEpU/6wrHcVJb1clP99uIpzgK4QG7t6mZNZ/StqCHTtyNisaungfz/B3k+li HMntS+IYxlYIsxBNLIPJLqNPBqAlQb9SJkoO1noTR/+4OQlSb5KNpA== -----END RSA PRIVATE KEY----- ' mutable: 
null sample: length: 1024 seed: YQ== zfec: required: 101 segmentSize: 131072 total: 255 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:MDMF:2o3r63lhfs3qvi7r54gawvyuca:cl62d7ozemosy26dkj3xuvdjrzuf554mbqyawhvi4jj53ddqrisq format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEAjNzgfrhm9Cs93A9LJ9yUmKkTL4JBhDUXXT/O09oWWn3wpq61 Ouf2NcMGwMC+1tsNWA/QQYwU66JE1s+iow3k5a2jd+Ov+pRZJa8R1HO21iIERvjF qJOvcxa+PBL37dZcJP+ubnhK6O3sIdgvZRy8NOM0iscXoDgJZWJnWlIIcQIVy2w7 32596JhdXHH1eAd4WH5NtjCQQId5/29OPPUlo3CscH18gymseagJ1Al7ixSRnkp4 AnyHxk1I1aelpzRIVSJZPWrfFp1eIm3KPha2SPvcHGugGk+faTuJ7HWFHN8pLQcy qTXCSMjw4tsHKEbDvb7A3LCnAtTi5ovuYHt06wIDAQABAoIBAD/J7me0PfsocdTr oA8nFqujNSr4g47JNBFoSdMqGaFVEtuIlk1cqeRisvYq0sEdZYeRca+dLgQe8amN UYshSZyw6yvpkdGZyF0GUL6ywANsWB+DnI7ggj1N+UvfEyNDRWsD1gv0sYeV5q7U 5XGWd6xDj5Gg4xQNDEQ7Ma53I5d4vD9QgQpIHBrpTuTlz3nowkGIzXnpVQ+54Oae lPb8oBmLBagIHVzWY7Bh64oFh/kr6o9db/QkGIz0h1/UdMhj6HU0+kOUG/pSWHbk 5fRoHL7mbXgNzKZaGPOZbATzgXcha5uyWDs0r8WekKrNsUvm7E5vOE9UPdLCnM0O 7YHe1+ECgYEAuwDpwl+IzdAtjPNQUtsOBKuDD6/VempGKX0oyTwf4SvY8VmU0Juv AhHIq3UyPdJpBLY0Be0UIvbQWNXdmRDnriADLTq7g2QYDwVOI/bmRxYUI85FXjIC RT2u8nQWAzFfOhURsF23PUwB/rdjtBSvAdfNSletKp8abFApFonhuNMCgYEAwNXS BQ9nw20GeqZnYZOASnD4a+iW45coNSkSGETGO1J917xWtXzvGAAAqlSv/kNXYYqr RA8yWuYc9nLBx7623cpjt9CH2vXefodDLlUB4QSKEuP4f9/ttsw/L5xmJVdQ6pTL MNpU3XgtNl8qsOVAokOORpbY5f8leuuloylaxIkCgYAhSyRTKtccbXfupFMkrUNt qWuIG3ISfWFIebQNP9sdJ8VUEvLfwRgDck8b152+S/vOjvHsLC1tnCuz5T+yxMO6 yJBIOTCxT9zIr9UdqhONjGzBgzPudVDaKwU+vVQ99UhS+vVPRSAela21P8lMgnI2 DcnK9pkqAXGe3xaxoJLDaQKBgQCoASJLelKC9xfv/86OOr5JHQeyrB/aBbXoKvIy 5qh2wrYVIWfCEykUFdx+ie4TboRQ3Um9sCfE/js5lF20MzqLHWunmCzk3dWNEze6 xCEw9I1/S9MTRfuLiYN7bZ2o5tv+pMgqte2+TpfFiUBegj2/oW/xnDc4mwUChQ/4 iW0lUQKBgQColYu/R9ympb0+hMYMtTDrJRuREuyNCB6429vX5OG/RIxfduxCvVmI SASInZIbWFSqH+zt8l+L2BEi7fzpGSk3I5/9o0obRRM+9HnJD0c44P7Bg5IIsFv4 aIg0X4KaSZdNaLndO5ieyffsY+dEH0pSg3GRSrSn9inqiogQZB50pA== -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 1024 seed: YQ== zfec: required: 101 segmentSize: 131072 total: 255 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:CHK:mvk5f2mghudk7xnknqddhvnl4m:c74myeugtix7e7l6uwybtrdtfselha2qrcc7hhbymqr6brtg4cea:101:256:4096 format: kind: chk params: null sample: length: 4096 seed: Yw== zfec: required: 101 segmentSize: 131072 total: 256 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:SSK:7adguvusvvbmctuzlf4g3arfv4:tkh7d36kwo7aexp5kcxl5qlhcahkaox56wgnaoz3ezeciurh5qua format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEogIBAAKCAQEAmvahwGh2jLEIK2xDDGU7d3aleXiC23PzGCDVbngFhe61hfAG ViA5s4fvMo7cgz/GGc1acIPKaLiZESPViz2P1QPNNMCFvTgKkYdRzE9JWks4Vzx8 cv3V8x6fwgzPeuGij0y/20AosukVws+9QyAvEXleO0BtDnddPhPPfebe0JOr64BE 1ky8r4VE3wIMKdxigyqWvCuIrSolLDpiCaj5Puw5KwMN17+KMuyIkARse0PFGboK PB+n99MyxpMZZWeDesrOF56HT0Wl18AdEfqDs/M3OqSobMy4SCmc91CjkXEnw2dz GVMJbyutQn4/5vrLVZb/Y8I/y/gzZ6YQOCNtZwIDAQABAoIBAEPv9KIiKjcsNeSz pgF9MEEDpzBGATis8NqXKnsv61v4d2StAlon7qQi6F9F+q8f+n29ZfUGEmsu4wx8 pVZSOwisjf6emQOH2jpLFTV5XTNU3vJ/9h+D4ZSgzHGKpDu/SEGC6Gn7CtzFC2FJ KjSPm5MRnppjeGxrMFnS3ZjY6r6OEFe7wRVjH2wOYQqHOqPOUpqWZJ1bktz3DzkP tK1ZQ+rxu27kpFbxeqLohOLW3JTrgphxPAgq8VltL0Z1wC9Umd7FjKb0irkcFgNm 5ZtQJdHwXHuEyuYGO+zI601vw9XsBnMpl6NOv9nvhX9OFEDaft7RnU/2GuX900y0 dG6zWUECgYEAvB00OoqIKnXkBHRJDGlL3degn6oYZlrQOjzQpX80HBA6/hmC4yy/ oll+pPlppmtuHlzniAQXcUsEtoaP5F/AXXDALKn9Ebk19VZUUBKWrMBE3jcV4ULT AdQhzfMExZC63TNKmn9v2naKzg1HtwX9Ms0ze+jKVxLDIGWxg6fy7M8CgYEA0uLT G3/xSrQbhYzBk+pBIIzPAZ9HDySJfrtje+qqmO40k9dfIrA482SlPUN1m0vQvMNq 
yvuPhiX1gSvnEFbVSpNUsezIU8j3JDtzxmuTljYqURE7ZSpSXe7WYCgEAZeBQY2n Ncp6jYRptOn7O/E3wyVptrdY5RZc8Y4XwD+TC+kCgYBSZuiODEkBcIrleJrXGPjm wKHXzwbJL1avbBxpooMNF/7/d+Vh5iQ71cAoPCkPgVfHbSLu7fvm4Nm7qs41V8xI Ii/MYNo+fUcppRthyALAwahpPvASsNcFogr80Etyz6dLZkBz1QcGR48eG6sifTkg m8rFqH+aDNn0wxczeMps+QKBgHEJE47R7UvVbksPP1NBZNdFok+ESFpdgzViy9hH 2FlQlO4Jqvy06FHNyKQl3Iv4/1GujTdvz2ZgQk+ScK/ZW0o13lfgSyBdv9qz40Kf tuP09Imvat626J9gvZec20jfJHE2tEGo3jesmdxW7ksa6IC5NQizDfr9GaSAPUrW yMLBAoGAHI8TJFiri+YF0HonCiJvQzSb1dX4CQ8zWoLb8bgRN204Opx2jF6fmGER B0RkYizKEjWwpbTAaTiYvMsjOn7eClmsyj2V/03cQ7mppZbFo+TmxhunYrfiXKpe AiQTdpujt6/dkc7b/LlDVjZ1eNRCBy/ChJW85mfpogC794xyiWg= -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 4096 seed: Yw== zfec: required: 101 segmentSize: 131072 total: 255 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:MDMF:vd6qxch4fcvs2szi2saiklzoky:btq6xrsirtyy6ugjjgpaguaajd6zopupixy5e3ngo7odya3i33iq format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEAseah1KDj5lbm5ULH1Tkdtp2aNSeWxxYzQ7ifXjfJGDx5yh7j 9aGPRxmB4tmdABQUfNY58aoaYoJo/JWOV5OAq68RdGGHx3o3Pa4gjpDMdIq52biJ lNnAGh20TJcd8lirD7RRtEoBpzQ/mWDBKiWPxm24imUUlaRzEs4BdzjaK9mASuu+ etKDvoLy8ajTGtomcU4gKAwbLYoMJwsG5hGicJlnVJ53PTjfsH0egpLW6vUfglEH tgaNaGfPtPvj5uJMxmm1vrXUufLok1dUxHaFPHzX4j16v8FUCZWoP5QEKPASh4Vq 5/BnFdWuQP46C/Wbwbn0Q3F2WqwnCWVuvXe68wIDAQABAoIBAB73PaEEtlagNsWe N6lyLS1dxntNHk4eG6NEjhz4ydyZnjtj4Bsf2ZAvLPAfH9hlJmHKakCZ8sjF2V9p 6uJsjt+TdA7VcSx0Jgxq9EjMhIIeqZXvrKcHtgv1sq4IOdK2w2PS58vhe+MuUYmx kT30Vs/bxlz0lj4r50nVKOUnNLaf9PwSSR3SoLAARLsvjpCSjs2QjxUDcnTmDp8U DAffOA9lGz8Ch8m3BR561h8h0+muLAXxJ6qx0cZ0+FqXof/FWo9MFMJy41+IkO6O YNRRD3bPQmbegvAx/yiKiVjfo1J5Z8bEgYNM8nKX5IUbdVd3sLaobEFXX0CAOzE6 6JA84cECgYEA8XRtvIkAgcp3D6bDCTymxwBX8t2ltCqYezjQif2Tfb3Nnoem5wxE CTJypmX0BqVYlYmb5bZ2BAi5A0ijRTwpZx9mnXB3HKmH6SX9Bnqlq8EiZMLeTYgd I0hHdd2P3y9vbu6MuZX2KYbXKun70Qb54lGfi7D3FWQ7e4V/IWkoYEECgYEAvJ4d CMDNiqQOHZXHLEJgUID3JlrIFEMrp7pc191pjg7hZ0pOoQzonGzfzkxxrtWGsA1O AyIxI5Wutoe/VPi4dpkM5oqNiYACUhwTCNmwoO64K95IUwyCsFJ/LXhhHVpu04GJ THB56kQ5HbyOoAZimlCrw2LxwOnicYQwaHrpDjMCgYANBDbKPCR/2rdSa64F+HQR NE6JdDNzo/w2YFi1p6rk02+bRTrVJ88fI84UdFiUZyOAZDu4RX7VNtcqeyb6G4Ur 3wB8KkzxiZ4fDoI2cDQwLyg4gFzVlyni9gmMLBaOdJMwSsHhW1k64d8FnDmMCjE8 ZyQPtsmLKK0gOpEg7vdTQQKBgQC3xtsFP05FqmkyfFAvGJFdfvrQfR17WKM9bsCt d0c0qd0HVghctQYj+5TpHeSac+QivyPmu7bjNCGiKYvMD/czXxaJvi//7CDWvhHx yqFlfJMn8xHHEWZ4xDi0JhmBjy5ymEEdoG25SzXXenQBCZejQbzJyCtDSt9euWyt MCzJrwKBgQDUR4StEeumfmLz6yGPxIucP6hxYBJPfS3i9dDV08rJkw+2Ubzk+oGV AU5NGUlSKdw/pbBRyXgICdWwB2I1NNv1qHiGoiaavK+/OmI89MiXhZ19OvrUc9P2 ZX+BDrxdjMWbm1uZxap0yMTAc5G3Y42M0HHFgM+fU2JZF/CDqDsW4A== -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 4096 seed: Yw== zfec: required: 101 segmentSize: 131072 total: 255 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:CHK:rvytwcc4sbmmkrdwh6b23ckv5u:klk5nqpbv37pr2qavcebcugfnikarjdefjlseo3ru5uv7guai22q:101:256:131071 format: kind: chk params: null sample: length: 131071 seed: LCa0a2j/xo/5m0U8HTBBNBNCLXBkg7+g+YpeiGJm564= zfec: required: 101 segmentSize: 131072 total: 256 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:SSK:2nkvk3zukvkwkomg7lcmwi4k2a:ulzy6ok2oaftysynb5uqactvbqa57ohasakq2qe3bd5j4jiyz5vq format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEArb0j+SDFBcKDajB2eSNb95aBayyfNNwnuw/flNkdxKlwDFEd k3JU+xAfc3+CdmMVlugQtXFNIAADiorcu/zCs1mh0Y1nLrrQGKLdiARPkUMavbgz Ubv/GAl20OMER3fg87SguoHJNvkeHOSSsd3wiczXkB4H8tUZJffGe4Syoxv5CLhY /9H1sd/Ducqjdf26RTbFDL/jLTPjmiOroZAv7p/yF3mVaXsQPgN1TOSJs71BmFKv 
SlLHwKjiFycMNbOoEGHL8iEUzWBU0HIEVm9ISQY91kOIb7SGN/PkOPQNWjyq5yeC XLTL5YQ2qIy3J2CjOTUR1h5gEuK5S1+ig1j05QIDAQABAoIBAA6tC3zf58S0yaUO svNIqVwguo3zFv/AGRsUHC7WqE0UgwKHV5g88DDFC+MVwk99zzUQJVkuWPV7CtGJ KVw33bqIt8KbzzuDTFDIcS4sLwx2PqwIA03EM6g0JHVAt/vRhI8RkwIuNHEQWhrW tA2SUd9SDmN+Je29UoKCi6Gjc/OTJXcc8hTHIoAcoMX23pNQWDjNEKXRrXs6F7Gs Q2yfauPXf2cM71cZCeV9hkqedjIln0R30SdtAS7I2IeRNTsB25w/S3zE7xZryZa9 +Jg++rW8hlbmJpm/00iIaJWhHIxbPO7tyQIOkr4PCFMvtHLUKnzCjmL3Gsd/BW+m +QAZ14ECgYEA7O1dRawkbzapYPUNed54lBWtIlaC/J5vpAgnelqbxDH9BIRhxQnV T7Up49feR5a+2x0QSgjsg2rUxF0t6vP3BAHfe9D0zGFU6YOa4HrQ5fH2/wXx2g9T aOBCtwOL/C/DUc3aMD4EOHLXxismQXXWxPk/+EBtLUA0XIRLZSnuj0cCgYEAu7mU XH7no73Xo8IqdJbUhfKfZSsIN1klCcEnnCZEuIi77x8ZvPM8pHyrJomFew5jsvZ+ EPGTvd77u01/79wLgKE94s8GvYnggWE1ItScBx3A/8PnQODSdEMsr4Tdm6Qx47oZ 5tgM1u7kYBwao3L9aguP9dChBn6e93mOqQuYqHMCgYEAkmkgcXSuUzeRNgRZHo13 L/OxOP4DFf8GeHQ9iSPDDFvjwk3YaT3pXsdSKqV0jALA0IDGVynqlk+HSg1W2dGH PSe3Jjl7fW1MXr1gEQZ0XxTGkNPon9tGrRGgyJ3dfKs7ZSrzgUphq0x0wNZbXqpm XPS2LkAJ96Osd9udB9gAvvMCgYAvtyIAyLj0I8L1+tpzvArU6TCetGtoNh519kSt KgT5qreqNguCvYjCfnW6W+YzuxqYWJL+l4joEA+IMlC8lP/PeCyUw+6AqtUHzb+F 1Oi73lI6MH9NPFgB+TkYe/sgHoIX0ivXQz8wOpSN4VbcCNRk6f9zic4EKpcZbCpY yXvKBQKBgQDHDB8/e4ZaiWY5VQmCk1GRZmNmuyII+vC0ZynlSUP6HqXn/Std/6Ze EEfnVCZHEvRkNwRYAWcaY54uvqj+yeLYFeIX8vi+rs68uZ4jKI3Hz8vn7rgzPulP Aa6fShcdGh8xhhdl3bM62/E87ACcr97QSvUsOuhIYwr098ivyyStCw== -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 131071 seed: LCa0a2j/xo/5m0U8HTBBNBNCLXBkg7+g+YpeiGJm564= zfec: required: 101 segmentSize: 131072 total: 255 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:MDMF:jnnfwffwn6qx6qtfhvk742p6g4:gzqrivbdeczwpgkwkjna2taooajonyb3h7aijcyhv7ptaxtycroa format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEAj/NZYu1pNcdeSCnt3N/BDg1jmU7jlbD1pgKaSXLAblkSAhD8 CE1VLCsWfNw+dbTJtvStGncEiw8gK/fQwSoQjxxP91qxD/6RpNAA7RamHol2bMiQ 0gtjT8H32LmS1TBYfszI3AD07+cFGAqbV+X6lobQKRK8U2eWD3x8V1pD9RHLJr6h nMPQ/epF1pxGf22+g7aauEEWPOStTUH3l8sf33Wg3WN2SkxDqSRyZXiETpta9Rdv oMjcsWHqGOTy9y94oG2B4AJHWrR1mefFN3Uu0ApfpThoUcVyI7k1XN/7y3HhEZru GJPaFafHjH5DZeHC6fxmJkC6vXSMLClNcfY/RQIDAQABAoIBAAmf2ddbK7hghM16 3jAxFqGHpCPdKiq5OudXT7T+8t493s4cEBnG/92U4OtGt3dbt8vfdo5pLDjW8U33 QvITS6mh2TfezK1W1iqIjLNNWxx6ENyrmUEt6T0tKRLIr4hI8/XAX+KTvsyma8kE doftNLCpQVQpsEU8TQRqjI6zlo0VQo0sPKZsvSfN6IOvPciLu039H7oqHxknHRy6 ACQIkCvAEH2U0OY3+ABE2y7Mn4/TW1eFsh/lp++FngmGgQcMIYZUR3TnOWx1wzwl IiPfFo1jVAXa70Up4yHe2Nz7zjNh/VCJCTMA66MbPczOjIbUtc/GFcCvDp3qCnCj nRjMDOkCgYEAw1kF9qQNgFlLTagCh+uVug1hvkZVPeDUd9MXtIn2TvjbdSkHilsn wrKguLRSgo/OKRj2J63xhvfgb91cMxsXb1+OHG+YNU2qhFhTq4k489rHID/iVskN Kr2GaAMGXQb+9t/2CqHCjkur5m5VxZSIM5OgRR/nLutM1NpHWG929r0CgYEAvKUV yqv6gYMnolZBcC9LhB7nZ8yIBtlmYjXRHUuh1Dotx6UItlA41k6GNhFiZXL4xPbf F69QzvV9Wuir9iW6b7tP2JUb1gnfgB+KH1F5PoI7hriUF88VIHWFeSg1ezwIMQvd 9IlJhlQTk2kvADo8jwCZahVGDwsQzsf/n2xw1ykCgYBau7axYHGE8/SuFSNXzmy1 BhIoNrLREuSc40dXa90jwSLtwCjocn59SEquf9LzIag4Hof21iwg7HEqhD6W3jZ8 XH29Z3fjCjfxULVML2hsm2lx6TpP5QJgn7cWCJGkE+PI9y1ossmTHkKxvP3Jz7uT eTYv5SmT+WauVtRclylCYQKBgQCsBZZTlHQA+gqAXEub82TXfB7kZnx8Um6sjAq+ viM6Fjt83J+PMKRDuKNmVn/1ptv3MG/Ld1EnCHFhHt8AvPK/xH1RMNeLXMF0Yk5f tLntKHEDrvlMpMfNK52lF+d9EwcdQocJ4M8tMSoQuE/l0zU56f/73p5eRWb0SShu xkI30QKBgQCw8SH8qMlso796eW6V4QKOD/zdfNTrZWfAK4BaFR5l4LuX7G25xZNL rNe7CbDlFzzp4ukkW6kcJrxR/4RLJA6nRDR86kvzl4A4fwrGKCv1vY5y27ioQmWM WAEYfQ+aorfZ9lbO1+QEZc7kjXlbyt2/SiZxOqzm4CutbetMlaG7nQ== -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 131071 seed: LCa0a2j/xo/5m0U8HTBBNBNCLXBkg7+g+YpeiGJm564= zfec: required: 101 segmentSize: 131072 total: 255 - convergence: 
ZOyIygCyaOW6GjVnihtTFg== expected: URI:CHK:ocqx55dopakqnzyui6q6euz4gm:wklziyoctwrz5zjfg5wfejz4bnaagtg57fgjlspl4ba6ij6n2obq:101:256:131073 format: kind: chk params: null sample: length: 131073 seed: /N4rLtula/QIYB+3If6bXDONEO5CnqBPrlURto+/j7k= zfec: required: 101 segmentSize: 131072 total: 256 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:SSK:7u3yuuf5t6ixafggoo6rozlkca:c4vxsjiots2kqxh2xx2f7j6e2rgxtszskkvv2geby4f3flm4zcdq format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEAr1qCo/bz9q/oRev0Upfgc7gVB3b5/2e8jkhQ7hPyNL9xPzZF bih8Lbn1bX+1ZAR/cm8/Vtn58TTQTrJH66TCbkBYHryvodl2yA3gLExnfxoESpWu tNRF2VOab7/RMrmseicirS706Nxk99aMk0Mc0kJ/3DnEmDJUKjfq+/zjCPUmrfv0 9xEiHAKHPN6EncJxQbPqWW1y7QPoEKoK/vBYusYrXa7OdDRmK6cmKWgtmNCoEvGS IujkkeP3lCiqrVcofGGC00VizESa0q4Iyq8Fuz1LaDHNT5HO6EYPevNjW/hy89Z/ NZG4tOvodZsCsGqD5XlwyIrmjxrETiS2J49oPwIDAQABAoIBAB5GtuK/i1zu/2A/ PraaAYeJY5wf10dZbm10oACTUhD4cwGyiadc9x/gCTeoQrXrruOfwKRqy2RxtXu4 /YD6uBVYJ77a3kpIJWGiP6/2WzJPWClWkc0oD41YCYS87k5fT5/hrPOQ9XlVQFuo YCo2/r1w/OmV0dNjcTO+5uQuRDbx75xtgYgICfHMOkFnwqJWRsNPtvq1oMA9VVrI zu3GyKfKQXWMrJvhmE3s+HLpv3j3whh7W5yGw5Dum8bPxh7Atc+15mYr5mC9eoQh Wdu1dlY7lHX2uHJgcPwhLz53jQiqlLgZ+8nw8ZNvSeKqmqeR6nhrDaUb4idQBzBI aWiv670CgYEA5e5J8QQuiF76HZqDkAN19yvMue8n7vckKyoHuhtTL+JOFdPkcob8 nb+WZkxyce9MjrRzSnXmvn1+HcY/g0+hw4qzpcQFJbyjaPREGKH2WQmYuJTHTJ5f owfvfSJqK4p1tNqxAtCHHWJfOA6v9ulAhHZBF+2BoZgl1xZxOrGLXAUCgYEAwzwf slrlisyPa04r8TfgMmwt+Pc/4g5UFC5WK66rWKMCzs8p1uLLhg39QwnrmaRxa/O1 t7NKEVz1uOwikxmjJZCvnUgHrYVeCp0uVfzS6s2hY/iMh+36Ocy4L1/cBXEHXbdw 2TAkQwNYueexeO8dvBkuXll67PQaHsSfaRxfanMCgYEAsdIDpT3SruylClgA/1Nt 2+YnwnROseS4OBmdODUBtLqUIRVqS5hRrb4JlrvwlmS3FHZB44gjF5b9/hDf9bGU LSILpVtfj7u/tN+T+mjnmBxv2/BT4dFprS/p6yC+c0X1mhS3aLHUjMkTUsspEw95 Mfgyh0rLQinkud9FWlsMp/ECgYB5IpnsGPfpeejWxIcBQRELWBHiMs7hXNCQQPvY WKUZ9vKsDN/B47Ax+gYVDVewWdbCC1HJrCWdxlb0KRd+u959VVuRM/sHkAN8hHAW jCr14yZrF/Fh+adTK5FwW4LxoWLXpBURvQwSxEXN+1MjXQHPDrS1d8GMuhxm0Mqz 9hXBvwKBgQCpF+uROS92UluZoWsjLuc0Tz5YP5ykYzXM8e5FBD9dzs8DEudMyhe6 vsSmuxaSPNNEAKG26wO1+774W2JFJkRLeJOpeyX5nhEl1tKxi3ACe+cMQvd2/bHA blKtALR2ZvXcoU6QC8XVABQ8S3h6+wMk4i1DjJ58UgHZcoHrozlBiA== -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 131073 seed: /N4rLtula/QIYB+3If6bXDONEO5CnqBPrlURto+/j7k= zfec: required: 101 segmentSize: 131072 total: 255 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:MDMF:kwets5gntkkdhed4hwdsfv3ujm:ws5ltuz5fytzncl3xkfkqc2f2zrrdnxarr22rckmkoruglgyin4a format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEApvGy6mRElXz50jCzj5EW5hrMZo8fXQHgZghl+yaQ0NJoVagZ TOjtkuzVYnOlF/9QxOt+yAfuc+XjPn3DJGbnnIUi/8qOUI6CwQY3uUKBK8cSRMTw A/LihRrwM0R4Qws/QiBQhD5KmanfcMlu309Pg3enAeAVpt86I8mgMxE6f26iXgFj IaLFx+5el+PvPYqdOkcEDcL+HXVYDVEawzifkXGq1JtWWKnDhM3SsdmS32mTh2/6 RmocrD7o6LSdOhgbhNqWdOvRt/ZLZif4Mm7TqG2nHFJ8OYCkpHK4m+JpNOiKmM5W BFdipmKRo5jhGZWG7w8rWAqsNY6GMbcTXwgNoQIDAQABAoIBAAHqoq2Q8N4f+Qy+ ESOn7GHAI0JWqIskbT9yn3wYg19YWQkJtN+miWqvRBxdHEM4I8Tc+L/CYo0LUbZr EnFRqp1IBIC4AjX/ytW9NOjQMAQxBP9L3P8Im+vgBSurgK9xWryvOwlnnyrgMb/d WiPfaNfnKOBLQqhfpe7Y/tkzPI8PsPWrhXL1CZJ7o3zWjyn/l7TNvqB+i7AgcxUx cgIV4XxY/HKOHHDz2OsPxJk7G5/iLZbl0Toks1G28NP26rOP49UPBq1eycfnZ6Q2 8oywX+ivnK2AYZle1cMynCgIOdZNE+e7eb+bB539LjUtjmDSculRFvUn5AK7TV92 gZfCrMkCgYEAw2cjlJhfPg2d+bMGcRPDGLEfincev2r0rbok7beEra5OqNaH1CD9 +ohgZd/WgrNbHsF0aC3kh+6p/kUrunbIYy6ZcAQKiQ0vnOie/TuLcPHWocxSTrLh xm1qt/Y2KBeSaR2MyhQVChJSWosRJYj4e/ErfCr8aLiMLRgA3dGr9SkCgYEA2rc+ vTulxOHBKAnlBS7A2wnfaCdyYL/Pk2/Jec4yaSd+xsYgcIuH7p2EjO6xflU1oyZk U7b6YvbpU/rwkJksdFiDrMrN7EPJFynYK4g0CRPl4P59Rc+gvJqDsVG2Vh2/ijeg 
0ybnNZFz4sKyKRVqI0Wu2ewTcDuduW4i5uemK7kCgYB4rcsoq44uycQmAa3ZykW0 izeakYT43TptzMef1LZpeXx1A8Fxfkq9HtrCMCLQJ6r/7KRS7vz0Aq8ULW4bQ97w ekgjCSvkhrNAKd5/MPYmdAWFeaXfmtSbctn08WdzDVPL/YcFCrAPv08DQl39m4Ez MrgTgIzQtCFGfEuUsziLOQKBgQDI3+noZMMACxObEVNdKi6IPg4Im8op36Dm2ZGi pGWaPGLsbwVWOGB1IAigY41y6RGlMVqNpI1cnUd5EQ0m0PeKN81fwrfUGgGzm4Pl n2ejOrozpagqmOIYtpTjI5giiZnkeOjlZWKOyXM0vfphT0C2+oX3siG8P5TBvMyj Y/gzSQKBgQC15p03OtcgZJpBwQ8bEGYVLu/1YiEQS2TdIKib+XOLwUfgWzr8KHsk 1rv9A3s7OxBZu4MD+iA0WyhcNSzexf6wEOvada9LHQl7eRmsW5OKDJYWyh5wo6PW CQLCE0HGlEZeBF6vexE+oD2/9T12/x/q0ylExTmXpWlkiydG2Vw55Q== -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 131073 seed: /N4rLtula/QIYB+3If6bXDONEO5CnqBPrlURto+/j7k= zfec: required: 101 segmentSize: 131072 total: 255 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:CHK:lf6qub3bkdhfozpdis4bmorj7q:zte3u3ohbuom3iqjjx4odrjylcpw5myw4n4kwf4zcaif36vngf6q:101:256:2097151 format: kind: chk params: null sample: length: 2097151 seed: uqWglk0zIPvAxqkiFARTyFE+okq4/QV3A0gEqWckgJY= zfec: required: 101 segmentSize: 131072 total: 256 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:SSK:m34dbj7wfsaq4efuz3viwht42u:66nxvmks4d4ebbj2c3behcobuk7w7y2i4yz43ethbflgqpgf6nha format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEA79z0LJE/B4nEn+ehBk24vaDlLdj7N9ieX2njI0PrBfe1hiPk YOhLDWXh+112jXJ9wLBdv5UrJxeFw4hxBADycqPSkC9BVte646lCANS47G5sHibX vpmVoeuIV2GBA5OnWUV/5Z57JrNim3dcqJXtifmj8HmxuHOIBWxmKdl82jf9AXcX kk+F588b7YRjUNPNpv4PROeLpFu6xv7RzdyZJ/ByrSCU/WpW20OKCqNmBWRp0uV2 HS5Ii5NCql/Cl21O0ITD+XmKefD0gz+ek0XHWkRMb394Y6igeS+ZCJadMqD7FzBE Jm+n4JOCmJjicPRHPFzUWRsTOTAzTDZnzy6/MwIDAQABAoIBAGuQhQlVa1QIkp34 5Cus//s943h/dQ0Svdbg58ShSQyIjKVmhBx3H20XMtOkEq2U2dLm5GutS8hAkrJg hfn7KL6DO8KABoeYv80nUpuHyZPxYtfUqGxneIQ/2QkChzYg6WutsJC61NRCnqZE TU+myHrW8g89q5ahbK6t8VS0HPrIQZOuqmYiGM/seTLbsQVkppli6ZfCASMV1Wqo 0A3q1RiHlLY5UCMYUxdPDv1MrhkRqMZ1HScHtQuqRrpijXcfeHYQwU50CFgPs2nJ kk3Tm41jAFzYoBqe3E7p2R5Wlc7rbC43yalQ73JKRneS8IdAu75SWWcRH5+QASRa ojT2kS0CgYEA+AVYqwznAuhEp5x6w9uamy+/jwEOK1usM7MOo7PrPCu47z52t3Vb M9lgo8eY/PVn2jlgSMz48iZIE6fdr43xdtpxsZQZVu4bpXJ40B3YdNmob1easivx 4fPjZZ/OTb46veFN1GNsndyQ7IP49NkiyOStJUey3LDxJQiIP56lxJ0CgYEA95Rr 5S17yepS3dwkcr3H+zN7/UK1CnCzzxNKp7iTqK1YYXeOjNZb5s6qrtwAZx9GmRWi U3gEWTOcKaAQGPjHA+ohYU/qDpYbY+EtfHVDJqc+hMy648vyWfeyZuVghJxtSPxp jDJT/ACxnD6fqDzUXRtViBfaSia1VKtCipr7Ag8CgYAI7mhbAIPxHtwaDRB+rRHM NNP5GligRxTUZ8ZHLttxt0FZnC46PQejvlg0jaN8uHmc6iQFexwb3DUMQCdDgyEG 3qbpdiPTdY+ZTZ38IJcC3jOqjsULVXnIYTf3GOIc+pSy8cITu+DVbnPpkHcOmiMe iN2TUhmmyNhmNQBzCgt+IQKBgDO5coFC62XX6tAnOgYu2CUHMJRM533y5d4Rbbt0 uIS5EonqbIHIFxM1gjteA0eIJTu+ZVeC74WjXrDjm/lboFiVBbxK8d9yRO6tEM+7 v/fHYSxliXYmGc/qC/+rVGrgM4TYF0UPDrTLgE/gVYLUkpmRKGFyekyboa66yQAk OOTJAoGBAIm61KjWaZZlE5u4SwzYijI261G9M6pXbSChEjket+TOd2wHMRK5bBnx G0wESE41BeycrL44V4YWzJFCB2qB1efwnbNSeWkvxOsXMFok5IZxFOzKjPflcALA fhejJzLC8m3nOklf7hudeDOYUKDezMnrr/ZW5VFHgMAw3rqq6pF+ -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 2097151 seed: uqWglk0zIPvAxqkiFARTyFE+okq4/QV3A0gEqWckgJY= zfec: required: 101 segmentSize: 131072 total: 255 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:MDMF:36rzjuc6qwedeiybhkflvevttu:as44hfkawdkbcvicdc3nfynzgjtmznyihu5773wyll4rt64bbtvq format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEAxts3c6ig0ClkvLx4Cl5tAT6xpfuw0NrZJ2RhFFBKz/ddOw+X QWp6zneMjN3j/vPZzJLq3ZECGqFdsl1o3qYSAtZFDLnyh2cewAiYttv9EyZLHeIF hSKqa8IegYtem3pNYzp7W8K4yxv8KIq3cO837BmeDaRNUVv88sO03AWFyb+4B+z9 mCJsHsHrfegoXCAKKSAILPTfHB0wbLEl/zh6u06yIcSRzSSGtbcun5qoCH5AGsf6 
wOWDJfZdJXF35tQ8RZnV8edYMmH7KeNQkpiYiP6eGN1fYtLJJak10nvy51460lm4 t59VbMG5UVmsgSe56xNm808p5nQJ+XvclRcCFwIDAQABAoIBAAnCOUrs6Wi9wfVR 1HGXJ7H0xy1g7XuaZHy+kYBjM6tYCtRlU/lrBc17EsPY9yGMYFGzRgu13DdoAtvy zJIx7oFYIuRYfb1FA2kjeDHFGFAnxBjv03CF/nyyS+KfRHch0zQS6ySE06JtZFGt rl9ARhYekwySqxQFN85xaenq6swMjTpslHpkcNKLLBuCIXbQT/351JqW90p2VYVT 9n9fQ5teggKaTQYcDkYR7uPMUJRDQRczLP8xU9naqOki7Q+SAnmxbWOO3jyJIoQj mLqWzxV4l0Yts14WYDiw7SpNVdcN7qVcfVsn60Do6ekxnJ7E/d0GbRsrN/VdksDr RcjJv1ECgYEA5gBkKiDf3kvZT8R55g0kTuFgEPPLVcKxR3D4QfUitioMHKTe7dRA xs4N/0yzgurAx5FzJtMmcWFd7ypj1+UaNgR3Q1q+8n7mdpxEp1dnN5XTEa8sEAQx EF4MYV9RT8xnVYzmXtWKRq3UYhYM/rYcYlRs/SYaRRvD0Kd0oZxk4ecCgYEA3VWR pITCHne6zQwgj3hFyUsHCk59KeCbyiBP4BKYLWaFKfSj+bgyYATvuZtSGDBJUpyZ cFXw0IlLTgMGqGQvAMySXH3aDK21aGglkDxpDpIBywxW767wmvA/stP/T7IPLvtp 7ZC85Zt6v1LmAy8/xfZ9Rvu0qilGTzlFbuQXOFECgYEAgc44uZoCHphaFe3CCjbi he4mZIri+AzANpyoT7lElOCYI1ZdRoZi5JCIT8x/B2Tr1fXdsky6xoR4GjGnVcJE D7ZnhMjjOUKrWMeK65KlezaAf9uIF6X19tHNVOsRneKzcxHpNh54Qrl6Qr1FKj+n N0uEkz581wH7enf3l/oG6YMCgYAfz2uGNJpdnKGZVLPdStDk1EanwY4VlbVuQGSa dLGwXLqoxANJIaMDz9HQYDVVSqNPHziiP4fDwOe0x5SOYQ+sUrp6VpAfIFwhLE6x wyzqLivZzeU0v3TPH9ZX0kYwYwvxmaqovROZAFaM5tIuBP1qazmoGQbnKdV0D2we OuPncQKBgFGxH9re7e6aoCSxRnPwZgD2dbI/Ddpw7qmuRc1vSrwfQ0I+eNyIQzhv sT/Bkw0J2SY3IjLdFgPbqkKgXX045kT5acVJFns7dQG4lh+sayiUjM9dhiG8Dhfk U3gbMspIoeGx+zH7RX9NwDaUP8M7oeyZKBRZV0QX85hxwv0wNF2z -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 2097151 seed: uqWglk0zIPvAxqkiFARTyFE+okq4/QV3A0gEqWckgJY= zfec: required: 101 segmentSize: 131072 total: 255 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:CHK:a2qoww5xx6so5uftwg4msdtrpy:h7hwsjco5i3jkd7icytrtsuzcjvjzlnkr2wwau2tjlj5wehjrokq:101:256:2097153 format: kind: chk params: null sample: length: 2097153 seed: BTBX/ampNfLU+ox7xipBGiaSbgC0kcB8Gy7BkJB4oKI= zfec: required: 101 segmentSize: 131072 total: 256 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:SSK:xd45qxekarwrt6evpagw3yq6qa:56q3eead3qjwxvxxrzwwoleounmeneuw2o6mdcho2jqaghv7unra format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEAw0dwdT9MuyMSXaX2etcE8p/PwUd76I0p+mTR/0uiS/L5t1Dn wW6xgdXqMKKw1OAdfJ8ANrgLkoTpnl8gBlalR1OKG6r/yLhVFj1qoHfIq/iDdyH+ O+9P7deS0Huowz6WsOkN6HQ4vG4Kswj/SxHcbWuSmffb3otKkNr8zwRdfYDcCrv5 8PTE45SYFCU1x/ZFLCXb7y8pyPQ64h/M2MCNE3NvnxntkWMtxPDdc7577jPM+9Pg A4Yo0Oy+H65rN3x6Gpgtm9p0A9HuD++XFIs/vfAdnHBn9gdKegtUKxxlcQZuAm7L Oj4JpeckQj1DBcOz4V+uRnJVmjXz8GqPUjt4CwIDAQABAoIBADHXTv2t7VCqL+rV DCboMGwYm/cR3q9JMFinWO3XCRJnbpL6RFybexTPW1mUfJuoo/4FuxE8Qj/gSpxQ XSUhAkhFKy/KokRBv/3QKSHR0NcLs8o5U+FKQFVNOzyZA0bp954prZIR79UwJOzz M+2eHmuTFDTWDTksxhhYvEO+MnpwchZEUyUWGYwQu+bCIrGNI4SzfJhlRM7wBUy6 9iZ7e+hKsj3dwZVKxXNq2m7hhxZ2h9EqfRCq2LJjFiSto+zM4crDgQslxzntHdtO bKn5McyWtR2FbzDhz4/uPc2E91+JMjA9jIMZ24egXs9OSejN8rQvw04DojiCKMHp cDjagDkCgYEA3mF3bHymDPkc9EiDkr0vx7cR0O3+Caw17vVlQ5OtMhCFLhW7f2th 2m6e8yZfQA0B0srQM5rPgSE+9U/HvLwSQzpZJewfOevS1hohTvUaf+9CRfRTHxKv nsimR7SIfi3bcHUltPDiGKkqlmLNofN2zoB1A8SqblwnLHdfRP/kQI0CgYEA4M0W 7FoSWO7a/26MW0ibxhxXl8sfcgzIhDQa9P353g2HDIc2AQK4//xHQ/nXEcL1jOvZ I3IgjqjfIjW51HIwbjEz976QceIuJKv8xw2VBk265tnE9171KdpKpv9T7oooQVBg kW7F8leKTXIFoFH0Ug3xe//sHEFffddg7sux8PcCgYEA2xX/B/yNwz4xzmEabxi/ 1+x+Ou4dVv44bGGLEFaPTUGFU+/JNzFdyEsvgbGOKZYm87sn/49HW5qbYiblSwWm oGD9ryS/Ztr0bkZ0BkvnfZ8EFdMtiPFp3+8iEobD6jvXcyWWrnqa5VzUPjC9Eg7A P5XCsqGwnuVfGqnITDwmbYkCgYAJUddacx2BnF1t64pcGnWC0Bf9jgk+tDL38CUR 9RmP0CXCKjTd89vxmObndYsqDFgbwIdfBdM9ttiRVYLfwOArIVUTN05LumHJWWwA YJrGCSDvgyW8T66ATrF7nOhA4m9qzcdDIEGKm4B7V3fOPrePU06oomKKhVdNI0m8 fKti3wKBgQCRlS4RDAQXu+LbCwvGfiR5LRN/UzS2TwetqoRkrE99V9LvO24aFNJN 
HEtBxhnpo29SXStmRTiV6Dsyp9LjQxFsVlSsiHN+f5SwuKKNMVxZlRAxLx5uCE/v O6qsKi7zb3B/eTNLSKk+KjMM5PSpfgKnb30I6hHxUWH0cKULheK4UQ== -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 2097153 seed: BTBX/ampNfLU+ox7xipBGiaSbgC0kcB8Gy7BkJB4oKI= zfec: required: 101 segmentSize: 131072 total: 255 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:MDMF:xo543uh52xe7gpramuim6dsxt4:5zxemp7hunykp752nywtbfgqiacrxy4qzptesaaijjlhvwshrjja format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEAlS1aW4nTOvDv3ObiFiccAmhSCUcQxJDTMW5o/ohlr4nofwm2 z9N3XJzlQ1z/O9zBUVbhPKvtqNxT7IhobM/YTzycvoutknDoNaSE3bkreh5++xhR F0hn0Q2he+KPY1KdxtktwTaROLl1rd/6qC1OR7VHAJF7qjhhsyTqw6aZFXjV2Eq2 lh2PYARfOsH2UxAY3ZP1hmXiCI5wNbsxXzhm8le07sJVQSo+kFbQ6alroy9DYQlK bnIaPy+WOMVzYOyZRiG5ZW6A4sNdSTAbJ/3dMf8zRumDCrrGpv+K+gsaThcaOVSJ jf414vtlzpih0Ot5etOhQ2UE6acx8+L2T3uTcQIDAQABAoIBACju8+9QR1zSBg9o bztC6gWjGHehP3Ggh8L1l+vYA4cCYYCSas5mKUeJacNtPj/v2D/4hf9+8cy3AHRU Dctl0OYVLLGAZFVdk+o8RZUNnWd5/L/rsTyhSpNrmRcEWPIZFmAc8dgln/2frjHS 1tXU8Ljufhgi03sm03AzvhOHoFPqk8WfslmsnpD36qmbGbV7AU1A0oLa5M73TkHu rY4u8q2vzDBRfnJ8hwkcyHzA+43uHf5uGeYieSZBxP+m0GzUChnYBkNGKmuHGqOC RPx5YAMfu/oECq6cRvuTLTGiHhDOi15ukh2HVwjGDZaj1qHMXXQJ/ghVcpZcDUNv 4H7vpI0CgYEAten7o2hbutKmuh6e/fr3TJrbD0jWajYAf0/Luo4rmF/VOskeUbiT OJ98NeDomiLDBAiRK1kaaxoBadMVN1lNQ5UiRW0srHWNsfQr/998+ZzlJCIWmQ0Z B4GOpUdRo2GwhgsWS3xIM+GS5iqTc9QyciPAMjqM1esCqkliMMhjXGsCgYEA0e5J uQE21G4RpV95hLpep8kBa1HwrIoFqOev8636/F+nMVR3X8RHy1Z02tEK1uXxKr0g 1n4KYWTURYRlvPehDunPhR6Wc3GAWOJ2gvcZSgPjKjzlaRPXDQDTGSJidd7/ClKh XzG0zdSFmvpA+tRSbbtGQQjSyhSvRN2Fip+JBpMCgYAD44mel6eGWeR4jBkIAupw d8sBC6SRxq/CCPmo9ksWSc4sIIqGYrS6/CXSnQk76kxS9L/ttkzrRzYKhhmpAj61 mCWQaGIRGb46tKaQJL3uNB1t5VCoWvBTCcD75YdoP7lfVDNYz8JXYZYbV4OpcTrW 187PBBNoq0p2S3VO56nAGwKBgQCVDZ+Cn/4SLmSRCoz5VGpIr0s2q+M6XnVOS9J+ LhV6g1/ugo6PjIl9MlGd27bahkEJm2dpY+xy4mhlQ3AJD7lnIVOarPEd3oTGl2SV 8GQgTUpJfxtT1CZosSExQ1ytXDuxVKIHOP+q9S43r1/buE0eZE2pd15S5QTc3Hwo xMVByQKBgFfnZcaXdyC4PtOahvs9UoV/15FtKjw9luS1zFrr+llA7aGeDRkoSUNH oBSvpotaM8xUaHbUN2jTY+GCZe+d4fgEqofeDE8dVqIoFfwvKsH6EHRxwhGjamlN HRur7hNPvC4QcDiUK+zF4jT4qnJHXo1M+7tlwbOXEMUk4E23AvDC -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 2097153 seed: BTBX/ampNfLU+ox7xipBGiaSbgC0kcB8Gy7BkJB4oKI= zfec: required: 101 segmentSize: 131072 total: 255 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:CHK:tnevhyl46kwtokjchy7pymrdga:7w2z4jd2hfnnng4n2ziz64hi6yh4rqfodxmlpt5w5ksknlfv4xha:101:256:4194304 format: kind: chk params: null sample: length: 4194304 seed: 2NVWOXvtwcAFf0J/g0fDC+ypQbnCU7awZoCxcnhpkVM= zfec: required: 101 segmentSize: 131072 total: 256 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:SSK:isisbwovtqib45rwg5g2mfdi7y:muylv2hhzo5wugdclu6us7ngvlbecv3uxpzrqgj3lnwsgxodz6wa format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEogIBAAKCAQEAsPiJlX/DPcGvoJPp1ChAI6HlQO6qRMiGHZ6tqA6fto5b5yK3 bB2UzU+bG94CgObFMqUf479s+1VRWbSybfsRwXLx6SXVmvSHYFawC8ZRs3YIEP7i Fl6U1vJqtxdxOLLXUHfd4VQZuRah3fAx+ZHqpO5/pWRW9JJSGqJ83C01MIwI4WmI YryKu9oIJIi1bv9VkbfhMNx5fM6MSHQltm8fuBWBLzntql2eckvHUryRj/glAV9p VLU28bgLVMeViLmZFNVUlmjczJFSVAML7v37n7/xPQABAPpSnhlN775p5yN+cB1y 9JsPpL45IF7ihNe0jHQEKI+sQ5bnWdvtznWTiwIDAQABAoH/RLqS1rYjjrZ/7ZJz H+7cU46uYkM5jpK+hD8wRDj9KlF0Xe2ChtZh2EI2EiETIumKXLWviu6fXOcMmB3W xRqQAJGWZyb6X1SkCWiKqXV1qPIitNF/a+LOQRqd+ERrPHU7CiVoIusLEJYqMGXx 6HAMpJocCQiKGe6XOu/ke+K4UF3ZTBL4ar6yZuIYyiiGlpgNAwroDyg+hO+snyXA catMRT0h9oAFp0864cFTHanxjgSsbZjkyZtV2AFb1u2rjjd6E6NgUmJ+KKjK4QKR qIMNOzlEupAzEkh8jObtlLY+9ZAM8eRGNOa8RNimZq8ntl7Hhl9y/uw539M/Xdjf 
ejG1AoGBAMy1Vza5NKvnAFOaaORF+S1gpT7GaDfvtXjaokZbdEBDVy83KlrGDJNl XR5HbqZoeKGA/mbzHTWZrnZ2y2BzoGbc0aM6rtFsJ3rqabRlbmdyQFK6mjLmZGzC 5EMPG+pWXt95d7woxtqdbcMutz6ZtWn2uquXz4kM9R2SoWPvucKlAoGBAN1QBciq k+a0r4z1DEs20Yz8P1dCZdO4FnicI8Vk0IKwRXbJDYPpTwAH5ePNXlx1jDynLs7B 4J9csTWfhFwR5oeVzR5GWwmbx9cw6g1n+q/rpOaYqtQ+h0Jia5kXCKNcEio982kb CxtGDUgJj3iNi483a/wCCcKQXIIAkbxmcBZvAoGAQYA82cvFKMQPfLDJo1Eoe/aS qVV+/3b6ECOVDQIyXmWtvfPe35DDcV5bv1aH90MyZisKPBLKY946zrkQNlqJFqDN i3c5fNUohNIA5LIX843BOzduI59IvuxVcYeiHQdp8APD5jb9+fGpr2yBQcyZGcDS 1hkLVQUKYV4Luhh4zekCgYEA0VkjP4DsS25cKbCcIoIGk6EBod9zR2V6DDlXNSB6 hUWNUCI7oK6QRm0yL91TB49CSxWyl26atuUN1LXClP1x3ov77kmLUHmF/q+Ml4Xm g4cbA+8imYdUl51WPwik6TLtE/xqRuCIDxKi+aPhjZ4HiEBa65ZZ+Sxp9afoNBmK qg0CgYEAwZynW9qj3nH19nqqHPvOgGhYShotmLRznf6mmPqapQInlv+42ltxVsCB bKYgyM5KRZSFYVjKH5y1SFAByY32wqIZSx7Z7IbuSQNeoAt+DLlPfusuSnQRk03u PU4IolenY9gbyniTLxkcKjmRDQa6L9C6/ADI4FhUSTWlbcw3228= -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 4194304 seed: 2NVWOXvtwcAFf0J/g0fDC+ypQbnCU7awZoCxcnhpkVM= zfec: required: 101 segmentSize: 131072 total: 255 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:MDMF:2vhlj3tpmx3nhv4zq65aoss3um:dh3ql3v2mlnxizs3med6mmd72dxzzgh2varaq5zgs5e5vmob6hcq format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEA03uKqCbjzcK4tMQi1f7mAQl2t++ihbWQI2m12rsf312o7QOp td+qmKsQNGokH80lc8wdlkgGMTUtn4J869+i9uE3EDT+JhOEkRS9tPYT+sOaMoVt EVhioMHc55FruqXOTKv+BxkF8DFnBF6jFciQafH6RE2D8L+ASwjXOghgqLcZeIQp BRDe03niKh/Es8R8R+NunUd65qn0ztXTHITDBbxVUAN0Mx8rYy7cYi7drGLA56N2 KqY+kvWOXoDYzuzY/ILz8fQWHdNUOp2WbVWQ2MnDm0GSBUgy8y3CpWxmSifdg5mi nunNG1RsEWHV56N3SOCz2+zYiNaTyC2BzIOmLQIDAQABAoIBAAx4SrsS3U3YVHx8 MDL+vmCDRq0eg9kZ1UTxSm16U9fKNa1mu4641d3+ADecUUL6yAw66z0IRC00ndsQ 86gTiLrBRDZAGa2fFasEVmdMN0NgXerqs9KuVn/KI+oXngrkafSwE4t8Qz21fAXl mqeB1a9u5ZCPeDSC5jVbxm0VP2B98qElxzdBxYZnxxmnisYa66/ghkDgmwcmN/mV /r00j2ZXz/15CKd0JKNmztt2g+iXNDKtUSmJQZL78oQiYzFDL5nm0wQNM8XBBE1c arMSlZ9ZpOZ5NJXp33XsLbjF+cWn2oCb8fwEHkrVYP4Km4rebSCEZpx5bqQfIOrP +NtyF+ECgYEA61MofwHzJkeIxcKUal3aKmgD3XqDbRgn3cZfkqdG3Osm+YwO/mmu MBW9RzrMuBTtqxckYNta04UbsunfFiaQm3xCrmmtwxRzn+re+jp410yxeYj0fQuh +vRkez8LHYn2BKzfQbWgX1QdrUzoR24oJezwWQWgbkuMGq7ORqnNR2kCgYEA5hAh zne7xz3iChd5vzsbBmkfcT6Tp+QcQqLt9d9wEupMJ9ppOQzk4rBXwFQvjcTUOs9n AIo/qcB7+XAchCp3WCBdGC77Rz9hBosCjah/coNJ2PVM0CMF8VhxMbI7C7IQlKPd bZEmTCwQOR6kKKTD28CqWHA7NIryj5Ux2gi/NCUCgYEAx5Jy2aOxrlkkaXMnoz2M 9EHaZU6tfyvpQ3AlRZ6PvnO/Tgu1+5VsoGMPbwUy8TruhRbPR0VAtfpBD27AP2zd Xr/3XStKrhL+LDVofRZxvUXRjZzUm+ftq4LwZIWGy7pg5n4lqPh71dzkfkCnDU0i x2c2PolDEccIPujZD5yZ92ECgYB6+jSYATjHEDU738CckCOqEZdVGXYkULMqi51X yNBHzCZZR07nyBSxeEHv9RBWX9hyd1s/1qahPtsGQv97Rpf065fXzYVUWHSs4rHC t0cpFzTqXHVq7M3IbNZVEkitv8lNKyq53tTx8rvZTJ/Deg+X8C0eiR+cvolaZw32 1qYeYQKBgQCtjRniQKjBS9A+cs7/5XOPCKfOqYOQSxlMrnto5sX/CO+PTvT2m4jn YTgXKQ3SerpTVqNITtghSPBPV/eVJfLDuG8c0ZzWNOjCaZ5M91rMuAgJ3ug3LupF XUrl6Ks6B5OzXgGqkk8SCnsaKq+3JDQdvN72xOQdaS9Gi5HhZfJ4Ww== -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 4194304 seed: 2NVWOXvtwcAFf0J/g0fDC+ypQbnCU7awZoCxcnhpkVM= zfec: required: 101 segmentSize: 131072 total: 255 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:CHK:tg5w4m6hi5ezeapuix3lrrbbtq:7kerk5plqpsd2upnole3ifxoegxdux7prazbvzeu2kyilc6idk7a:101:256:8388607 format: kind: chk params: null sample: length: 8388607 seed: w6uP8Tcg6K2QR905Rms8iXTlksL6OD1KOWBxTK7wxPI= zfec: required: 101 segmentSize: 131072 total: 256 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:SSK:3y6p62da5ek5bl263mhgva45ke:hvu3gjbdhqkaauzqy7wt2aiz4t47qcqfm4h3ur34gcrxq4skwxkq format: kind: ssk params: 
format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEogIBAAKCAQEAzclQ4Lb4GgTvkDXTIih0g1v4ys/bce595lQxSX2nNjeSbKjr o3hZRVJFxFCe3ylEFcsAtMflktITGzje/mWgrmn38bxYRASOBVdoLFXPiO8aCgT0 Excm4smNf++TQp6G3oQtey+xf4EwOtuBw8R4Ehm+CqFZrhB+qfMY726eUW4XtmZY zmpmM8pTWISgoZEqm1tKrV+qLfArtvrG84+VhSkzTg4u+oJKOnUINWfSPfQZd9nY rzNMe3K9OuM5JqYbdIVbnUS5jXFs0egNui5IwEgUtsom1nAWiljgo4o5NepXa60H XTFs0gpWzoZAO1A8J7JuuLN7PLzBJhk4ZwLTeQIDAQABAoIBAAmJxFiIDoeZNTfi UMkPZjgc9BMFb5rQJrB9dEPfYbfU+1HTQgnDhyK8CZ0L7hMypNPcQwn+Dm1f1JAh Uo+oyvnukjYXiGFNsz3640qTxyDmETdH676zRuOB20/D3VfcBG0NpBSGvUPHS4Kc 4D7ATV7sZ8czG5aib9aFfKFDZ63nNsAYWFJ0SqwwVembFkC9vOsxenPKmM5O2d8i 7G4wXRtzTMjhsccmQJfCoe4xAVuFCKhNlPB2LegglHfFowXcTc3LD8dninnZBfsp APwbb+2g/tWzj7+qn28ESxjYG84kLRvg5g1/RMgaYqLRti3vgERVnjG7Luf4hCxM fATU90ECgYEA2x1HqjMhSlkmjZNEjsQ8Z5sMv/+rD747IWGFumCf0PVl23WmQujC OW+bGHnY8A1TBKlrkW4V3HJfn3TGIaRkTRGzYsvtYGnIrgREJeNiQOkETydJq281 Dw8yvBquPAlruuCWucxL2lLpTJvDJYQ8WAKATXR/0XpSnynBGz9jq7kCgYEA8G2q 1zPbvS6Gj+7yMHNH1TiiFub3lZEevUkDGhHdFg/cROZCuMTK+DFzB4uGqZilXUw8 OASKuTw4lGUSNLNEkBuuZAUnnZ+LpGt7Cln1avnVwzSOK639zfYRaWMo8Rc3Yy2X puWu7VjHn5/GojACdeThr4xU5gkhBXv6Xy4BxcECgYBFWEXm+pmNkxtdcP8gg8Bu NabaWMrFh7nk/Z059/x8QD3FL723rTxSuxyFqYJbrovYjNnLQ+DNTLEwoN9XpFRO A80W9l0gxznIwPbkWsssqdJATrnE9MQBCRlQaM09mOmsUgnBsYNMDDNjmGQxSmFi pR//41/UZvchAjDoM66SmQKBgE1t5CEeUFwiya888r5rweyHKpxZkc6XR+EJzHfu 3NaoEPYXedFrfzpjInqBksK3qDndvV8FB3AUVtxjmHNkcGZAo+8OQe3fXed7vcpd ok3rW85b9JVYmW5lGsJn7t2F7o6ANmDHg4homRFtMVk2QPSa25vfg8/5jKrpfH5+ oI+BAoGAW8ezqR6768jFvKx1Fd9m2vTk29TF7CSHSCgjp8qBtryIQralfdDqdFSW 1riWoPU5uW4HWNzuQuZOCiqnwTCsc6tHxsxwgWCbtv0nliJ/VxsGQBfM7lyNHyfq tr4pVuZ8uXd9v2JEe6sI2muFjGmTu76ieglgamb/KlIwb61+EWQ= -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 8388607 seed: w6uP8Tcg6K2QR905Rms8iXTlksL6OD1KOWBxTK7wxPI= zfec: required: 101 segmentSize: 131072 total: 255 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:MDMF:qvcv4vtu3lbnjijwf26wiojbbe:snscnddmlq6642bhlhlaqtw7actaqggvwv4gpezgjqf7sxs3kena format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEogIBAAKCAQEAq1Nn0JufzDRSFksuIYmhr4sLG2RjLhZD8Lf1E10JhSWrfb7Q muqmRmSv2f/2ell0cXxDtM/7OCjxrgZ92uspBZ70io/T90EjQ9D0Nxlaz+CpW9m5 1qXhCdYzGjaV6M0rsOnU5lWXPFjdTz9f81VD6paPSGLJsoO7jSpu20n7DL67O66C OMdKof2Pr4/pN6sfOyn09VKAzOAZoP7sGrre5xJ8T4LnBYGJ6nS2wyIU8dlou0Q6 PqOx+7OIsDxYgaMPtA0nR3n84WpnsKGnQBkrPGqPClhfhF/7w44WU075CIvuq/Gz B04e7D49UlEEwOOiYG/r4NpYIHrhmulv5mOinwIDAQABAoIBADeJJdHdYINVQnav kBiXAK5iqAsNE4lQ9l0FhI/uTLO4bkqom/5bqeKPqOFFs6Qdcz2GRnxKHukpfI4o 1IsuR3HnAOYZkWBI4SGOjlt+AI36CWwYu8D0rGn/4TjSEO4R8+O5KKYxgICzXane pT+/l/BnNbMFMtSHFzi/VIgJBzQt54OAe2sKwCSB5JUBBgJVce0idFwqvoVv7jpe mxZpevJlwUjJPw8ieDOPoS7UwzEMKHkbH/iJ+mGWV3JeLH76DG6BDGIA6+oLB9X1 QE/JXXB54t4jybw9vIT7toC0A6lF24FoXqCzoAHX8RgJjCeNvpA0Q103FktgErp0 awKCMKECgYEAxO6Hy8RIK7rcmSpdPbTPOd/X9uMfJ5wuhGHgt3TFqlNYHTiQY35v S58Rh5v3wsDbFczg9BMbYYY//PuCoAeNI2O+iDR4El9u1T8aMRCePIm8Z5v4SFUk /Y7bAO2Sg9y95IF5s8EL8d5yo6S3ZZUKhZ48zfTN+oDG2fAiOa8/VUsCgYEA3ra2 m7VyTxDAKRjq0BqnNUrg/UStv6pHA0N+iyiHoE9F3xVHKHhBP4X0aEps9bfrErYL b/cQvRAS/Cb+FaMfeWTSwYzMPok5xyoUtsNwjeLwPyVp+TPdOzJVuQmj6ydaGtpD KuJbbNr9vjci5fduB9DFUXrv7rrcTSTJjldf130CgYAUaxLjWq+M8SvsKYtPWY7e 1kmjDHtvdO8RxMAy5UWVWlzZcsLtve82LQD5SX+PzsUoZnywcca1/uBlj4JEq2PD 1pSrtJz6crCgJZHGoo11g2Zoa7B7d3CFZalpWDiHuXxq083ViF9/rWu/cdWeD6zu m7B8PjSZE38Km65Awt3TLwKBgG+oWSr9sD6VnlG8bVVCV5xvWxd/TEDwhMPNHe90 tXKY6+XpTBCtIcFQTnXPAou61r89x8QtsRWormv+vJpqewgolUV2apvbvrzsixAK Mi7gnSR7hILtDrh0BuhLPgRSaWlXDh+89qs/q8Gm8Pcsstx2PccZBJvC0VpX3Dlh 8uodAoGAfD6xgNtZdqc876KrfiGli6IyjQgkKyVdSe41qqlxC6C1i0ICex22WveZ 
pgjbN7JwBRfLht23vnrqIVr/1P6neDQ6nh9aKNUi6wsldg6nqgwfxXx8ChAEFZRR RZu9RLIfWHGagPiKF0FIK07/q1PRtfAiQsSIZfo4yA4Lqojauns= -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 8388607 seed: w6uP8Tcg6K2QR905Rms8iXTlksL6OD1KOWBxTK7wxPI= zfec: required: 101 segmentSize: 131072 total: 255 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:CHK:ioridipksj6o7ph3bcf37vslmi:pzhkwm4ry67ohj6dcs7zwnsjrvfqzam6tuitrns5vuo3uoomau6q:101:256:8388609 format: kind: chk params: null sample: length: 8388609 seed: yPi3JHKKbWaEEG5eZOlM6BHJll0Z3UTdBzz4bPQ7wjg= zfec: required: 101 segmentSize: 131072 total: 256 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:SSK:366siuvcfpanoq5xpsby6aeu7m:udkk4awrzcrxtmhjldtezkscedaax4bdkaq3rczqgozdizy5hjha format: kind: ssk params: format: sdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEAl7MOMMbOn8dHhuStb5vOd3YhKX9ItY8jaFt2aUy4n6vhXSNF /KbIN2wQTDZdqHjMfnVIU82TIgmOzfgy3H8y5MStXKI8+zKediZVFW1SX+tVOJkr gHg5Yk7t/ahxXFnlNKt/hmU+Wpy65keIhZlDW3DXN8brN/abCN8gkoFHFZtl4FxW DhhBk08IBQdSM/m+QQJ4LuPcwlJamgRxL4f5lng2zRpnDXjtv+NMwUp/D+9hN/V9 3IUy/n6+G4iJQOdZDId7PmZB5XreXgDpuOo5Qh6E5iPWEYzpOpc4441F6huwCN+7 hOcnbNJv7GjRdl1wVwpoIUVgI7suKk9pDDMr+QIDAQABAoIBAEGGYhbHiPCTD15A 4HlY/3GyYNif1jQ2Q8EL4LXTIdw2Tf4BAnYDRHBMCS4iPYpLw2jMGBW6slb9ceWd 07pSZxVRruBYY6bNUo0OOaorsm0kJYdxAc1YINFJ7pqma3DMk6iQe2D90lUpZcGa HGo4rVOOBihdj7R4nLbUSil+FcpKzuCf/7SBIlhjMeLH9rEtlDJ6az8B+UKilJJP nACArs0Q/l4kCyKa9tkjzqU4BGEWB77uTzxwFSBvguiELXhfYRSxTqlCkkQWR6TQ jgp3rkMXIDeYaowYOC7HQLClLghH7HMlYkV0lAP7sgZUmdFwGKRpUy95fxFtGI/k KCVE+xECgYEAzuDttHfAcTSgP6vzW7Z6QFksB+XLMUspVVQDgf4Bllg899lw3ndV +55dDCsljhCZw4o+y8MktgtAWLZte7znUtRLHsbD6l6DPUnDcVXdRcFzqKeqXlAZ ATWrti5rEqQu9l1AzduaY7eWJCtXP5mywlb+3xOdJyyYdPP7UI1SnncCgYEAu7gS bAJ+h675WhK9bCcBMpeCP0su80Ovz3Bu/UzUa5VW0LPnFt6AMCGlJXu2q4jFo0ba j2uIe1qV8SIMWS+G+XxmueqeXfptoTIeHkBYOL/1YlCONkbodHQx33UW7RVTc5MU HiTzjBqrL46CrtrN91sA8K7pmeuPAk9apIvs9Q8CgYEAqwmtnSHQmgefYWThW3bf VeojjBgBSSzR7Hj8OXHukAU9ytAcD+Fr1g7U8OWPNAgniFH4nvAkntlohq+0jrPc ME/SF4zPlyoyqO4eRsptmWlaHRsZsMXaFnTwFTwFTDEvnoH0vP2NhFnZKOgoRy3k a+YO7BHEQQoOtcqtgaiFoPsCgYEAtQN+4CBXmscjM7Q2bIAAK6Tlt9rr3zA57DJj FGZtv4A2QvH3uJm9yqvm8Aonz6kHy7abMwlihnCHfgpzFd06roFDHawcIktGQ9Zs LIenirGwEanUOIqPxRv2q5/hB6U035HIKHlBUKy2vhkR80KSsh9S/MPuBrqbIIMc yOcVDAkCgYA1t45yzJwsMy98fVQgJl+Cb50fWmiaDCVlytBt/n7Otxuica96EbqL BiDuf5yicNlAjByHeKhv7FlykLqMkxDBiCNyxwZZKErZdn9+6hV5/9zCGx7JeSJr uSVEoab5zdhSqEkxNJBBLo8HuGHrjRKyhLchJFMiXuo4maJ1zKu85A== -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 8388609 seed: yPi3JHKKbWaEEG5eZOlM6BHJll0Z3UTdBzz4bPQ7wjg= zfec: required: 101 segmentSize: 131072 total: 255 - convergence: ZOyIygCyaOW6GjVnihtTFg== expected: URI:MDMF:buw2ujk4pqecvd224j4tibki74:vi3aaz2qzffsyfvafcencavush7x65mrdwg6stxpqidsckfhhy4q format: kind: ssk params: format: mdmf key: '-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEA1TmUXQyj6HXUw5Q8Nh+LeL3oFFs2JcXW0qvLbUloNT9G42sR 3GPeGQ28QLFRzhtawf7p4hdfQSkMccba/MOwik4vZTq6kzuit0juIt75+p5j6U3P LUnMzjEr0cIXLOhQGwWT1aukQGzbxSndz+J8XtnehVEggmhQ2cGDnniM+Ub96FkR S+Z/cHWTzL7njyeYZ4MeUwWsO4F9vg/fYd/EFTsdEbqk1IV8yqnKuuYObNTCtlqW sJi1D2wFiFBVxi8nfRzy6Gif6B8Hf2+O1WQvmqdC/bUNl0wpouXdNRYttPH43BQs 5O5CTbbNgACHskcDqsdqRYuzEwLQfDgKKqnqFwIDAQABAoIBAAkyx1eBInMIubLI z14nNzi5lbWHGhSJN9x1dqo3D/pu7iuW9svuXPyoIRr361By9hPZqN26r2M3TVTG VyGvWXkczVlWiZvRsG2HTmYNbJlwHRRJss73RmyguI1IegAuvAHvjwSZN5714Pc4 ZhLAH0z5ErQwVh56E2yJbhiAhrGAxPjz5VecgpFtegv6gYTSN790qr4LotMkGiae q93EBXTxrzp58ReYOTl7OTu4a791XLtJq8ap23Lmu9s55QJ86vvBX1767WArgHcq 7rAf3Ti7YfHRKjw+zAK+CeLa+as4f7HD+B508QhJjhPNqQU6xdeOkuQkDX+vjq0m 
CvksTR0CgYEA9pRSPULncSkthLy1PVmNAPRfZl1iCa3Zo/UZsddWMrSS3rMKABtE EKyzJWzqLj00w6sQezz0YY6BJwYmHoJkDLDPHoSkxircuEgN+rJNLw7uTtOcSlX9 yMonaIUKw3wK5BbPmCIAv456AQw3hicqc1qWcWwBXFWvO+iyiBsxuUUCgYEA3V8I pRXIkxltc4LG/tytQFOPChf5fJhLlnR9UPopWK81SiViHmeT/jlvywk8thkvUuQw FOo1pXcidkcfvHWYWtbGgaRHmu27o30bNa1Jus1DgLq399VFvXYWQBauUOIfY/1Q xtxhHcPFGxSi0SYUA2px7rBb3KOhqkHEylgGlasCgYB98qbLGdhj6beRXF5q1sn6 Gdh8zegcr4tCfxg/yZEC109Jp0PNaB/tMHlU/XvkYGkKJN+HQ0xEZGi9yRtBbDK0 dL9mhDQx8ITLMCrLybU4+zRoWRg0tBWsMO3OKl6kGUDq3mfs+jlNnvXcgSP/RxQc 1cGQb62GP1IBlMtUUCemzQKBgQCgv+/hIS5jUyWdqaujStAsVAEczUgH5/eLq8+M S/xWP/SsgPT9Ky3WgBLkFzMU8Ljisn0P0vtdymMmDIPJMIOQA0JmxcqRgGyvTZvC oLFXitKn2e7Zcu+Pov6JT28JoQo2a66KmWGUYaLyBUwuID6MNHHDaCFs2Q3+OoAS h1VQvQKBgQD0DA0nokwoMxZ6ClV+B+G6NSmo6JbKOtnoqqBiM5rqw0ME3h1B26hO 5A41/AUKxjrFbcqE6Cm1WGQmR5vJDmKEEhF3SQXuEYm6Ji+l+awbOdYq6GvS5kCH pU+imOa8uwgmK2TlYYj9LaV+mRLqqIvJ6396y66IJeTlcLLoNTasXA== -----END RSA PRIVATE KEY----- ' mutable: null sample: length: 8388609 seed: yPi3JHKKbWaEEG5eZOlM6BHJll0Z3UTdBzz4bPQ7wjg= zfec: required: 101 segmentSize: 131072 total: 255 version: 2023-01-16.2 tahoe_lafs-1.20.0/integration/vectors/vectors.py0000644000000000000000000001023713615410400016671 0ustar00""" A module that loads pre-generated test vectors. :ivar DATA_PATH: The path of the file containing test vectors. :ivar capabilities: The capability test vectors. """ from __future__ import annotations from typing import TextIO from attrs import frozen from yaml import safe_load, safe_dump from base64 import b64encode, b64decode from twisted.python.filepath import FilePath from .model import Param, Sample, SeedParam from ..util import CHK, SSK DATA_PATH: FilePath = FilePath(__file__).sibling("test_vectors.yaml") # The version of the persisted test vector data this code can interpret. CURRENT_VERSION: str = "2023-01-16.2" @frozen class Case: """ Represent one case for which we want/have a test vector. """ seed_params: Param convergence: bytes seed_data: Sample fmt: CHK | SSK segment_size: int @property def data(self): return stretch(self.seed_data.seed, self.seed_data.length) @property def params(self): return self.seed_params.realize(self.fmt.max_shares) def encode_bytes(b: bytes) -> str: """ Base64 encode some bytes to text so they are representable in JSON. """ return b64encode(b).decode("ascii") def decode_bytes(b: str) -> bytes: """ Base64 decode some text to bytes. """ return b64decode(b.encode("ascii")) def stretch(seed: bytes, size: int) -> bytes: """ Given a simple description of a byte string, return the byte string itself. """ assert isinstance(seed, bytes) assert isinstance(size, int) assert size > 0 assert len(seed) > 0 multiples = size // len(seed) + 1 return (seed * multiples)[:size] def save_capabilities(results: list[tuple[Case, str]], path: FilePath = DATA_PATH) -> None: """ Save some test vector cases and their expected values. This is logically the inverse of ``load_capabilities``. """ path.setContent(safe_dump({ "version": CURRENT_VERSION, "vector": [ { "convergence": encode_bytes(case.convergence), "format": { "kind": case.fmt.kind, "params": case.fmt.to_json(), }, "sample": { "seed": encode_bytes(case.seed_data.seed), "length": case.seed_data.length, }, "zfec": { "segmentSize": case.segment_size, "required": case.params.required, "total": case.params.total, }, "expected": cap, } for (case, cap) in results ], }).encode("ascii")) def load_format(serialized: dict) -> CHK | SSK: """ Load an encrypted object format from a simple description of it. 
:param serialized: A ``dict`` describing either CHK or SSK, possibly with some parameters. """ if serialized["kind"] == "chk": return CHK.load(serialized["params"]) elif serialized["kind"] == "ssk": return SSK.load(serialized["params"]) else: raise ValueError(f"Unrecognized format: {serialized}") def load_capabilities(f: TextIO) -> dict[Case, str]: """ Load some test vector cases and their expected results from the given file. This is logically the inverse of ``save_capabilities``. """ data = safe_load(f) if data is None: return {} if data["version"] != CURRENT_VERSION: print( f"Current version is {CURRENT_VERSION}; " f"cannot load version {data['version']} data." ) return {} return { Case( seed_params=SeedParam(case["zfec"]["required"], case["zfec"]["total"]), segment_size=case["zfec"]["segmentSize"], convergence=decode_bytes(case["convergence"]), seed_data=Sample(decode_bytes(case["sample"]["seed"]), case["sample"]["length"]), fmt=load_format(case["format"]), ): case["expected"] for case in data["vector"] } try: with DATA_PATH.open() as f: capabilities: dict[Case, str] = load_capabilities(f) except FileNotFoundError: capabilities = {} tahoe_lafs-1.20.0/misc/awesome_weird_stuff/boodlegrid.tac0000644000000000000000000001402513615410400020420 0ustar00# -*- python -*- """Monitor a Tahoe grid, by playing sounds in response to remote events. To install: 1: install Boodler, from http://www.eblong.com/zarf/boodler/ 2: run "boodler.py -l listen.Sounds". This will run a daemon that listens on a network socket (31863 by default) and accepts commands in the form of "sound bird/crow1.aiff\n" 3: copy this file into a new directory, which we'll call $BASEDIR 4: write one or more logport FURLs into files named *.furl or *.furls, one per line. All logports from all such files will be used. 
5: launch this daemon with 'cd $BASEDIR && twistd -y boodlegrid.tac' """ import os, time from zope.interface import implements from twisted.application import service from twisted.internet import protocol, reactor, defer from foolscap import Tub, Referenceable from foolscap.logging.interfaces import RILogObserver from twisted.python import log class Listener: def __init__(self): self.boodler = None # filled in when we connect to boodler self.last = {} def sound(self, name, slot=None, max=0.100): if not self.boodler: return now = time.time() if slot is None: slot = name if now < self.last.get(slot, 0) + max: return # too soon self.last[slot] = now self.boodler.write("sound %s\n" % name) def msg(self, m, furl): #print "got it", m message = m.get("message", m.get("format", "")) format = m.get("format", "") facility = m.get("facility", "") # messages emitted by the Introducer: client join/leave if message.startswith("introducer: subscription[storage] request"): print("new client") self.sound("voice/hooray.aiff") if message.startswith("introducer: unsubscribing"): print("unsubscribe") self.sound("electro/zaptrill-fade.aiff") # messages from the helper if message == "file already found in grid": print("already found") self.sound("mech/ziplash-high.aiff") #if message == "upload done": if format == "plaintext_hash=%(plaintext_hash)s, SI=%(SI)s, size=%(size)d": size = m.get("size") print("upload done, size", size) self.sound("mech/ziplash-low.aiff") if "fetching " in message: # helper grabbing ciphertext from client self.sound("voice/phoneme/sh.aiff", max=0.5) # messages from storage servers if message.startswith("storage: slot_readv"): #self.sound("voice/phoneme/r.aiff") self.sound("percussion/wood-tap-hollow.aiff") # messages from webapi if message.startswith("Retrieve") and "starting" in message: self.sound("mech/metal-clack.aiff") if message.startswith("Publish") and "starting" in message: self.sound("mech/door-slam.aiff") #self.sound("mech/metal-clash.aiff") if ("web: %(clientip)s" in format and m.get("method") == "POST" and ("t=set_children" in m.get("uri", "") # FIXME: may give false-positives or "t=set-children" in m.get("uri", ""))): self.sound("mech/clock-clang.aiff") # generic messages #if m['level'] < 20: # self.sound("mech/keyboard-1.aiff") if "_check_for_done but we're not running" in message: pass elif format == "excessive reactor delay (%ss)": self.sound("animal/frog-cheep.aiff") print("excessive delay %s: %s" % (m['args'][0], furl)) elif format == "excessive reactor delay (%(delay)ss)": self.sound("animal/frog-cheep.aiff") print("excessive delay %s: %s" % (m['delay'], furl)) elif facility == "foolscap.negotiation": if (message == "got offer for an existing connection" or "master told us to use a new connection" in message): print("foolscap: got offer for an existing connection", message, furl) else: #print "foolscap:", message pass elif m['level'] > 30: # SCARY or BAD #self.sound("mech/alarm-bell.aiff") self.sound("environ/thunder-tense.aiff") print(m, furl) elif m['level'] == 30: # WEIRD self.sound("mech/glass-breaking.aiff") print(m, furl) elif m['level'] > 20: # UNUSUAL or INFREQUENT or CURIOUS self.sound("mech/telephone-ring-old.aiff") print(m, furl) class BoodleSender(protocol.Protocol): def connectionMade(self): print("connected to boodler") self.factory.listener.boodler = self.transport class Bridge(Referenceable): implements(RILogObserver) def __init__(self, furl, listener): self.furl = furl self.listener = listener def remote_msg(self, m): d = 
defer.maybeDeferred(self.listener.msg, m, self.furl) d.addErrback(log.err) # never send errors to the remote side class Monitor(service.MultiService): def __init__(self): service.MultiService.__init__(self) self.tub = Tub() self.tub.setServiceParent(self) self.listener = Listener() self.targets = [] for fn in os.listdir("."): if fn.endswith(".furl") or fn.endswith(".furls"): for i,line in enumerate(open(fn, "r").readlines()): target = line.strip() if target: self.tub.connectTo(target, self._got_logpublisher, fn, i, target) cf = protocol.ClientFactory() cf.listener = self.listener cf.protocol = BoodleSender reactor.connectTCP("localhost", 31863, cf) def _got_logpublisher(self, publisher, fn, i, target): print("connected to %s:%d, %s" % (fn, i, target)) b = Bridge(target, self.listener) publisher.callRemote("subscribe_to_all", b) m = Monitor() application = service.Application("boodlegrid") m.setServiceParent(application) tahoe_lafs-1.20.0/misc/build_helpers/build-osx-pkg.sh0000755000000000000000000000425113615410400017421 0ustar00#!/bin/sh VERSION=`sh -c "cat src/allmydata/_version.py | grep verstr | head -n 1 | cut -d' ' -f 3" | sed "s/\"//g"` PWD=`pwd` TARGET="/Applications/tahoe.app" # Clean up any test garbage that might be left over from a recent test run. rm -rvf _trial_temp virtualenv osx-venv osx-venv/bin/pip install . # The virtualenv contains all the dependencies we need, but the bin/python # itself is not useful, nor is having it as the shbang line in the generated # bin/tahoe executable. Replace bin/tahoe with a form that explicitly sets # sys.path to the target directory (/Applications/tahoe.app). This isn't as # isolated as a proper virtualenv would be (the system site-packages # directory will still appear later in sys.path), but I think it ought to # work. rm osx-venv/bin/* cat >osx-venv/bin/tahoe < 1: depdirs = sys.argv[1 :] filenames = set() for depdir in depdirs: filenames = filenames.union(os.listdir(depdir)) def add(d, k, v): if k in d: d[k] += [v] else: d[k] = [v] for fname in filenames: for ext in extensions: if fname.endswith(ext): m = FILENAME_RE.match(fname[:-len(ext)]) try: pkg = m.group(1) pkg2 = m.group(2) if pkg2 in pkg_name_continuations: pkg += '-' + pkg2 else: pythonver = (m.group(3) or '-py')[3:] platform = (m.group(4) or '-')[1:] except (IndexError, AttributeError, TypeError): continue if not pkg2 in pkg_name_continuations and not pythonver: m = FILENAME_RE2.match(fname[:-len(ext)]) if m.group(3): try: platform = m.group(3) pythonver = (m.group(4) or '-py')[3:] except (IndexError, AttributeError, TypeError): continue for (alias, replacement) in platform_aliases: if platform.endswith(alias): platform = platform[:-len(alias)] + replacement break pkgs.add(pkg) if platform: platform_dependent_pkgs.add(pkg) if pythonver not in matrix: python_versions.add(pythonver) matrix[pythonver] = {} add(matrix[pythonver], platform, (pkg, fname)) break platform_independent_pkgs = pkgs - platform_dependent_pkgs width = 100 / (len(platform_dependent_pkgs) + 1) def file_list(all_files, pkg): files = sorted([(pkg_resources.parse_version(n), n) for (p, n) in all_files if pkg == p]) return '
 '.join(['%s' % (f, f) for (v, f) in files]) greybgstyle = '; background-color: #E0E0E0' nobgstyle = '' unsupportedstyle = '; color: #C00000' print('') print('') print('') print(' ') print(' Software packages that Tahoe-LAFS depends on') print('') print('') print('

What is this?

') print('

See quickstart.rst, wiki:Installation, and wiki:CompileError.') print('

Software packages that Tahoe-LAFS depends on

') print() for pyver in reversed(sorted(python_versions)): greybackground = False if pyver: print('

Packages for Python %s that have compiled C/C++ code:

' % (pyver,)) print('') print(' ') print(' ' % (width,)) for pkg in sorted(platform_dependent_pkgs): print(' ' % (width, pkg)) print(' ') first = True for platform in sorted(matrix[pyver]): unsupported_python = (platform in min_supported_python and pyver.split('.') < min_supported_python[platform].split('.')) if greybackground: bgstyle = greybgstyle else: bgstyle = nobgstyle greybackground = not greybackground row_files = sorted(matrix[pyver][platform]) style1 = first and 'border-top: 2px solid #000000' or '' style1 += bgstyle style1 += unsupported_python and unsupportedstyle or '' style2 = first and 'border-top: 2px solid #000000' or '' style2 += bgstyle annotated_platform = platform.replace('-', '‑') + (unsupported_python and ' (unsupported)' or '') print(' ') print(' ' % (style1, annotated_platform)) for pkg in sorted(platform_dependent_pkgs): if pkg == 'pywin32' and not platform.startswith('windows'): print(' ' % (style2,)) else: print(' ' % (style2, file_list(row_files, pkg))) print(' ') first = False print('
 Platform  %s 
 %s  n/a  %s
') print() print('

Packages that are platform-independent or source-only:

') print('') print(' ') print(' ') print(' ') print(' ') style1 = 'border-top: 2px solid #000000; background-color:#FFFFF0;' style2 = 'border-top: 2px solid #000000;' m = matrix[''][''] for pkg in sorted(platform_independent_pkgs): print(' ') print(' ' % (style1, pkg)) print(' ' % (style2, file_list(m, pkg))) print(' ') print('
 Package  All Python versions 
 %s  %s
') # The document does validate, but not when it is included at the bottom of a directory listing. #print('
') #print('Valid HTML 4.01 Transitional') print('') tahoe_lafs-1.20.0/misc/build_helpers/run-deprecations.py0000644000000000000000000000747113615410400020240 0ustar00 import sys, os, io, re from twisted.internet import reactor, protocol, task, defer from twisted.python.procutils import which from twisted.python import usage # run the command with python's deprecation warnings turned on, capturing # stderr. When done, scan stderr for warnings, write them to a separate # logfile (so the buildbot can see them), and return rc=1 if there were any. class Options(usage.Options): optParameters = [ ["warnings", None, None, "file to write warnings into at end of test run"], ["package", None, None, "Python package to which to restrict warning collection"] ] def parseArgs(self, command, *args): self["command"] = command self["args"] = list(args) description = """Run as: python run-deprecations.py [--warnings=STDERRFILE] [--package=PYTHONPACKAGE ] COMMAND ARGS.. """ class RunPP(protocol.ProcessProtocol): def outReceived(self, data): self.stdout.write(data) sys.stdout.write(str(data, sys.stdout.encoding)) def errReceived(self, data): self.stderr.write(data) sys.stderr.write(str(data, sys.stdout.encoding)) def processEnded(self, reason): signal = reason.value.signal rc = reason.value.exitCode self.d.callback((signal, rc)) def make_matcher(options): """ Make a function that matches a line with a relevant deprecation. A deprecation warning line looks something like this:: somepath/foo/bar/baz.py:43: DeprecationWarning: Foo is deprecated, try bar instead. Sadly there is no guarantee warnings begin at the beginning of a line since they are written to output without coordination with whatever other Python code is running in the process. :return: A one-argument callable that accepts a string and returns ``True`` if it contains an interesting warning and ``False`` otherwise. 
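    For example (illustrative only): when run with ``--package=allmydata``, the
    compiled pattern is roughly ``.*/allmydata/.*\.py[oc]?:\d+:``, so only warnings
    whose source file lives under an ``allmydata/`` directory are counted; without
    ``--package``, any line of the form ``<path>.py:<lineno>:`` is treated as
    interesting.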
""" pattern = r".*\.py[oc]?:\d+:" # (Pending)?DeprecationWarning: .*" if options["package"]: pattern = r".*/{}/".format( re.escape(options["package"]), ) + pattern expression = re.compile(pattern) def match(line): return expression.match(line) is not None return match @defer.inlineCallbacks def run_command(main): config = Options() config.parseOptions() command = config["command"] if "/" in command: # don't search exe = command else: executables = which(command) if not executables: raise ValueError("unable to find '%s' in PATH (%s)" % (command, os.environ.get("PATH"))) exe = executables[0] pp = RunPP() pp.d = defer.Deferred() pp.stdout = io.BytesIO() pp.stderr = io.BytesIO() reactor.spawnProcess(pp, exe, [exe] + config["args"], env=None) (signal, rc) = yield pp.d match = make_matcher(config) # maintain ordering, but ignore duplicates (for some reason, either the # 'warnings' module or twisted.python.deprecate isn't quashing them) already = set() warnings = [] def add(line): if line in already: return already.add(line) warnings.append(line) pp.stdout.seek(0) for line in pp.stdout.readlines(): line = str(line, sys.stdout.encoding) if match(line): add(line) # includes newline pp.stderr.seek(0) for line in pp.stderr.readlines(): line = str(line, sys.stdout.encoding) if match(line): add(line) if warnings: if config["warnings"]: with open(config["warnings"], "w") as f: print("".join(warnings), file=f) print("ERROR: %d deprecation warnings found" % len(warnings)) sys.exit(1) print("no deprecation warnings") if signal: sys.exit(signal) sys.exit(rc) task.react(run_command) tahoe_lafs-1.20.0/misc/build_helpers/show-tool-versions.py0000644000000000000000000001132213615410400020545 0ustar00#! /usr/bin/env python import locale, os, platform, subprocess, sys, traceback from importlib.metadata import version, PackageNotFoundError def foldlines(s, numlines=None): lines = s.split("\n") if numlines is not None: lines = lines[:numlines] return " ".join(lines).replace("\r", "") def print_platform(): try: import platform out = platform.platform() print("platform:", foldlines(out)) print("machine: ", platform.machine()) if hasattr(platform, 'linux_distribution'): print("linux_distribution:", repr(platform.linux_distribution())) except EnvironmentError: sys.stderr.write("\nGot exception using 'platform'. Exception follows\n") traceback.print_exc(file=sys.stderr) sys.stderr.flush() def print_python_ver(): print("python:", foldlines(sys.version)) print('maxunicode: ' + str(sys.maxunicode)) def print_python_encoding_settings(): print('filesystem.encoding: ' + str(sys.getfilesystemencoding())) print('locale.getpreferredencoding: ' + str(locale.getpreferredencoding())) try: print('locale.defaultlocale: ' + str(locale.getdefaultlocale())) except ValueError as e: print('got exception from locale.getdefaultlocale(): ', e) print('locale.locale: ' + str(locale.getlocale())) def print_stdout(cmdlist, label=None, numlines=None): try: if label is None: label = cmdlist[0] res = subprocess.Popen(cmdlist, stdin=open(os.devnull), stdout=subprocess.PIPE).communicate()[0] print(label + ': ' + foldlines(res.decode('utf-8'), numlines)) except EnvironmentError as e: if isinstance(e, OSError) and e.errno == 2: print(label + ': no such file or directory') return sys.stderr.write("\nGot exception invoking '%s'. 
Exception follows.\n" % (cmdlist[0],)) traceback.print_exc(file=sys.stderr) sys.stderr.flush() def print_as_ver(): if os.path.exists('a.out'): print("WARNING: a file named a.out exists, and getting the version of the 'as' assembler " "writes to that filename, so I'm not attempting to get the version of 'as'.") return try: stdout, stderr = subprocess.Popen(['as', '-version'], stdin=open(os.devnull), stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate() print('as: ' + foldlines(stdout.decode('utf-8') + ' ' + stderr.decode('utf-8'))) if os.path.exists('a.out'): os.remove('a.out') except EnvironmentError: sys.stderr.write("\nGot exception invoking '%s'. Exception follows.\n" % ('as',)) traceback.print_exc(file=sys.stderr) sys.stderr.flush() def print_setuptools_ver(): try: print("setuptools:", version("setuptools")) except PackageNotFoundError: print('setuptools: DistributionNotFound') def print_py_pkg_ver(pkgname, modulename=None): if modulename is None: modulename = pkgname print() try: print(pkgname + ': ' + version(pkgname)) except PackageNotFoundError: print(pkgname + ': DistributionNotFound') try: __import__(modulename) except ImportError: pass else: modobj = sys.modules.get(modulename) print(pkgname + ' module: ' + str(modobj)) try: print(pkgname + ' __version__: ' + str(modobj.__version__)) except AttributeError: pass print_platform() print() print_python_ver() print_stdout(['virtualenv', '--version']) print_stdout(['tox', '--version']) print() print_stdout(['locale']) print_python_encoding_settings() print() print_stdout(['buildbot', '--version']) print_stdout(['buildslave', '--version']) if 'windows' in platform.system().lower(): print_stdout(['cl']) print_stdout(['gcc', '--version'], numlines=1) print_stdout(['g++', '--version'], numlines=1) print_stdout(['cryptest', 'V']) print_stdout(['git', '--version']) print_stdout(['openssl', 'version']) print_stdout(['flappclient', '--version']) print_stdout(['valgrind', '--version']) print_as_ver() print_setuptools_ver() print_py_pkg_ver('cffi') print_py_pkg_ver('coverage') print_py_pkg_ver('cryptography') print_py_pkg_ver('foolscap') print_py_pkg_ver('mock') print_py_pkg_ver('pyasn1') print_py_pkg_ver('pycparser') print_py_pkg_ver('cryptography') print_py_pkg_ver('pyflakes') print_py_pkg_ver('pyOpenSSL', 'OpenSSL') print_py_pkg_ver('six') print_py_pkg_ver('trialcoverage') print_py_pkg_ver('Twisted', 'twisted') print_py_pkg_ver('TwistedCore', 'twisted.python') print_py_pkg_ver('TwistedWeb', 'twisted.web') print_py_pkg_ver('TwistedConch', 'twisted.conch') print_py_pkg_ver('zfec') print_py_pkg_ver('zope.interface') tahoe_lafs-1.20.0/misc/build_helpers/test-git-ignore.py0000644000000000000000000000034013615410400017763 0ustar00#!/usr/bin/env python import sys from subprocess import Popen, PIPE cmd = ["git", "status", "--porcelain"] p = Popen(cmd, stdout=PIPE) output = p.communicate()[0] print(output) if output == "": sys.exit(0) sys.exit(1) tahoe_lafs-1.20.0/misc/build_helpers/test-osx-pkg.py0000644000000000000000000001137713615410400017323 0ustar00# This script treats the OS X pkg as an xar archive and uncompresses it to # the filesystem. The xar file contains a file called Payload, which is a # gziped cpio archive of the filesystem. It then cd's into the file system # and executes '$appname --version-and-path' and checks whether the output # of that command is right. # If all of the paths listed therein are loaded from within the current PWD # then it exits with code 0. # If anything goes wrong then it exits with non-zero (failure). 
This is to # check that the Mac OS '.pkg' package that gets built is correctly loading # all of its packages from inside the image. # Here is an example output from --version-and-path: # allmydata-tahoe: 1.10.0.post185.dev0 [2249-deps-and-osx-packaging-1: 76ac53846042d9a4095995be92af66cdc09d5ad0-dirty] (/Applications/tahoe.app/src) # foolscap: 0.7.0 (/Applications/tahoe.app/support/lib/python2.7/site-packages/foolscap-0.7.0-py2.7.egg) # zfec: 1.4.24 (/Applications/tahoe.app/support/lib/python2.7/site-packages/zfec-1.4.24-py2.7-macosx-10.9-intel.egg) # Twisted: 13.0.0 (/Applications/tahoe.app/support/lib/python2.7/site-packages/Twisted-13.0.0-py2.7-macosx-10.9-intel.egg) # Nevow: 0.11.1 (/Applications/tahoe.app/support/lib/python2.7/site-packages/Nevow-0.11.1-py2.7.egg) # zope.interface: unknown (/System/Library/Frameworks/Python.framework/Versions/2.7/Extras/lib/python/zope) # python: 2.7.5 (/usr/bin/python) # platform: Darwin-13.4.0-x86_64-i386-64bit (None) # pyOpenSSL: 0.13 (/System/Library/Frameworks/Python.framework/Versions/2.7/Extras/lib/python) # pyasn1: 0.1.7 (/Applications/tahoe.app/support/lib/python2.7/site-packages/pyasn1-0.1.7-py2.7.egg) # mock: 1.0.1 (/Applications/tahoe.app/support/lib/python2.7/site-packages) # setuptools: 0.6c16dev6 (/Applications/tahoe.app/support/lib/python2.7/site-packages/setuptools-0.6c16dev6.egg) # service-identity: 14.0.0 (/Applications/tahoe.app/support/lib/python2.7/site-packages/service_identity-14.0.0-py2.7.egg) # characteristic: 14.1.0 (/Applications/tahoe.app/support/lib/python2.7/site-packages) # pyasn1-modules: 0.0.5 (/Applications/tahoe.app/support/lib/python2.7/site-packages/pyasn1_modules-0.0.5-py2.7.egg) import os, re, shutil, subprocess, sys, tempfile def test_osx_pkg(pkgfile): """ Return on success, raise exception on failure. """ tmpdir = tempfile.mkdtemp(dir='/tmp') # xar -C /tmp/tmpdir -xf PKGNAME cmd = ['xar', '-C', tmpdir, '-xf', pkgfile] extractit = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) rc = extractit.wait() if rc != 0: raise Exception("FAIL: xar returned non-zero exit code: %r from command: %r" % (rc, cmd,)) stderrtxt = extractit.stderr.read() if stderrtxt: raise Exception("FAIL: xar said something on stderr: %r" % (stderrtxt,)) # cd /tmp/tmpXXX/tahoe-lafs.pkg os.chdir(tmpdir + '/tahoe-lafs.pkg') # cat Payload | gunzip -dc | cpio -i cat_process = subprocess.Popen(['cat', 'Payload'], stdout=subprocess.PIPE) gunzip_process = subprocess.Popen(['gunzip', '-dc'], stdin=cat_process.stdout, stdout=subprocess.PIPE) cpio_process = subprocess.Popen(['cpio', '-i', '--verbose'], stdin=gunzip_process.stdout, stdout=subprocess.PIPE) cpio_process.communicate() try: basedir = os.getcwd() cmd = ['bin/tahoe', '--version-and-path'] callit = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) rc = callit.wait() if rc != 0: print( "{} failed.\n" "stdout: {}\n" "stderr: {}\n".format( cmd, callit.stdout.read(), callit.stderr.read(), ), ) raise Exception("FAIL: '%s' returned non-zero exit code: %r" % (" ".join(cmd), rc)) stdouttxt = callit.stdout.read() PKG_VER_PATH_RE=re.compile("^(\S+): ([^\(]+)\((.+?)\)$", re.UNICODE) for mo in PKG_VER_PATH_RE.finditer(stdouttxt): if not mo.group(3).startswith(basedir): # the following packages are provided by the OS X default installation itself if not mo.group(1) in ['zope.interface', 'python', 'platform', 'pyOpenSSL']: raise Exception("FAIL: found package not loaded from basedir (%s); package was: %s" % (basedir, mo.groups(),)) # success! 
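        # (Reaching this point without an exception means every path reported by
        # 'tahoe --version-and-path' either started with the unpacked basedir or
        # belonged to one of the OS-provided packages allowlisted above.)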
finally: shutil.rmtree(tmpdir) if __name__ == '__main__': pkgs = [fn for fn in os.listdir(".") if fn.endswith("-osx.pkg")] if len(pkgs) != 1: print("ERR: unable to find a single .pkg file:", pkgs) sys.exit(1) print("Testing %s ..." % pkgs[0]) test_osx_pkg(pkgs[0]) print("Looks OK!") tahoe_lafs-1.20.0/misc/build_helpers/update-version.py0000644000000000000000000000416613615410400017721 0ustar00# # this updates the (tagged) version of the software # # Any "options" are hard-coded in here (e.g. the GnuPG key to use) # author = "meejah " import sys import time from datetime import datetime from packaging.version import Version from dulwich.repo import Repo from dulwich.porcelain import ( tag_list, tag_create, status, ) from twisted.internet.task import ( react, ) from twisted.internet.defer import ( ensureDeferred, ) def existing_tags(git): versions = sorted( Version(v.decode("utf8").lstrip("tahoe-lafs-")) for v in tag_list(git) if v.startswith(b"tahoe-lafs-") ) return versions def create_new_version(git): versions = existing_tags(git) biggest = versions[-1] return Version( "{}.{}.{}".format( biggest.major, biggest.minor + 1, 0, ) ) async def main(reactor): git = Repo(".") st = status(git) if any(st.staged.values()) or st.unstaged: print("unclean checkout; aborting") raise SystemExit(1) v = create_new_version(git) if "--no-tag" in sys.argv: print(v) return print("Existing tags: {}".format("\n".join(str(x) for x in existing_tags(git)))) print("New tag will be {}".format(v)) # the "tag time" is seconds from the epoch .. we quantize these to # the start of the day in question, in UTC. now = datetime.now() s = now.utctimetuple() ts = int( time.mktime( time.struct_time((s.tm_year, s.tm_mon, s.tm_mday, 0, 0, 0, 0, s.tm_yday, 0)) ) ) tag_create( repo=git, tag="tahoe-lafs-{}".format(str(v)).encode("utf8"), author=author.encode("utf8"), message="Release {}".format(v).encode("utf8"), annotated=True, objectish=b"HEAD", sign=author.encode("utf8"), tag_time=ts, tag_timezone=0, ) print("Tag created locally, it is not pushed") print("To push it run something like:") print(" git push origin {}".format(v)) if __name__ == "__main__": react(lambda r: ensureDeferred(main(r))) tahoe_lafs-1.20.0/misc/build_helpers/icons/logo.ico0000644000000000000000000112336613615410400017156 0ustar00 hf   V00 %@@ (BD ( Ά(   0++0m}dU$ptpr GpEp|T6\ kHe Z S .???/??(0 LLmXXs3}3s=jM>^/& =Zj,BF ^}Ik ;)64A?( @  2 iiii.H7.Hqzzq!!== LzS77SzLII^yy^Q:kk:VV oo ?OOGGGgp`?`? ? ? ? (0` $O9 X;m R Rm ;bg%Vnz`H*2``Qo`mBZ`<C`:d`E`$O`DZ`od*n)`Sas6`"Q`y`c zE9+a V2a :,:#R6n"UDL*Y= w2C7F`'##0 08<>880008<8(@ @vv3ff3WWWW33ffff33WWWW$ 3pp3 $JJHH&&mccmQ@ @Q''IIvv u$$u7TT7#JJ#<tt<paapVhhVEEmmff]DD]8oyC44Cyo8ZZccccZZ4444??????#!11888?8?0?8???????3????><<  >>>(  XrrX*zz***uu,,MMxxxxMM,,uu****zzXXrrrrXXzz****uu,,MMxxxxMM,,uuQXQX***zz*hXrrXhhh+ hh +hh[hh[O hh Ohh6hh6shhsshhs6hh6hh NhhN [hh[hh*hh*hhghhgBhhBhhBhhBfhhfhh*hh*hhZhhZNhhNhh,H5hh5H,[$rhhr$[f_rhhr_fqhhq|hh|QhhQ,hh,MhhM&hh&GhhGrhhrhhhhhh5hh5[hh[hh hh hh %hh% +FhhF+6qhhq6AhhALhhLVhhVa4hh4alZhhZlwhhwhhhh)%hh%)7v&EhhE&v7G7*hh*7GZ?hh?ZE+hh+Ehh3hh3V1hh1VQhhQp?hh?pG#hh#G(hh(hh[hh[hhphhp-hh-zhhz hh NhhNnhhn 7 7Y(kk(Ys@@sYYrkkrrllrYYs@@sY(ll(Y7  7nmmnNN zmmz--ppnn[[(nn(G##Gp??pQooQV11V33ooE+ ;; +EZ? UU ?ZG7gg7G7v&&v7) }} )SS}}}}SS }}gg UU #### UU gg}} SS}}}}SS }} gg UU ## ???????????????????????????????tahoe_lafs-1.20.0/misc/build_helpers/icons/logo.svg0000644000000000000000000001016713615410400017174 0ustar00 image/svg+xml Tahoe-LAFS logo Tahoe-LAFS logo A proposed logo for the Tahoe-LAFS Project. 
Tahoe-LAFS Logo by Kevin Reid is licensed under a Creative Commons Attribution 3.0 Unported License . tahoe_lafs-1.20.0/misc/build_helpers/icons/make-osx-icon.sh0000755000000000000000000000164013615410400020520 0ustar00#! /bin/bash # Based on and # . # converts the passed-in svgs to icns format if [[ "$#" -eq 0 ]]; then echo "Usage: $0 svg1 [svg2 [...]]" exit 0 fi temp="$(pwd)/temp" declare -a res=(16 32 64 128 256 512 1024) for f in "$*"; do name="`basename -s .svg "$f"`" iconset="$temp/${name}.iconset" mkdir -p "$iconset" for r in "${res[@]}"; do inkscape -z -e "$iconset/icon_${r}x${r}.png" -w "$r" -h "$r" "$f" done ln "$iconset/icon_32x32.png" "$iconset/icon_16x16@2x.png" mv "$iconset/icon_64x64.png" "$iconset/icon_32x32@2x.png" ln "$iconset/icon_256x256.png" "$iconset/icon_128x128@2x.png" ln "$iconset/icon_512x512.png" "$iconset/icon_256x256@2x.png" mv "$iconset/icon_1024x1024.png" "$iconset/icon_512x512@2x.png" iconutil -c icns -o "${name}.icns" "$iconset" done rm -rf "$temp" tahoe_lafs-1.20.0/misc/build_helpers/icons/make-windows-icon.sh0000755000000000000000000000114113615410400021375 0ustar00#! /bin/bash # Based on # converts the passed-in svgs to ico format if [[ "$#" -eq 0 ]]; then echo "Usage: $0 svg1 [svg2 [...]]" exit 0 fi temp="$(mktemp -d)" declare -a res=(16 24 32 48 64 256) for f in "$*"; do name="`basename -s .svg "$f"`" iconset="$temp/${name}.iconset" mkdir -p "$iconset" for r in "${res[@]}"; do inkscape -z -e "$iconset/${name}${r}.png" -w "$r" -h "$r" "$f" done resm=( "${res[@]/#/$iconset/${name}}" ) resm=( "${resm[@]/%/.png}" ) convert "${resm[@]}" "${f%%.*}.ico" done rm -rf "$temp" tahoe_lafs-1.20.0/misc/build_helpers/osx/Distribution.xml0000644000000000000000000000324413615410400020410 0ustar00 Tahoe-LAFS tahoe-lafs.pkg tahoe_lafs-1.20.0/misc/build_helpers/osx/Contents/Info.plist0000644000000000000000000000241213615410400020750 0ustar00 CFBundleAllowMixedLocalizations CFBundleDevelopmentRegion English CFBundleExecutable applet CFBundleIconFile applet CFBundleIdentifier com.apple.ScriptEditor.id.tahoe CFBundleInfoDictionaryVersion 6.0 CFBundleName tahoe CFBundlePackageType APPL CFBundleShortVersionString 1.0 CFBundleSignature aplt LSMinimumSystemVersionByArchitecture x86_64 10.6 LSRequiresCarbon WindowState dividerCollapsed eventLogLevel -1 name ScriptWindowState positionOfDivider 333 savedFrame 7 281 602 597 0 0 1440 878 selectedTabView result tahoe_lafs-1.20.0/misc/build_helpers/osx/Contents/PkgInfo0000644000000000000000000000001013615410400020250 0ustar00APPLaplttahoe_lafs-1.20.0/misc/build_helpers/osx/Contents/MacOS/applet0000755000000000000000000006070413615410400021165 0ustar00! @! H H__PAGEZERO__TEXT__text__TEXT\\__stubs__TEXT__stub_helper__TEXT.__unwind_info__TEXT.P.__eh_frame__TEXT__DATA__program_vars__DATA(__nl_symbol_ptr__DATA((__la_symbol_ptr__DATA88__common__DATAP H__LINKEDIT  "0   Hh h X!p P8! 
[binary data omitted: the remainder of the Mach-O launcher misc/build_helpers/osx/Contents/MacOS/applet and the bulk of the icns/PNG image data in misc/build_helpers/osx/Contents/Resources/applet.icns]
01@a?#oj^X`V/8Q@C h??wWS>>?24` 4%1`?Ƿ6/!r&tJ@--N akKu;l@IHJ6#ߖ6 }[J։e}DG 0a| 4%;Ζ06l_WxhN- @ANT {o|U>Kz *.9쎂~IB1Quēi$tJnbKU! K 0av v#*7K@7޻ I dJD`ga3#53-E@6H %x"'=ɏSs J&@Pr}mmdDA7Ȋ @Vy[xU*R#l>lۋ'0xȊ @V6y$OT$F3lۋ'@(  xMs6WL l_U(BC B@ "0{FA U$FCX3b&O`Vz+Dn0 02lO @ah@@3$Q;?y"ω쀀o$/Jΰ\6e kע!@H03'u2 wMwJ@ .F@m}Jf;Yx@^ ,)3t .>w!ʓ9 y# c,4 v\.@BYnCS -f -"^#_=3G# @DTAExFx0@Lie|B@~,E %D'b5 3>i+w @$D?/ 䇍 V+jvA7.& 2>3kGsn\;Y13ņ]hZR:Q y,ֈxw  @?]d%G W| =+ @ $$')\ @4.08b '.@zY1N 0tp@h7JpI GĖ0[4@c h:@${t?6Š@m@;WwfepnG\ ( XKr(2&  @ ^.kz Wi, @?I&N#Vs0' @ MԎN"04'N3X @A@G`S l< @`IIiG @5hOKjP@@~4XRŒ _< 48.9U$/: @ o,7TI6;Zx@@@S% $Ğ \-?(9 TB5.8l>$$o{|nTT ustr 8s8mk B-eoM x`:m"l+3IlQ1lqU(lwo~c<CC<tahoe_lafs-1.20.0/misc/build_helpers/osx/Contents/Resources/applet.rsrc0000644000000000000000000000055213615410400023135 0ustar00$$F@$$F#Fscszspshtahoe_lafs-1.20.0/misc/build_helpers/osx/Contents/Resources/Scripts/main.scpt0000644000000000000000000000131013615410400024214 0ustar00FasdUAS 1.101.10 l I .sysodisAaleRTEXT mbPlease read /Applications/tahoe.app/docs/running.rst for information on how to get started. To start using the command-line interface, enter 'tahoe --help' at a terminal window.  .aevtoappnull****  .aevtoappnull**** k     .sysodisAaleRTEXTj ascr ޭtahoe_lafs-1.20.0/misc/build_helpers/osx/Contents/Resources/description.rtfd/TXT.rtf0000644000000000000000000000014613615410400025431 0ustar00{\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf210 {\fonttbl} {\colortbl;\red255\green255\blue255;} }tahoe_lafs-1.20.0/misc/build_helpers/osx/scripts/postinstall0000755000000000000000000000027613615410400021202 0ustar00#!/bin/bash PWD=`pwd` echo "/Applications/tahoe.app/bin/" >> /etc/paths.d/tahoe # copy the manpage into /etc/manpaths.d/ echo "/Applications/tahoe.app/docs/man/" >> /etc/manpaths.d/tahoe tahoe_lafs-1.20.0/misc/build_helpers/osx/scripts/preinstall0000755000000000000000000000034513615410400021000 0ustar00#!/bin/bash if [ -d /Applications/tahoe.app ]; then rm -r /Applications/tahoe.app fi if [ -f /etc/paths.d/tahoe ]; then rm /etc/paths.d/tahoe fi if [ -f /etc/manpaths.d/tahoe.1 ]; then rm /etc/manpaths.d/tahoe.1 fitahoe_lafs-1.20.0/misc/checkers/check_grid.py0000644000000000000000000001671413615410400016006 0ustar00 """ Test an existing Tahoe grid, both to see if the grid is still running and to see if the client is still compatible with it. This script is suitable for running from a periodic monitoring script, perhaps by an hourly cronjob. This script uses a pre-established client node (configured to connect to the grid being tested) and a pre-established directory (stored as the 'testgrid:' alias in that client node's aliases file). It then performs a number of uploads and downloads to exercise compatibility in various directions (new client vs old data). All operations are performed by invoking various CLI commands through bin/tahoe . The script must be given two arguments: the client node directory, and the location of the bin/tahoe executable. Note that this script does not import anything from tahoe directly, so it doesn't matter what its PYTHONPATH is, as long as the bin/tahoe that it uses is functional. This script expects the client node to be running already. 
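For periodic monitoring, a crontab entry along these lines would work (the paths
below are examples only; substitute your own client node directory and the
location of your bin/tahoe):

 0 * * * * python /path/to/tahoe/misc/checkers/check_grid.py /home/me/testgrid-client /path/to/tahoe/bin/tahoe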
To set up the client node, do the following: tahoe create-client --introducer=INTRODUCER_FURL DIR tahoe run DIR tahoe -d DIR create-alias testgrid # pick a 10kB-ish test file, compute its md5sum tahoe -d DIR put FILE testgrid:old.MD5SUM tahoe -d DIR put FILE testgrid:recent.MD5SUM tahoe -d DIR put FILE testgrid:recentdir/recent.MD5SUM echo "" | tahoe -d DIR put --mutable - testgrid:log echo "" | tahoe -d DIR put --mutable - testgrid:recentlog This script will perform the following steps (the kind of compatibility that is being tested is in [brackets]): read old.* and check the md5sums [confirm that new code can read old files] read all recent.* files and check md5sums [read recent files] delete all recent.* files and verify they're gone [modify an old directory] read recentdir/recent.* files and check [read recent directory] delete recentdir/recent.* and verify [modify recent directory] delete recentdir and verify (keep the directory from growing unboundedly) mkdir recentdir upload random 10kB file to recentdir/recent.MD5SUM (prepare for next time) upload random 10kB file to recent.MD5SUM [new code can upload to old servers] append one-line timestamp to log [read/write old mutable files] append one-line timestamp to recentlog [read/write recent mutable files] delete recentlog upload small header to new mutable recentlog [create mutable files] This script will also keep track of speeds and latencies and will write them in a machine-readable logfile. """ import time, subprocess, md5, os.path, random from twisted.python import usage class GridTesterOptions(usage.Options): optFlags = [ ("no", "n", "Dry run: do not run any commands, just print them."), ] def parseArgs(self, nodedir, tahoe): # Note: does not support Unicode arguments. self.nodedir = os.path.expanduser(nodedir) self.tahoe = os.path.abspath(os.path.expanduser(tahoe)) class CommandFailed(Exception): pass class GridTester(object): def __init__(self, config): self.config = config self.tahoe = config.tahoe self.nodedir = config.nodedir def command(self, *cmd, **kwargs): expected_rc = kwargs.get("expected_rc", 0) stdin = kwargs.get("stdin", None) if self.config["no"]: return if stdin is not None: p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) (stdout,stderr) = p.communicate(stdin) else: p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) (stdout,stderr) = p.communicate() rc = p.returncode if expected_rc != None and rc != expected_rc: if stderr: print("STDERR:") print(stderr) raise CommandFailed("command '%s' failed: rc=%d" % (cmd, rc)) return stdout, stderr def cli(self, cmd, *args, **kwargs): print("tahoe", cmd, " ".join(args)) stdout, stderr = self.command(self.tahoe, "-d", self.nodedir, cmd, *args, **kwargs) if not kwargs.get("ignore_stderr", False) and stderr != "": raise CommandFailed("command '%s' had stderr: %s" % (" ".join(args), stderr)) return stdout def read_and_check(self, f): expected_md5_s = f[f.find(".")+1:] out = self.cli("get", "testgrid:" + f) got_md5_s = md5.new(out).hexdigest() if got_md5_s != expected_md5_s: raise CommandFailed("%s had md5sum of %s" % (f, got_md5_s)) def delete_and_check(self, dirname, f): oldfiles = self.listdir(dirname) if dirname: absfilename = "testgrid:" + dirname + "/" + f else: absfilename = "testgrid:" + f if f not in oldfiles: raise CommandFailed("um, '%s' was supposed to already be in %s" % (f, dirname)) self.cli("unlink", absfilename) newfiles = self.listdir(dirname) if f in newfiles: raise CommandFailed("failed to 
remove '%s' from %s" % (f, dirname)) def listdir(self, dirname): out = self.cli("ls", "testgrid:"+dirname).strip().split("\n") files = [f.strip() for f in out] print(" ", files) return files def do_test(self): files = self.listdir("") for f in files: if f.startswith("old.") or f.startswith("recent."): self.read_and_check("" + f) for f in files: if f.startswith("recent."): self.delete_and_check("", f) files = self.listdir("recentdir") for f in files: if f.startswith("old.") or f.startswith("recent."): self.read_and_check("recentdir/" + f) for f in files: if f.startswith("recent."): self.delete_and_check("recentdir", f) self.delete_and_check("", "recentdir") self.cli("mkdir", "testgrid:recentdir") fn, data = self.makefile("recent") self.put("recentdir/"+fn, data) files = self.listdir("recentdir") if fn not in files: raise CommandFailed("failed to put %s in recentdir/" % fn) fn, data = self.makefile("recent") self.put(fn, data) files = self.listdir("") if fn not in files: raise CommandFailed("failed to put %s in testgrid:" % fn) self.update("log") self.update("recentlog") self.delete_and_check("", "recentlog") self.put_mutable("recentlog", "Recent Mutable Log Header\n\n") def put(self, fn, data): self.cli("put", "-", "testgrid:"+fn, stdin=data, ignore_stderr=True) def put_mutable(self, fn, data): self.cli("put", "--mutable", "-", "testgrid:"+fn, stdin=data, ignore_stderr=True) def update(self, fn): old = self.cli("get", "testgrid:"+fn) new = old + time.ctime() + "\n" self.put(fn, new) def makefile(self, prefix): size = random.randint(10001, 10100) data = os.urandom(size) md5sum = md5.new(data).hexdigest() fn = prefix + "." + md5sum return fn, data def main(): config = GridTesterOptions() config.parseOptions() gt = GridTester(config) gt.do_test() if __name__ == "__main__": main() tahoe_lafs-1.20.0/misc/checkers/check_load.py0000644000000000000000000002117213615410400015772 0ustar00""" this is a load-generating client program. It does all of its work through a given tahoe node (specified by URL), and performs random reads and writes to the target. Run this in a directory with the following files: server-URLs : a list of tahoe node URLs (one per line). Each operation will use a randomly-selected server. root.cap: (string) the top-level directory rwcap to use delay: (float) seconds to delay between operations operation-mix: "R/W": two ints, relative frequency of read and write ops #size:? Set argv[1] to a per-client stats-NN.out file. This will will be updated with running totals of bytes-per-second and operations-per-second. The stats from multiple clients can be totalled together and averaged over time to compute the traffic being accepted by the grid. Each time a 'read' operation is performed, the client will begin at the root and randomly choose a child. If the child is a directory, the client will recurse. If the child is a file, the client will read the contents of the file. Each time a 'write' operation is performed, the client will generate a target filename (a random string). 90% of the time, the file will be written into the same directory that was used last time (starting at the root). 10% of the time, a new directory is created by assembling 1 to 5 pathnames chosen at random. The client then writes a certain number of zero bytes to this file. 
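In outline, each write chooses its target path like this (a condensed sketch of
the main loop further down; create_random_directory() and generate_filename()
are helpers defined later in this file):

  if random.uniform(0, 100) < 10:
      current_writedir = create_random_directory()  # a random prefix of a canned path
  filename = generate_filename()                    # short random hex string
  if current_writedir:
      pathname = current_writedir + "/" + filename
  else:
      pathname = filename
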
The filesize is determined with something like a power-law distribution, with a mean of 10kB and a max of 100MB, so filesize=min(int(1.0/random(.0002)),1e8) """ from __future__ import annotations import os, sys, httplib, binascii import urllib, json, random, time, urlparse if sys.argv[1] == "--stats": statsfiles = sys.argv[2:] # gather stats every 10 seconds, do a moving-window average of the last # 60 seconds DELAY = 10 MAXSAMPLES = 6 totals = [] last_stats : dict[str, float] = {} while True: stats : dict[str, float] = {} for sf in statsfiles: for line in open(sf, "r").readlines(): name, str_value = line.split(":") value = int(str_value.strip()) if name not in stats: stats[name] = 0 stats[name] += float(value) del name if last_stats: delta = dict( [ (n,stats[n]-last_stats[n]) for n in stats ] ) print("THIS SAMPLE:") for name in sorted(delta.keys()): avg = float(delta[name]) / float(DELAY) print("%20s: %0.2f per second" % (name, avg)) totals.append(delta) while len(totals) > MAXSAMPLES: totals.pop(0) # now compute average print() print("MOVING WINDOW AVERAGE:") for name in sorted(delta.keys()): avg = sum([ s[name] for s in totals]) / (DELAY*len(totals)) print("%20s %0.2f per second" % (name, avg)) last_stats = stats print() print() time.sleep(DELAY) stats_out = sys.argv[1] server_urls = [] for url in open("server-URLs", "r").readlines(): url = url.strip() if url: server_urls.append(url) root = open("root.cap", "r").read().strip() delay = float(open("delay", "r").read().strip()) readfreq, writefreq = ( [int(x) for x in open("operation-mix", "r").read().strip().split("/")]) files_uploaded = 0 files_downloaded = 0 bytes_uploaded = 0 bytes_downloaded = 0 directories_read = 0 directories_written = 0 def listdir(nodeurl, root, remote_pathname): if nodeurl[-1] != "/": nodeurl += "/" url = nodeurl + "uri/%s/" % urllib.quote(root) if remote_pathname: url += urllib.quote(remote_pathname) url += "?t=json" data = urllib.urlopen(url).read() try: parsed = json.loads(data) except ValueError: print("URL was", url) print("DATA was", data) raise nodetype, d = parsed assert nodetype == "dirnode" global directories_read directories_read += 1 children = dict( [(str(name),value) for (name,value) in d["children"].iteritems()] ) return children def choose_random_descendant(server_url, root, pathname=""): children = listdir(server_url, root, pathname) name = random.choice(children.keys()) child = children[name] if pathname: new_pathname = pathname + "/" + name else: new_pathname = name if child[0] == "filenode": return new_pathname return choose_random_descendant(server_url, root, new_pathname) def read_and_discard(nodeurl, root, pathname): if nodeurl[-1] != "/": nodeurl += "/" url = nodeurl + "uri/%s/" % urllib.quote(root) if pathname: url += urllib.quote(pathname) f = urllib.urlopen(url) global bytes_downloaded while True: data = f.read(4096) if not data: break bytes_downloaded += len(data) directories = [ "dreamland/disengaging/hucksters", "dreamland/disengaging/klondikes", "dreamland/disengaging/neatly", "dreamland/cottages/richmond", "dreamland/cottages/perhaps", "dreamland/cottages/spies", "dreamland/finder/diversion", "dreamland/finder/cigarette", "dreamland/finder/album", "hazing/licences/comedian", "hazing/licences/goat", "hazing/licences/shopkeeper", "hazing/regiment/frigate", "hazing/regiment/quackery", "hazing/regiment/centerpiece", "hazing/disassociate/mob", "hazing/disassociate/nihilistic", "hazing/disassociate/bilbo", ] def create_random_directory(): d = random.choice(directories) pieces = 
d.split("/") numsegs = random.randint(1, len(pieces)) return "/".join(pieces[0:numsegs]) def generate_filename(): fn = binascii.hexlify(os.urandom(4)) return fn def choose_size(): mean = 10e3 size = random.expovariate(1.0 / mean) return int(min(size, 100e6)) # copied from twisted/web/client.py def parse_url(url, defaultPort=None): url = url.strip() parsed = urlparse.urlparse(url) scheme = parsed[0] path = urlparse.urlunparse(('','')+parsed[2:]) if defaultPort is None: if scheme == 'https': defaultPort = 443 else: defaultPort = 80 host, port = parsed[1], defaultPort if ':' in host: host, port = host.split(':') port = int(port) if path == "": path = "/" return scheme, host, port, path def generate_and_put(nodeurl, root, remote_filename, size): if nodeurl[-1] != "/": nodeurl += "/" url = nodeurl + "uri/%s/" % urllib.quote(root) url += urllib.quote(remote_filename) scheme, host, port, path = parse_url(url) if scheme == "http": c = httplib.HTTPConnection(host, port) elif scheme == "https": c = httplib.HTTPSConnection(host, port) else: raise ValueError("unknown scheme '%s', need http or https" % scheme) c.putrequest("PUT", path) c.putheader("Hostname", host) c.putheader("User-Agent", "tahoe-check-load") c.putheader("Connection", "close") c.putheader("Content-Length", "%d" % size) c.endheaders() global bytes_uploaded while size: chunksize = min(size, 4096) size -= chunksize c.send("\x00" * chunksize) bytes_uploaded += chunksize return c.getresponse() current_writedir = "" while True: time.sleep(delay) if random.uniform(0, readfreq+writefreq) < readfreq: op = "read" else: op = "write" print("OP:", op) server = random.choice(server_urls) if op == "read": pathname = choose_random_descendant(server, root) print(" reading", pathname) read_and_discard(server, root, pathname) files_downloaded += 1 elif op == "write": if random.uniform(0, 100) < 10: current_writedir = create_random_directory() filename = generate_filename() if current_writedir: pathname = current_writedir + "/" + filename else: pathname = filename print(" writing", pathname) size = choose_size() print(" size", size) generate_and_put(server, root, pathname, size) files_uploaded += 1 f = open(stats_out+".tmp", "w") f.write("files-uploaded: %d\n" % files_uploaded) f.write("files-downloaded: %d\n" % files_downloaded) f.write("bytes-uploaded: %d\n" % bytes_uploaded) f.write("bytes-downloaded: %d\n" % bytes_downloaded) f.write("directories-read: %d\n" % directories_read) f.write("directories-written: %d\n" % directories_written) f.close() os.rename(stats_out+".tmp", stats_out) tahoe_lafs-1.20.0/misc/coding_tools/check-debugging.py0000755000000000000000000000154613615410400017626 0ustar00#! /usr/bin/python """ Checks for defer.setDebugging(). Runs on Python 3. 
Usage: ./check-debugging.py src """ import sys, re, os ok = True for starting_point in sys.argv[1:]: for root, dirs, files in os.walk(starting_point): for f in files: if not f.endswith(".py"): continue if f == "check-debugging.py": continue fn = os.path.join(root, f) for lineno,line in enumerate(open(fn, "r").readlines()): lineno = lineno+1 mo = re.search(r"\.setDebugging\(True\)", line) if mo: print("Do not use defer.setDebugging(True) in production") print("First used here: %s:%d" % (fn, lineno)) sys.exit(1) print("No cases of defer.setDebugging(True) were found, good!") sys.exit(0) tahoe_lafs-1.20.0/misc/coding_tools/check-interfaces.py0000644000000000000000000002151213615410400020006 0ustar00 # To check a particular Tahoe source distribution, this should be invoked from # the root directory of that distribution as # # bin/tahoe @misc/coding_tools/check-interfaces.py import os, sys, re, platform import zope.interface as zi # We use the forked version of verifyClass below. #from zope.interface.verify import verifyClass from zope.interface.advice import addClassAdvisor interesting_modules = re.compile(r'(allmydata)|(foolscap)\..*') excluded_classnames = re.compile(r'(_)|(Mock)|(Fake)|(Dummy).*') excluded_file_basenames = re.compile(r'(check)|(bench)_.*') _other_modules_with_violations = set() _err = sys.stderr _report_argname_mismatch = False # very noisy and usually not important # deep magic def strictly_implements(*interfaces): frame = sys._getframe(1) f_locals = frame.f_locals # Try to make sure we were called from a class def. Assumes Python > 2.2. if f_locals is frame.f_globals or '__module__' not in f_locals: raise TypeError("implements can be used only from a class definition.") if '__implements_advice_data__' in f_locals: raise TypeError("implements can be used only once in a class definition.") def _implements_advice(cls): interfaces, classImplements = cls.__dict__['__implements_advice_data__'] del cls.__implements_advice_data__ classImplements(cls, *interfaces) if interesting_modules.match(cls.__module__): if not excluded_classnames.match(cls.__name__): for interface in interfaces: try: verifyClass(interface, cls) except Exception as e: print("%s.%s does not correctly implement %s.%s:\n%s" % (cls.__module__, cls.__name__, interface.__module__, interface.__name__, e), file=_err) else: _other_modules_with_violations.add(cls.__module__) return cls f_locals['__implements_advice_data__'] = interfaces, zi.classImplements addClassAdvisor(_implements_advice, depth=2) def check(): # patchee-monkey zi.implements = strictly_implements if len(sys.argv) >= 2: if sys.argv[1] == '--help' or len(sys.argv) > 2: print("Usage: check-miscaptures.py [SOURCEDIR]", file=_err) return srcdir = sys.argv[1] else: # import modules under src/ by default srcdir = 'src' # attempt to avoid side-effects from importing command scripts sys.argv = ['', '--help'] syslow = platform.system().lower() is_windows = 'cygwin' in syslow or 'windows' in syslow for (dirpath, dirnames, filenames) in os.walk(srcdir): for fn in filenames: (basename, ext) = os.path.splitext(fn) if ext in ('.pyc', '.pyo') and not os.path.exists(os.path.join(dirpath, basename+'.py')): print("Warning: no .py source file for %r.\n" % (os.path.join(dirpath, fn),), file=_err) if ext == '.py' and not excluded_file_basenames.match(basename): relpath = os.path.join(dirpath[len(srcdir)+1:], basename) module = relpath.replace(os.sep, '/').replace('/', '.') try: __import__(module) except ImportError as e: if not is_windows and (' _win' in str(e) or 'win32' 
in str(e)): print("Warning: %r imports a Windows-specific module, so we cannot check it (%s).\n" % (module, str(e)), file=_err) else: import traceback traceback.print_exc(file=_err) print(file=_err) others = list(_other_modules_with_violations) others.sort() print("There were also interface violations in:\n", ", ".join(others), "\n", file=_err) # Forked from # http://svn.zope.org/*checkout*/Zope3/trunk/src/zope/interface/verify.py?content-type=text%2Fplain&rev=27687 # but modified to report all interface violations rather than just the first. ############################################################################## # # Copyright (c) 2001, 2002 Zope Corporation and Contributors. # All Rights Reserved. # # This software is subject to the provisions of the Zope Public License, # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE. # ############################################################################## """Verify interface implementations $Id$ """ from zope.interface.exceptions import DoesNotImplement from zope.interface.exceptions import BrokenMethodImplementation from types import FunctionType, MethodType from zope.interface.interface import fromMethod, fromFunction, Method # This will be monkey-patched when running under Zope 2, so leave this # here: MethodTypes = (MethodType, ) def _verify(iface, candidate, tentative=0, vtype=None): """Verify that 'candidate' might correctly implements 'iface'. This involves: o Making sure the candidate defines all the necessary methods o Making sure the methods have the correct signature o Making sure the candidate asserts that it implements the interface Note that this isn't the same as verifying that the class does implement the interface. If optional tentative is true, suppress the "is implemented by" test. """ if vtype == 'c': tester = iface.implementedBy else: tester = iface.providedBy violations = [] def format(e): return " " + str(e).strip() + "\n" if not tentative and not tester(candidate): violations.append(format(DoesNotImplement(iface))) # Here the `desc` is either an `Attribute` or `Method` instance for name, desc in iface.namesAndDescriptions(1): if not hasattr(candidate, name): if (not isinstance(desc, Method)) and vtype == 'c': # We can't verify non-methods on classes, since the # class may provide attrs in it's __init__. continue if isinstance(desc, Method): violations.append(" The %r method was not provided.\n" % (name,)) else: violations.append(" The %r attribute was not provided.\n" % (name,)) continue attr = getattr(candidate, name) if not isinstance(desc, Method): # If it's not a method, there's nothing else we can test continue if isinstance(attr, FunctionType): # should never get here, since classes should not provide functions meth = fromFunction(attr, iface, name=name) elif (isinstance(attr, MethodTypes) and type(attr.im_func) is FunctionType): meth = fromMethod(attr, iface, name) else: if not callable(attr): violations.append(format(BrokenMethodImplementation(name, "Not a method"))) # sigh, it's callable, but we don't know how to intrspect it, so # we have to give it a pass. continue # Make sure that the required and implemented method signatures are # the same. 
desc = desc.getSignatureInfo() meth = meth.getSignatureInfo() mess = _incompat(desc, meth) if mess: violations.append(format(BrokenMethodImplementation(name, mess))) if violations: raise Exception("".join(violations)) return True def verifyClass(iface, candidate, tentative=0): return _verify(iface, candidate, tentative, vtype='c') def verifyObject(iface, candidate, tentative=0): return _verify(iface, candidate, tentative, vtype='o') def _incompat(required, implemented): if len(implemented['required']) > len(required['required']): return 'implementation requires too many arguments' if ((len(implemented['positional']) < len(required['positional'])) and not implemented['varargs']): return "implementation doesn't allow enough arguments" if required['kwargs'] and not implemented['kwargs']: return "implementation doesn't support keyword arguments" if required['varargs'] and not implemented['varargs']: return "implementation doesn't support variable arguments" if (_report_argname_mismatch and required['positional'] != implemented['positional'][:len(required['positional'])] and implemented['kwargs'] is None): return 'implementation has different argument names' if __name__ == "__main__": check() # Avoid spurious warnings about ignored exceptions during shutdown by doing a hard exit. os._exit(0) tahoe_lafs-1.20.0/misc/coding_tools/check-umids.py0000644000000000000000000000206013615410400017001 0ustar00#! /usr/bin/python3 """ Ensure UMIDS are unique. This runs on Python 3. """ # ./check-umids.py src import sys, re, os ok = True umids = {} for starting_point in sys.argv[1:]: for root, dirs, files in os.walk(starting_point): for fn in [f for f in files if f.endswith(".py")]: fn = os.path.join(root, fn) for lineno,line in enumerate(open(fn, "r").readlines()): lineno = lineno+1 if "umid" not in line: continue mo = re.search("umid=[\"\']([^\"\']+)[\"\']", line) if mo: umid = mo.group(1) if umid in umids: oldfn, oldlineno = umids[umid] print("%s:%d: duplicate umid '%s'" % (fn, lineno, umid)) print("%s:%d: first used here" % (oldfn, oldlineno)) ok = False umids[umid] = (fn,lineno) if ok: print("all umids are unique") else: print("some umids were duplicates") sys.exit(1) tahoe_lafs-1.20.0/misc/coding_tools/coverage.el0000644000000000000000000001103213615410400016347 0ustar00 (defvar coverage-annotation-file ".coverage.el") (defvar coverage-annotations nil) (defun find-coverage-annotation-file () (let ((dir (file-name-directory buffer-file-name)) (olddir "/")) (while (and (not (equal dir olddir)) (not (file-regular-p (concat dir coverage-annotation-file)))) (setq olddir dir dir (file-name-directory (directory-file-name dir)))) (and (not (equal dir olddir)) (concat dir coverage-annotation-file)) )) (defun load-coverage-annotations () (let* ((annotation-file (find-coverage-annotation-file)) (coverage (with-temp-buffer (insert-file-contents annotation-file) (let ((form (read (current-buffer)))) (eval form))))) (setq coverage-annotations coverage) coverage )) (defun coverage-unannotate () (save-excursion (dolist (ov (overlays-in (point-min) (point-max))) (delete-overlay ov)) (setq coverage-this-buffer-is-annotated nil) (message "Removed annotations") )) ;; in emacs22, it will be possible to put the annotations in the fringe. Set ;; a display property for one of the characters in the line, using ;; (right-fringe BITMAP FACE), where BITMAP should probably be right-triangle ;; or so, and FACE should probably be '(:foreground "red"). We can also ;; create new bitmaps, with faces. 
To do tartans will require a lot of ;; bitmaps, and you've only got about 8 pixels to work with. ;; unfortunately emacs21 gives us less control over the fringe. We can use ;; overlays to put letters on the left or right margins (in the text area, ;; overriding actual program text), and to modify the text being displayed ;; (by changing its background color, or adding a box around each word). (defun coverage-annotate (show-code) (let ((allcoverage (load-coverage-annotations)) (filename-key (expand-file-name buffer-file-truename)) thiscoverage code-lines covered-lines uncovered-code-lines ) (while (and (not (gethash filename-key allcoverage nil)) (string-match "/" filename-key)) ;; eat everything up to and including the first slash, then look again (setq filename-key (substring filename-key (+ 1 (string-match "/" filename-key))))) (setq thiscoverage (gethash filename-key allcoverage nil)) (if thiscoverage (progn (setq coverage-this-buffer-is-annotated t) (setq code-lines (nth 0 thiscoverage) covered-lines (nth 1 thiscoverage) uncovered-code-lines (nth 2 thiscoverage) ) (save-excursion (dolist (ov (overlays-in (point-min) (point-max))) (delete-overlay ov)) (if show-code (dolist (line code-lines) (goto-line line) ;;(add-text-properties (point) (line-end-position) '(face bold) ) (overlay-put (make-overlay (point) (line-end-position)) ;'before-string "C" ;'face '(background-color . "green") 'face '(:background "dark green") ) )) (dolist (line uncovered-code-lines) (goto-line line) (overlay-put (make-overlay (point) (line-end-position)) ;'before-string "D" ;'face '(:background "blue") ;'face '(:underline "blue") 'face '(:box "red") ) ) (message (format "Added annotations: %d uncovered lines" (safe-length uncovered-code-lines))) ) ) (message "unable to find coverage for this file")) )) (defun coverage-toggle-annotations (show-code) (interactive "P") (if coverage-this-buffer-is-annotated (coverage-unannotate) (coverage-annotate show-code)) ) (setq coverage-this-buffer-is-annotated nil) (make-variable-buffer-local 'coverage-this-buffer-is-annotated) (define-minor-mode coverage-annotation-minor-mode "Minor mode to annotate code-coverage information" nil " CA" '( ("\C-c\C-a" . coverage-toggle-annotations) ) () ; forms run on mode entry/exit ) (defun maybe-enable-coverage-mode () (if (string-match "/src/allmydata/" (buffer-file-name)) (coverage-annotation-minor-mode t) )) (add-hook 'python-mode-hook 'maybe-enable-coverage-mode) tahoe_lafs-1.20.0/misc/coding_tools/coverage2el.py0000644000000000000000000000350013615410400017003 0ustar00from coverage import coverage, summary, misc class ElispReporter(summary.SummaryReporter): def report(self, morfs=None): self.find_code_units(morfs) out = open(".coverage.el", "w") out.write(""" ;; This is an elisp-readable form of the coverage data. It defines a ;; single top-level hash table in which the key is an asolute pathname, and ;; the value is a three-element list. The first element of this list is a ;; list of line numbers that represent actual code statements. The second is ;; a list of line numbers for lines which got used during the unit test. The ;; third is a list of line numbers for code lines that were not covered ;; (since 'code' and 'covered' start as sets, this last list is equal to ;; 'code - covered'). 
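;;
;; For example (the pathname and line numbers are illustrative), a single entry
;; in the table looks like:
;;   (puthash "/home/user/src/allmydata/node.py" '((10 11 14) (10 14) (11)) results)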
""") out.write("(let ((results (make-hash-table :test 'equal)))\n") for cu in self.code_units: f = cu.filename try: (fn, executable, missing, mf) = self.coverage.analysis(cu) except misc.NoSource: continue code_linenumbers = executable uncovered_code = missing covered_linenumbers = sorted(set(executable) - set(missing)) out.write(" (puthash \"%s\" '((%s) (%s) (%s)) results)\n" % (f, " ".join([str(ln) for ln in sorted(code_linenumbers)]), " ".join([str(ln) for ln in sorted(covered_linenumbers)]), " ".join([str(ln) for ln in sorted(uncovered_code)]), )) out.write(" results)\n") out.close() def main(): c = coverage() # defaults to data_file=.coverage c.load() c._harvest_data() c.config.from_args(include="src/*") ElispReporter(c, c.config).report() if __name__ == '__main__': main() tahoe_lafs-1.20.0/misc/coding_tools/fixshebangs.py0000644000000000000000000000153213615410400017111 0ustar00#!/usr/bin/env python from allmydata.util import fileutil import re, shutil, sys R=re.compile("^#! */usr/bin/python *$") for fname in sys.argv[1:]: inf = open(fname, "rU") rntf = fileutil.ReopenableNamedTemporaryFile() outf = open(rntf.name, "w") first = True for l in inf: if first and R.search(l): outf.write("#!/usr/bin/env python\n") else: outf.write(l) first = False outf.close() try: shutil.move(rntf.name, fname) except EnvironmentError: # Couldn't atomically overwrite, so just hope that this process doesn't die # and the target file doesn't get recreated in between the following two # operations: shutil.move(fname, fname + ".bak") shutil.move(rntf.name, fname) fileutil.remove_if_possible(fname + ".bak") tahoe_lafs-1.20.0/misc/coding_tools/graph-deps.py0000755000000000000000000002645713615410400016662 0ustar00#!/usr/bin/env python # Run this as "./graph-deps.py ." from your source tree, then open out.png . # You can also use a PyPI package name, e.g. "./graph-deps.py tahoe-lafs". # # This builds all necessary wheels for your project (in a tempdir), scans # them to learn their inter-dependencies, generates a DOT-format graph # specification, then runs the "dot" program (from the "graphviz" package) to # turn this into a PNG image. # To hack on this script (e.g. change the way it generates DOT) without # re-building the wheels each time, set --wheeldir= to some not-existent # path. It will write the wheels to that directory instead of a tempdir. The # next time you run it, if --wheeldir= points to a directory, it will read # the wheels from there. # To hack on the DOT output without re-running this script, add --write-dot, # which will cause it to write "out.dot". Edit that file, then run "dot -Tpng # out.dot >out.png" to re-render the graph. # Install 'click' first. I run this with py2, but py3 might work too, if the # wheels can be built with py3. import os, sys, subprocess, json, tempfile, zipfile, re, itertools import email.parser from pprint import pprint from io import StringIO import click all_packages = {} # name -> version all_reqs = {} # name -> specs all_pure = set() # 1: build a local directory of wheels for the given target # pip wheel --wheel-dir=tempdir sys.argv[1] def build_wheels(target, wheeldir): print("-- building wheels for '%s' in %s" % (target, wheeldir)) pip = subprocess.Popen(["pip", "wheel", "--wheel-dir", wheeldir, target], stdout=subprocess.PIPE) stdout = pip.communicate()[0] if pip.returncode != 0: sys.exit(pip.returncode) # 'pip wheel .' starts with "Processing /path/to/." but ends with # "Successfully built PKGNAME". 
'pip wheel PKGNAME' start with # "Collecting PKGNAME" but ends with e.g. "Skipping foo, due to already # being wheel." lines = stdout.decode("utf-8").splitlines() if lines[0].startswith("Collecting "): root_pkgname = lines[0].split()[-1] elif lines[-1].startswith("Successfully built "): root_pkgname = lines[-1].split()[-1] else: print("Unable to figure out root package name") print("'pip wheel %s' output is:" % target) print(stdout) sys.exit(1) with open(os.path.join(wheeldir, "root_pkgname"), "w") as f: f.write(root_pkgname+"\n") def get_root_pkgname(wheeldir): with open(os.path.join(wheeldir, "root_pkgname"), "r") as f: return f.read().strip() # 2: for each wheel, find the *.dist-info file, find metadata.json inside # that, extract metadata.run_requires[0].requires def add(name, version, extras, reqs, raw): if set(reqs) - set([None]) - set(extras): print("um, %s metadata has mismatching extras/reqs" % name) pprint(extras) pprint(reqs) print("raw data:") pprint(raw) raise ValueError if None not in reqs: print("um, %s has no reqs" % name) print("raw data:") pprint(raw) raise ValueError all_packages[name] = version all_reqs[name] = reqs def parse_metadata_json(f): md = json.loads(f.read().decode("utf-8")) name = md["name"].lower() version = md["version"] try: reqs = {None: []} # extra_name/None -> [specs] if "run_requires" in md: for r in md["run_requires"]: reqs[r.get("extra", None)] = r["requires"] # this package provides the following extras extras = md.get("extras", []) #for e in extras: # if e not in reqs: # reqs[e] = [] except KeyError: print("error in '%s'" % name) pprint(md) raise add(name, version, extras, reqs, md) return name def parse_METADATA(f): data = f.read().decode("utf-8") md = email.parser.Parser().parsestr(data) name = md.get_all("Name")[0].lower() version = md.get_all("Version")[0] reqs = {None: []} for req in md.get_all("Requires-Dist") or []: # untested pieces = [p.strip() for p in req.split(";")] spec = pieces[0] extra = None if len(pieces) > 1: mo = re.search(r"extra == '(\w+)'", pieces[1]) if mo: extra = mo.group(1) if extra not in reqs: reqs[extra] = [] reqs[extra].append(spec) extras = md.get_all("Provides-Extra") or [] # untested add(name, version, extras, reqs, data) return name def parse_wheels(wheeldir): for fn in os.listdir(wheeldir): if not fn.endswith(".whl"): continue zf = zipfile.ZipFile(os.path.join(wheeldir, fn)) zfnames = zf.namelist() mdfns = [n for n in zfnames if n.endswith(".dist-info/metadata.json")] if mdfns: name = parse_metadata_json(zf.open(mdfns[0])) else: mdfns = [n for n in zfnames if n.endswith(".dist-info/METADATA")] if mdfns: name = parse_METADATA(zf.open(mdfns[0])) else: print("no metadata for", fn) continue is_pure = False wheel_fns = [n for n in zfnames if n.endswith(".dist-info/WHEEL")] if wheel_fns: with zf.open(wheel_fns[0]) as wheel: for line in wheel: if line.lower().rstrip() == b"root-is-purelib: true": is_pure = True if is_pure: all_pure.add(name) return get_root_pkgname(wheeldir) # 3: emit a .dot file with a graph of all the dependencies def dot_name(name, extra): # the 'dot' format enforces C identifier syntax on node names assert name.lower() == name, name name = "%s__%s" % (name, extra) return name.replace("-", "_").replace(".", "_") def parse_spec(spec): # turn "twisted[tls] (>=16.0.0)" into "twisted" pieces = spec.split() name_and_extras = pieces[0] paren_constraint = pieces[1] if len(pieces) > 1 else "" if "[" in name_and_extras: name = name_and_extras[:name_and_extras.find("[")] extras_bracketed = 
name_and_extras[name_and_extras.find("["):] extras = extras_bracketed.strip("[]").split(",") else: name = name_and_extras extras = [] return name.lower(), extras, paren_constraint def format_attrs(**kwargs): # return "", or "[attr=value attr=value]" if not kwargs or all([not(v) for v in kwargs.values()]): return "" def escape(s): return s.replace('\n', r'\n').replace('"', r'\"') pieces = ['%s="%s"' % (k, escape(kwargs[k])) for k in sorted(kwargs) if kwargs[k]] body = " ".join(pieces) return "[%s]" % body # We draw a node for each wheel. When one of the inbound dependencies asks # for an extra, we assign that (target, extra) pair a color. We draw outbound # links for all non-extra dependencies in black. If something asked the # target for an extra, we also draw links for the extra deps using the # assigned color. COLORS = itertools.cycle(["green", "blue", "red", "purple"]) extras_to_show = {} # maps (target, extraname) -> colorname def add_extra_to_show(targetname, extraname): key = (targetname, extraname) if key not in extras_to_show: extras_to_show[key] = next(COLORS) _scanned = set() def scan(name, extra=None, path=""): dupkey = (name, extra) if dupkey in _scanned: #print("SCAN-SKIP %s %s[%s]" % (path, name, extra)) return _scanned.add(dupkey) #print("SCAN %s %s[%s]" % (path, name, extra)) add_extra_to_show(name, extra) for spec in all_reqs[name][extra]: #print("-", spec) dep_name, dep_extras, dep_constraint = parse_spec(spec) #print("--", dep_name, dep_extras) children = set(dep_extras) children.add(None) for dep_extra in children: scan(dep_name, dep_extra, path=path+"->%s[%s]" % (dep_name, dep_extra)) def generate_dot(): f = StringIO() f.write("digraph {\n") for name, extra in extras_to_show.keys(): version = all_packages[name] if extra: label = "%s[%s]\n%s" % (name, extra, version) else: label = "%s\n%s" % (name, version) color = None if name not in all_pure: color = "red" f.write('%s %s\n' % (dot_name(name, extra), format_attrs(label=label, color=color))) for (source, extra), color in extras_to_show.items(): if extra: f.write('%s -> %s [weight="50" style="dashed"]\n' % (dot_name(source, extra), dot_name(source, None))) specs = all_reqs[source][extra] for spec in specs: reqname, reqextras, paren_constraint = parse_spec(spec) #extras_bracketed = "[%s]" % ",".join(extras) if extras else "" #edge_label = " ".join([p for p in [extras_bracketed, # paren_constraint] if p]) assert None not in reqextras if not reqextras: reqextras = [None] for reqextra in reqextras: edge_label = "" if extra: edge_label += "(%s[%s] wants)\n" % (source, extra) edge_label += spec style = "bold" if reqextra else "solid" f.write('%s -> %s %s\n' % (dot_name(source, extra), dot_name(reqname, reqextra), format_attrs(label=edge_label, fontcolor=color, style=style, color=color))) f.write("}\n") return f # 4: convert to .png def dot_to_png(f, png_fn): png = open(png_fn, "wb") dot = subprocess.Popen(["dot", "-Tpng"], stdin=subprocess.PIPE, stdout=png) dot.communicate(f.getvalue().encode("utf-8")) if dot.returncode != 0: sys.exit(dot.returncode) png.close() print("wrote graph to %s" % png_fn) @click.command() @click.argument("target") @click.option("--wheeldir", default=None, type=str) @click.option("--write-dot/--no-write-dot", default=False) def go(target, wheeldir, write_dot): if wheeldir: if os.path.isdir(wheeldir): print("loading wheels from", wheeldir) root_pkgname = parse_wheels(wheeldir) else: assert not os.path.exists(wheeldir) print("loading wheels from", wheeldir) build_wheels(target, wheeldir) root_pkgname = 
parse_wheels(wheeldir) else: wheeldir = tempfile.mkdtemp() build_wheels(target, wheeldir) root_pkgname = parse_wheels(wheeldir) print("root package:", root_pkgname) # parse the requirement specs (which look like "Twisted[tls] (>=13.0.0)") # enough to identify the package name pprint(all_packages) pprint(all_reqs) print("pure:", " ".join(sorted(all_pure))) for name in all_packages.keys(): extras_to_show[(name, None)] = "black" scan(root_pkgname) f = generate_dot() if write_dot: with open("out.dot", "w") as dotf: dotf.write(f.getvalue()) print("wrote DOT to out.dot") dot_to_png(f, "out.png") return 0 if __name__ == "__main__": go() tahoe_lafs-1.20.0/misc/coding_tools/make-canary-files.py0000644000000000000000000001241213615410400020077 0ustar00#!/usr/bin/env python """ Given a list of nodeids and a 'convergence' file, create a bunch of files that will (when encoded at k=1,N=1) be uploaded to specific nodeids. Run this as follows: make-canary-files.py -c PATH/TO/convergence -n PATH/TO/nodeids -k 1 -N 1 It will create a directory named 'canaries', with one file per nodeid named '$NODEID-$NICKNAME.txt', that contains some random text. The 'nodeids' file should contain one base32 nodeid per line, followed by the optional nickname, like: --- 5yyqu2hbvbh3rgtsgxrmmg4g77b6p3yo server12 vb7vm2mneyid5jbyvcbk2wb5icdhwtun server13 ... --- The resulting 'canaries/5yyqu2hbvbh3rgtsgxrmmg4g77b6p3yo-server12.txt' file will, when uploaded with the given (convergence,k,N) pair, have its first share placed on the 5yyq/server12 storage server. If N>1, the other shares will be placed elsewhere, of course. This tool can be useful to construct a set of 'canary' files, which can then be uploaded to storage servers, and later downloaded to test a grid's health. If you are able to download the canary for server12 via some tahoe node X, then the following properties are known to be true: node X is running, and has established a connection to server12 server12 is running, and returning data for at least the given file Using k=1/N=1 creates a separate test for each server. The test process is then to download the whole directory of files (perhaps with a t=deep-check operation). Alternatively, you could upload with the usual k=3/N=10 and then move/delete shares to put all N shares on a single server. Note that any changes to the nodeid list will affect the placement of shares. Shares should be uploaded with the same nodeid list as this tool used when constructing the files. Also note that this tool uses the Tahoe codebase, so it should be run on a system where Tahoe is installed, or in a source tree with setup.py like this: setup.py run_with_pythonpath -p -c 'misc/make-canary-files.py ARGS..' 
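For reference, the placement test at the heart of this tool is just a
permuted-hash comparison: a candidate file is kept when the target server
sorts first by sha1(storage_index + nodeid). A minimal sketch of that check
(the real code below derives the storage index with allmydata's upload
machinery rather than taking it as an argument):

  import hashlib
  def lands_on_target(target, storage_index, all_nodeids):
      permuted = sorted(all_nodeids,
                        key=lambda nodeid: hashlib.sha1(storage_index + nodeid).digest())
      return permuted[0] == target
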
""" from past.builtins import cmp import os, hashlib from twisted.python import usage from allmydata.immutable import upload from allmydata.util import base32 class Options(usage.Options): optParameters = [ ("convergence", "c", None, "path to NODEDIR/private/convergence"), ("nodeids", "n", None, "path to file with one base32 nodeid per line"), ("k", "k", 1, "number of necessary shares, defaults to 1", int), ("N", "N", 1, "number of total shares, defaults to 1", int), ] optFlags = [ ("verbose", "v", "Be noisy"), ] opts = Options() opts.parseOptions() verbose = bool(opts["verbose"]) nodes = {} for line in open(opts["nodeids"], "r").readlines(): line = line.strip() if not line or line.startswith("#"): continue pieces = line.split(None, 1) if len(pieces) == 2: nodeid_s, nickname = pieces else: nodeid_s = pieces[0] nickname = None nodeid = base32.a2b(nodeid_s) nodes[nodeid] = nickname if opts["k"] != 3 or opts["N"] != 10: print("note: using non-default k/N requires patching the Tahoe code") print("src/allmydata/client.py line 55, DEFAULT_ENCODING_PARAMETERS") convergence_file = os.path.expanduser(opts["convergence"]) convergence_s = open(convergence_file, "rb").read().strip() convergence = base32.a2b(convergence_s) def get_permuted_peers(key): results = [] for nodeid in nodes: permuted = hashlib.sha1(key + nodeid).digest() results.append((permuted, nodeid)) results.sort(lambda a,b: cmp(a[0], b[0])) return [ r[1] for r in results ] def find_share_for_target(target): target_s = base32.b2a(target) prefix = "The first share of this file will be placed on " + target_s + "\n" prefix += "This data is random: " attempts = 0 while True: attempts += 1 suffix = base32.b2a(os.urandom(10)) if verbose: print(" trying", suffix, end=' ') data = prefix + suffix + "\n" assert len(data) > 55 # no LIT files # now, what storage index will this get? u = upload.Data(data, convergence) eu = upload.EncryptAnUploadable(u) d = eu.get_storage_index() # this happens to run synchronously def _got_si(si, data=data): if verbose: print("SI", base32.b2a(si), end=' ') peerlist = get_permuted_peers(si) if peerlist[0] == target: # great! if verbose: print(" yay!") fn = base32.b2a(target) if nodes[target]: nickname = nodes[target].replace("/", "_") fn += "-" + nickname fn += ".txt" fn = os.path.join("canaries", fn) open(fn, "w").write(data) return True # nope, must try again if verbose: print(" boo") return False d.addCallback(_got_si) # get sneaky and look inside the Deferred for the synchronous result if d.result: return attempts os.mkdir("canaries") attempts = [] for target in nodes: target_s = base32.b2a(target) print("working on", target_s) attempts.append(find_share_for_target(target)) print("done") print("%d attempts total, avg %d per target, max %d" % \ (sum(attempts), 1.0* sum(attempts) / len(nodes), max(attempts))) tahoe_lafs-1.20.0/misc/coding_tools/make_umid0000644000000000000000000000305213615410400016113 0ustar00#!/usr/bin/env python """Create a short probably-unique string for use as a umid= argument in a Foolscap log() call, to make it easier to locate the source code that generated the message. The main text of the log message is frequently unhelpful for this, and python doesn't make it cheap to compile in the filename and line number of logging calls. Given a message-unique-ID like 'aXoWcA', make your logging call look like: log.msg('OMG badness', level=log.WEIRD, umid='aXoWcA') Then later, if this message actually occurs, you can grep your source tree for aXoWcA to locate the code that caused it. 
Just stick to the convention that 'umid=' is reserved for this job. It is a good idea to make all the logging statements that could provoke an Incident (i.e. those at level=log.WEIRD or higher) have umid= arguments, to make it easier to write classifier functions for the incident-gatherer. """ ''' The following elisp code may be useful: (defun insert-umid () (interactive) (insert ", umid=\"") (call-process "make_umid" nil t) (delete-char -1) (insert "\"") ) (global-set-key (kbd "C-\`") 'insert-umid) ''' # ' # emacs gets confused by the odd number of single-quotes there import os, base64, sys def make_id(): while True: m = os.urandom(4) # this gives 6-character message ids m = base64.b64encode(m) if "/" in m or "+" in m: continue m = m.replace("=", "") break return m count = 1 if len(sys.argv) > 1: count = int(sys.argv[1]) for i in range(count): print(make_id()) tahoe_lafs-1.20.0/misc/incident-gatherer/classify_tahoe.py0000644000000000000000000000463213615410400020522 0ustar00 import re umidmap = { 'lp1vaQ': 'download-not-enough-shares', '3uuBUQ': 'download-connection-lost-in-get-buckets', 'LkD9Pw': 'user-incident-button', } def classify_incident(trigger): m = trigger.get('message', '') f = trigger.get('format', '') umid_value = umidmap.get(trigger.get('umid',''), None) if umid_value: return umid_value if re.search(r"^they had shares .* that we didn't know about$", m): # Publish._got_write_answer return "mutable-publish-surprise-shares" if m.startswith("error during query"): # there are a couple of different places that can generate this # message (the result of cut-and-paste error-handling), so it isn't # clear which is which if re.search(r'mutable/servermap\.py.*_do_query', m): # servermap.ServermapUpdater._query_failed() where = "mapupdate" elif re.search(r'mutable/retrieve\.py.*_got_results_one_share', m): where = "retrieve" else: where = "unknown" if ("Calling Stale Broker" in m and "DeadReferenceError" in m): # a storage server went offline while we were talking to it (or # because the client was shut off in the middle of an operation) what = "lost-server" elif "IOError" in m: what = "ioerror" elif ("UncoordinatedWriteError" in m and "someone wrote to the data since we read the servermap" in m): what = "uncoordinated-write-error" elif "ConnectionLost" in m: what = "lost-server" else: what = "unknown" return "mutable-" + where + "-query-" + what if (f.startswith("ran out of peers:") and "have" in trigger and "need" in trigger): return "mutable-retrieve-failure" if m.startswith("invalid privkey from "): # TODO: a UCW causes this, after the prefix has changed. Compare the # prefix before trying to validate the privkey, to avoid the # duplicate error. 
return "mutable-mapupdate-bad-privkey" if trigger.get('facility', '') == "tahoe.introducer": if (trigger.get('isError', False) and "ConnectionDone" in str(trigger.get('failure',''))): return "introducer-lost-connection" if "Initial Introducer connection failed" in m: return "introducer-connection-failed" return None tahoe_lafs-1.20.0/misc/operations_helpers/cpu-watcher-poll.py0000644000000000000000000000075513615410400021226 0ustar00#!/usr/bin/env python from foolscap import Tub, eventual from twisted.internet import reactor import sys import pprint def oops(f): print("ERROR") print(f) def fetch(furl): t = Tub() t.startService() d = t.getReference(furl) d.addCallback(lambda rref: rref.callRemote("get_averages")) d.addCallback(pprint.pprint) return d d = eventual.fireEventually(sys.argv[1]) d.addCallback(fetch) d.addErrback(oops) d.addBoth(lambda res: reactor.stop()) reactor.run() tahoe_lafs-1.20.0/misc/operations_helpers/cpu-watcher-subscribe.py0000644000000000000000000000264513615410400022241 0ustar00# -*- python -*- from twisted.internet import reactor import sys import os.path, pprint from twisted.application import service from twisted.python import log from foolscap import Tub, Referenceable, RemoteInterface from foolscap.schema import ListOf, TupleOf from zope.interface import implements Averages = ListOf( TupleOf(str, float, float, float) ) class RICPUWatcherSubscriber(RemoteInterface): def averages(averages=Averages): return None class CPUWatcherSubscriber(service.MultiService, Referenceable): implements(RICPUWatcherSubscriber) def __init__(self, furlthing): service.MultiService.__init__(self) if furlthing.startswith("pb://"): furl = furlthing else: furlfile = os.path.expanduser(furlthing) if os.path.isdir(furlfile): furlfile = os.path.join(furlfile, "watcher.furl") furl = open(furlfile, "r").read().strip() tub = Tub() tub.setServiceParent(self) tub.connectTo(furl, self.connected) def connected(self, rref): print("subscribing") d = rref.callRemote("get_averages") d.addCallback(self.remote_averages) d.addErrback(log.err) d = rref.callRemote("subscribe", self) d.addErrback(log.err) def remote_averages(self, averages): pprint.pprint(averages) c = CPUWatcherSubscriber(sys.argv[1]) c.startService() reactor.run() tahoe_lafs-1.20.0/misc/operations_helpers/cpu-watcher.tac0000644000000000000000000002051613615410400020376 0ustar00# -*- python -*- """ # run this tool on a linux box in its own directory, with a file named # 'pids.txt' describing which processes to watch. It will follow CPU usage of # the given processes, and compute 1/5/15-minute moving averages for each # process. These averages can be retrieved from a foolscap connection # (published at ./watcher.furl), or through an HTTP query (using ./webport). # Each line of pids.txt describes a single process. Blank lines and ones that # begin with '#' are ignored. Each line is either "PID" or "PID NAME" (space # separated). PID is either a numeric process ID, a pathname to a file that # contains a process ID, or a pathname to a directory that contains a # twistd.pid file (which contains a process ID). NAME is an arbitrary string # that will be used to describe the process to watcher.furl subscribers, and # defaults to PID if not provided. 
""" # TODO: # built-in graphs on web interface import pickle, os.path, time, pprint from twisted.application import internet, service, strports from twisted.web import server, resource, http from twisted.python import log import json from foolscap import Tub, Referenceable, RemoteInterface, eventual from foolscap.schema import ListOf, TupleOf from zope.interface import implements def read_cpu_times(pid): data = open("/proc/%d/stat" % pid, "r").read() data = data.split() times = data[13:17] # the values in /proc/%d/stat are in ticks, I think. My system has # CONFIG_HZ_1000=y in /proc/config.gz but nevertheless the numbers in # 'stat' appear to be 10ms each. HZ = 100 userspace_seconds = int(times[0]) * 1.0 / HZ system_seconds = int(times[1]) * 1.0 / HZ child_userspace_seconds = int(times[2]) * 1.0 / HZ child_system_seconds = int(times[3]) * 1.0 / HZ return (userspace_seconds, system_seconds) def read_pids_txt(): processes = [] for line in open("pids.txt", "r").readlines(): line = line.strip() if not line or line[0] == "#": continue parts = line.split() pidthing = parts[0] if len(parts) > 1: name = parts[1] else: name = pidthing pid = None try: pid = int(pidthing) except ValueError: pidfile = os.path.expanduser(pidthing) if os.path.isdir(pidfile): pidfile = os.path.join(pidfile, "twistd.pid") try: pid = int(open(pidfile, "r").read().strip()) except EnvironmentError: pass if pid is not None: processes.append( (pid, name) ) return processes Averages = ListOf( TupleOf(str, float, float, float) ) class RICPUWatcherSubscriber(RemoteInterface): def averages(averages=Averages): return None class RICPUWatcher(RemoteInterface): def get_averages(): """Return a list of rows, one for each process I am watching. Each row is (name, 1-min-avg, 5-min-avg, 15-min-avg), where 'name' is a string, and the averages are floats from 0.0 to 1.0 . Each average is the percentage of the CPU that this process has used: the change in CPU time divided by the change in wallclock time. """ return Averages def subscribe(observer=RICPUWatcherSubscriber): """Arrange for the given observer to get an 'averages' message every time the averages are updated. 
This message will contain a single argument, the same list of tuples that get_averages() returns.""" return None class CPUWatcher(service.MultiService, resource.Resource, Referenceable): implements(RICPUWatcher) POLL_INTERVAL = 30 # seconds HISTORY_LIMIT = 15 * 60 # 15min AVERAGES = (1*60, 5*60, 15*60) # 1min, 5min, 15min def __init__(self): service.MultiService.__init__(self) resource.Resource.__init__(self) try: self.history = pickle.load(open("history.pickle", "rb")) except: self.history = {} self.current = [] self.observers = set() ts = internet.TimerService(self.POLL_INTERVAL, self.poll) ts.setServiceParent(self) def startService(self): service.MultiService.startService(self) try: desired_webport = open("webport", "r").read().strip() except EnvironmentError: desired_webport = None webport = desired_webport or "tcp:0" root = self serv = strports.service(webport, server.Site(root)) serv.setServiceParent(self) if not desired_webport: got_port = serv._port.getHost().port open("webport", "w").write("tcp:%d\n" % got_port) self.tub = Tub(certFile="watcher.pem") self.tub.setServiceParent(self) try: desired_tubport = open("tubport", "r").read().strip() except EnvironmentError: desired_tubport = None tubport = desired_tubport or "tcp:0" l = self.tub.listenOn(tubport) if not desired_tubport: got_port = l.getPortnum() open("tubport", "w").write("tcp:%d\n" % got_port) d = self.tub.setLocationAutomatically() d.addCallback(self._tub_ready) d.addErrback(log.err) def _tub_ready(self, res): self.tub.registerReference(self, furlFile="watcher.furl") def getChild(self, path, req): if path == "": return self return resource.Resource.getChild(self, path, req) def render(self, req): t = req.args.get("t", ["html"])[0] ctype = "text/plain" data = "" if t == "html": data = "# name, 1min, 5min, 15min\n" data += pprint.pformat(self.current) + "\n" elif t == "json": #data = str(self.current) + "\n" # isn't that convenient? almost. data = json.dumps(self.current, indent=True) else: req.setResponseCode(http.BAD_REQUEST) data = "Unknown t= %s\n" % t req.setHeader("content-type", ctype) return data def remote_get_averages(self): return self.current def remote_subscribe(self, observer): self.observers.add(observer) def notify(self, observer): d = observer.callRemote("averages", self.current) def _error(f): log.msg("observer error, removing them") log.msg(f) self.observers.discard(observer) d.addErrback(_error) def poll(self): max_history = self.HISTORY_LIMIT / self.POLL_INTERVAL current = [] try: processes = read_pids_txt() except: log.err() return for (pid, name) in processes: if pid not in self.history: self.history[pid] = [] now = time.time() try: (user_seconds, sys_seconds) = read_cpu_times(pid) self.history[pid].append( (now, user_seconds, sys_seconds) ) while len(self.history[pid]) > max_history+1: self.history[pid].pop(0) except: log.msg("error reading process %s (%s), ignoring" % (pid, name)) log.err() try: # Newer protocols won't work in Python 2; when it is dropped, # protocol v4 can be used (added in Python 3.4). 
pickle.dump(self.history, open("history.pickle.tmp", "wb"), protocol=2) os.rename("history.pickle.tmp", "history.pickle") except: pass for (pid, name) in processes: row = [name] for avg in self.AVERAGES: row.append(self._average_N(pid, avg)) current.append(tuple(row)) self.current = current print(current) for ob in self.observers: eventual.eventually(self.notify, ob) def _average_N(self, pid, seconds): num_samples = seconds / self.POLL_INTERVAL samples = self.history[pid] if len(samples) < num_samples+1: return None first = -num_samples-1 elapsed_wall = samples[-1][0] - samples[first][0] elapsed_user = samples[-1][1] - samples[first][1] elapsed_sys = samples[-1][2] - samples[first][2] if elapsed_wall == 0.0: return 0.0 return (elapsed_user+elapsed_sys) / elapsed_wall application = service.Application("cpu-watcher") CPUWatcher().setServiceParent(application) tahoe_lafs-1.20.0/misc/operations_helpers/find-share-anomalies.py0000644000000000000000000000440613615410400022023 0ustar00#!/usr/bin/env python # feed this the results of 'tahoe catalog-shares' for all servers import sys chk_encodings = {} sdmf_encodings = {} sdmf_versions = {} for catalog in sys.argv[1:]: for line in open(catalog, "r").readlines(): line = line.strip() pieces = line.split() if pieces[0] == "CHK": ftype, si, kN, size, ueb_hash, expiration, filename = pieces if si not in chk_encodings: chk_encodings[si] = (set(), set()) chk_encodings[si][0].add( (si, kN) ) chk_encodings[si][1].add( line ) if pieces[0] == "SDMF": ftype, si, kN, size, ver, expiration, filename = pieces if si not in sdmf_encodings: sdmf_encodings[si] = (set(), set()) sdmf_encodings[si][0].add( (si, kN) ) sdmf_encodings[si][1].add( line ) if si not in sdmf_versions: sdmf_versions[si] = (set(), set()) sdmf_versions[si][0].add( ver ) sdmf_versions[si][1].add( line ) del si chk_multiple_encodings = [(si,lines) for si,(encodings,lines) in chk_encodings.items() if len(encodings) > 1] chk_multiple_encodings.sort() sdmf_multiple_encodings = [(si,lines) for si,(encodings,lines) in sdmf_encodings.items() if len(encodings) > 1 ] sdmf_multiple_encodings.sort() sdmf_multiple_versions = [(si,lines) for si,(versions,lines) in sdmf_versions.items() if len(versions) > 1] sdmf_multiple_versions.sort() if chk_multiple_encodings: print() print("CHK multiple encodings:") for (si,lines) in chk_multiple_encodings: print(" " + si) for line in sorted(lines): print(" " + line) if sdmf_multiple_encodings: print() print("SDMF multiple encodings:") for (si,lines) in sdmf_multiple_encodings: print(" " + si) for line in sorted(lines): print(" " + line) if sdmf_multiple_versions: print() print("SDMF multiple versions:") for (si,lines) in sdmf_multiple_versions: print(" " + si) for line in sorted(lines): print(" " + line) tahoe_lafs-1.20.0/misc/operations_helpers/munin/tahoe-conf0000644000000000000000000000137713615410400020563 0ustar00# put a copy of this file in /etc/munin/plugin-conf.d/tahoe-conf to let these # plugins know where the node's base directories are. Modify the lines below # to match your nodes. 
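# (Munin convention used below: within each [plugin] section, an
# "env.basedir_<name> <path>" line sets the environment variable
# "basedir_<name>" for that plugin, which is how the plugins locate the node
# base directories.)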
[tahoe-files] env.basedir_bs5c1 /home/amduser/tahoe/bs5c1 env.basedir_bs5c2 /home/amduser/tahoe/bs5c2 env.basedir_bs5c3 /home/amduser/tahoe/bs5c3 env.basedir_bs5c4 /home/amduser/tahoe/bs5c4 [tahoe-sharesperfile] env.basedir_bs5c1 /home/amduser/tahoe/bs5c1 env.basedir_bs5c2 /home/amduser/tahoe/bs5c2 env.basedir_bs5c3 /home/amduser/tahoe/bs5c3 env.basedir_bs5c4 /home/amduser/tahoe/bs5c4 [tahoe-storagespace] env.basedir_bs5c1 /home/amduser/tahoe/bs5c1 env.basedir_bs5c2 /home/amduser/tahoe/bs5c2 env.basedir_bs5c3 /home/amduser/tahoe/bs5c3 env.basedir_bs5c4 /home/amduser/tahoe/bs5c4 tahoe_lafs-1.20.0/misc/operations_helpers/munin/tahoe-stats.plugin-conf0000644000000000000000000000103013615410400023176 0ustar00[tahoe_storage_allocated] env.statsfile /home/robk/trees/tahoe/stats_gatherer/stats.json [tahoe_storage_consumed] env.statsfile /home/robk/trees/tahoe/stats_gatherer/stats.json [tahoe_runtime_load_avg] env.statsfile /home/robk/trees/tahoe/stats_gatherer/stats.json [tahoe_runtime_load_peak] env.statsfile /home/robk/trees/tahoe/stats_gatherer/stats.json [tahoe_storage_bytes_added] env.statsfile /home/robk/trees/tahoe/stats_gatherer/stats.json [tahoe_storage_bytes_freed] env.statsfile /home/robk/trees/tahoe/stats_gatherer/stats.json tahoe_lafs-1.20.0/misc/operations_helpers/munin/tahoe_cpu_watcher0000644000000000000000000000131713615410400022216 0ustar00#!/usr/bin/env python import os, sys, re import urllib import json url = os.environ["url"] current = json.loads(urllib.urlopen(url).read()) configinfo = """\ graph_title Tahoe CPU Usage graph_vlabel CPU % graph_category tahoe graph_info This graph shows the 5min average of CPU usage for each process """ data = "" for (name, avg1, avg5, avg15) in current: dataname = re.sub(r'[^\w]', '_', name) configinfo += dataname + ".label " + name + "\n" configinfo += dataname + ".draw LINE2\n" if avg5 is not None: data += dataname + ".value %.2f\n" % (100.0 * avg5) if len(sys.argv) > 1: if sys.argv[1] == "config": print(configinfo.rstrip()) sys.exit(0) print(data.rstrip()) tahoe_lafs-1.20.0/misc/operations_helpers/munin/tahoe_diskleft0000644000000000000000000000136413615410400021521 0ustar00#!/usr/bin/env python # This is a munin plugin which pulls data from the server in # misc/operations_helpers/spacetime/diskwatcher.tac . It produces a graph of how much free space # is left on all disks across the grid. The plugin should be configured with # env_url= pointing at the diskwatcher.tac webport. import os, sys, urllib, json if len(sys.argv) > 1 and sys.argv[1] == "config": print("""\ graph_title Tahoe Remaining Disk Space graph_vlabel bytes remaining graph_category tahoe graph_info This graph shows the total amount of disk space left available in the grid disk_left.label disk left disk_left.draw LINE1""") sys.exit(0) url = os.environ["url"] data = json.load(urllib.urlopen(url))["available"] print("disk_left.value", data) tahoe_lafs-1.20.0/misc/operations_helpers/munin/tahoe_disktotal0000644000000000000000000000161513615410400021711 0ustar00#!/usr/bin/env python # This is a munin plugin which pulls data from the server in # misc/operations_helpers/spacetime/diskwatcher.tac . It produces a graph of how much space is # present on all disks across the grid, and how much space is actually being # used. The plugin should be configured with env_url= pointing at the # diskwatcher.tac webport. 
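# A rough sketch of the JSON this plugin expects from the diskwatcher (values
# are illustrative; only the "total" and "used" fields are read below):
#   {"total": 5000000000000, "used": 1234567890123, ...}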
import os, sys, urllib, json if len(sys.argv) > 1 and sys.argv[1] == "config": print("""\ graph_title Tahoe Total Disk Space graph_vlabel bytes graph_category tahoe graph_info This graph shows the total amount of disk space present in the grid, and how much of it is currently being used. disk_total.label disk total disk_total.draw LINE2 disk_used.label disk used disk_used.draw LINE1""") sys.exit(0) url = os.environ["url"] data = json.load(urllib.urlopen(url)) print("disk_total.value", data["total"]) print("disk_used.value", data["used"]) tahoe_lafs-1.20.0/misc/operations_helpers/munin/tahoe_diskusage0000644000000000000000000000243313615410400021671 0ustar00#!/usr/bin/env python # This is a munin plugin which pulls data from the server in # misc/operations_helpers/spacetime/diskwatcher.tac . It produces a graph of how much disk space # is being used per unit time. The plugin should be configured with env_url= # pointing at the diskwatcher.tac webport. import os, sys, urllib, json if len(sys.argv) > 1 and sys.argv[1] == "config": print("""\ graph_title Tahoe Disk Usage Measurement graph_vlabel bytes per second graph_category tahoe graph_info This graph shows the estimated disk usage per unit time, totalled across all storage servers graph_args --lower-limit 0 --rigid rate_1hr.label (one hour sample) rate_1hr.draw LINE1 rate_1day.label (one day sample) rate_1day.draw LINE1 rate_2wk.label (two week sample) rate_2wk.draw LINE2 rate_4wk.label (four week sample) rate_4wk.draw LINE2""") sys.exit(0) url = os.environ["url"] timespans = json.load(urllib.urlopen(url))["rates"] data = dict([(name, growth) for (name, timespan, growth, timeleft) in timespans]) # growth is in bytes per second if "1hr" in data: print("rate_1hr.value", data["1hr"]) if "1day" in data: print("rate_1day.value", data["1day"]) if "2wk" in data: print("rate_2wk.value", data["2wk"]) if "4wk" in data: print("rate_4wk.value", data["4wk"]) tahoe_lafs-1.20.0/misc/operations_helpers/munin/tahoe_diskused0000644000000000000000000000134013615410400021521 0ustar00#!/usr/bin/env python # This is a munin plugin which pulls data from the server in # misc/operations_helpers/spacetime/diskwatcher.tac . It produces a graph of how much space is # used on all disks across the grid. The plugin should be configured with # env_url= pointing at the diskwatcher.tac webport. import os, sys, urllib, json if len(sys.argv) > 1 and sys.argv[1] == "config": print("""\ graph_title Tahoe Total Disk Space Used graph_vlabel bytes used graph_category tahoe graph_info This graph shows the total amount of disk space used across the grid disk_used.label disk used disk_used.draw LINE1""") sys.exit(0) url = os.environ["url"] data = json.load(urllib.urlopen(url))["used"] print("disk_used.value", data) tahoe_lafs-1.20.0/misc/operations_helpers/munin/tahoe_doomsday0000644000000000000000000000251113615410400021526 0ustar00#!/usr/bin/env python # This is a munin plugin which pulls data from the server in # misc/operations_helpers/spacetime/diskwatcher.tac . It produces a graph of how much time is # left before the grid fills up. The plugin should be configured with # env_url= pointing at the diskwatcher.tac webport. 
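# Illustrative sketch, defined but not used by this plugin: the diskwatcher
# "rates" value is assumed (as in the code below) to be a list of
# (name, timespan, growth, timeleft) tuples with timeleft in seconds, or a
# false value when no estimate is available.  This mirrors the
# seconds-to-days conversion the plugin performs for each sample window.
def _days_remaining(rates):
    DAY = 24 * 60 * 60
    return dict((name, timeleft / DAY)
                for (name, timespan, growth, timeleft) in rates
                if timeleft)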
import os, sys, urllib, json if len(sys.argv) > 1 and sys.argv[1] == "config": print("""\ graph_title Tahoe Remaining Time Predictor graph_vlabel days remaining graph_category tahoe graph_info This graph shows the estimated number of days left until storage space is exhausted days_1hr.label days left (one hour sample) days_1hr.draw LINE1 days_1day.label days left (one day sample) days_1day.draw LINE1 days_2wk.label days left (two week sample) days_2wk.draw LINE2 days_4wk.label days left (four week sample) days_4wk.draw LINE2""") sys.exit(0) url = os.environ["url"] timespans = json.load(urllib.urlopen(url))["rates"] data = dict([(name, timeleft) for (name, timespan, growth, timeleft) in timespans if timeleft]) # timeleft is in seconds DAY = 24*60*60 if "1hr" in data: print("days_1hr.value", data["1hr"]/DAY) if "1day" in data: print("days_1day.value", data["1day"]/DAY) if "2wk" in data: print("days_2wk.value", data["2wk"]/DAY) if "4wk" in data: print("days_4wk.value", data["4wk"]/DAY) tahoe_lafs-1.20.0/misc/operations_helpers/munin/tahoe_estimate_files0000644000000000000000000000310213615410400022701 0ustar00#!/usr/bin/env python import sys, os.path if len(sys.argv) > 1 and sys.argv[1] == "config": print("""\ graph_title Tahoe File Estimate graph_vlabel files graph_category tahoe graph_info This graph shows the estimated number of files and directories present in the grid files.label files files.draw LINE2""") sys.exit(0) # Edit this to point at some subset of storage directories. node_dirs = [os.path.expanduser("~amduser/prodnet/storage1"), os.path.expanduser("~amduser/prodnet/storage2"), os.path.expanduser("~amduser/prodnet/storage3"), os.path.expanduser("~amduser/prodnet/storage4"), ] sections = ["aa", "ab", "ac", "ad", "ae", "af", "ag", "ah", "ai", "aj"] # and edit this to reflect your default encoding's "total_shares" value, and # the total number of servers. N = 10 num_servers = 20 index_strings = set() for base in node_dirs: for section in sections: sampledir = os.path.join(base, "storage", "shares", section) indices = os.listdir(sampledir) index_strings.update(indices) unique_strings = len(index_strings) # the chance that any given file appears on any given server chance = 1.0 * N / num_servers # the chance that the file does *not* appear on the servers that we're # examining no_chance = (1-chance) ** len(node_dirs) # if a file has a 25% chance of not appearing in our sample, then we need to # raise our estimate by (1.25/1) correction = 1+no_chance #print "correction", correction files = unique_strings * (32*32/len(sections)) * correction print("files.value %d" % int(files)) tahoe_lafs-1.20.0/misc/operations_helpers/munin/tahoe_files0000644000000000000000000000337013615410400021015 0ustar00#!/usr/bin/env python # This is a munin plugin to track the number of files that each node's # StorageServer is holding on behalf of other nodes. Each file that has been # uploaded to the mesh (and has shares present on this node) will be counted # here. When there are <= 100 nodes in the mesh, this count will equal the # total number of files that are active in the entire mesh. When there are # 200 nodes present in the mesh, it will represent about half of the total # number. 
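# Illustrative sketch of that measurement (a hypothetical helper, not called
# by the plugin): the count is simply the number of share files under each
# node's storage/shares directory, skipping the "incoming" directory where
# partially-uploaded shares live.
def _count_shares(basedir):
    import os
    root = os.path.join(basedir, "storage", "shares")
    shares = 0
    for dirpath, dirnames, filenames in os.walk(root, topdown=True):
        if dirpath == root and "incoming" in dirnames:
            dirnames.remove("incoming")
        shares += len(filenames)
    return shares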
# Copy this plugin into /etc/munun/plugins/tahoe-files and then put # the following in your /etc/munin/plugin-conf.d/foo file to let it know # where to find the basedirectory for each node: # # [tahoe-files] # env.basedir_NODE1 /path/to/node1 # env.basedir_NODE2 /path/to/node2 # env.basedir_NODE3 /path/to/node3 # import os, sys nodedirs = [] for k,v in os.environ.items(): if k.startswith("basedir_"): nodename = k[len("basedir_"):] nodedirs.append( (nodename, v) ) nodedirs.sort() configinfo = \ """graph_title Allmydata Tahoe Filecount graph_vlabel files graph_category tahoe graph_info This graph shows the number of files hosted by this node's StorageServer """ for nodename, basedir in nodedirs: configinfo += "%s.label %s\n" % (nodename, nodename) configinfo += "%s.draw LINE2\n" % (nodename,) if len(sys.argv) > 1: if sys.argv[1] == "config": print(configinfo.rstrip()) sys.exit(0) for nodename, basedir in nodedirs: shares = 0 root = os.path.join(basedir, "storage", "shares") for dirpath, dirnames, filenames in os.walk(root, topdown=True): if dirpath == root and "incoming" in dirnames: dirnames.remove("incoming") shares += len(filenames) print("%s.value %d" % (nodename, shares)) tahoe_lafs-1.20.0/misc/operations_helpers/munin/tahoe_helperstats_active0000644000000000000000000000105713615410400023604 0ustar00#!/usr/bin/env python import os, sys import urllib import json configinfo = """\ graph_title Tahoe Helper Stats - Active Files graph_vlabel bytes graph_category tahoe graph_info This graph shows the number of files being actively processed by the helper fetched.label Active Files fetched.draw LINE2 """ if len(sys.argv) > 1: if sys.argv[1] == "config": print(configinfo.rstrip()) sys.exit(0) url = os.environ["url"] data = json.loads(urllib.urlopen(url).read()) print("fetched.value %d" % data["chk_upload_helper.active_uploads"]) tahoe_lafs-1.20.0/misc/operations_helpers/munin/tahoe_helperstats_fetched0000644000000000000000000000110413615410400023724 0ustar00#!/usr/bin/env python import os, sys import urllib import json configinfo = """\ graph_title Tahoe Helper Stats - Bytes Fetched graph_vlabel bytes graph_category tahoe graph_info This graph shows the amount of data being fetched by the helper fetched.label Bytes Fetched fetched.type GAUGE fetched.draw LINE1 fetched.min 0 """ if len(sys.argv) > 1: if sys.argv[1] == "config": print(configinfo.rstrip()) sys.exit(0) url = os.environ["url"] data = json.loads(urllib.urlopen(url).read()) print("fetched.value %d" % data["chk_upload_helper.fetched_bytes"]) tahoe_lafs-1.20.0/misc/operations_helpers/munin/tahoe_introstats0000644000000000000000000000153213615410400022123 0ustar00#!/usr/bin/env python import os, sys import urllib import json configinfo = """\ graph_title Tahoe Introducer Stats graph_vlabel hosts graph_category tahoe graph_info This graph shows the number of hosts announcing and subscribing to various services storage_server.label Storage Servers storage_server.draw LINE1 storage_hosts.label Distinct Storage Hosts storage_hosts.draw LINE1 storage_client.label Clients storage_client.draw LINE2 """ if len(sys.argv) > 1: if sys.argv[1] == "config": print(configinfo.rstrip()) sys.exit(0) url = os.environ["url"] data = json.loads(urllib.urlopen(url).read()) print("storage_server.value %d" % data["announcement_summary"]["storage"]) print("storage_hosts.value %d" % data["announcement_distinct_hosts"]["storage"]) print("storage_client.value %d" % data["subscription_summary"]["storage"]) 
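# Illustrative sketch, not used by the plugin above: tahoe_helperstats_*,
# tahoe_introstats and the other web-port plugins all follow the same shape --
# read a JSON statistics document from the node's web port (the "url"
# environment variable) and print one value per data series.  Under Python 3
# that fetch could be written as follows; the helper names are hypothetical.
def _fetch_stats(url):
    import json
    from urllib.request import urlopen  # Python 3 replacement for urllib.urlopen
    return json.loads(urlopen(url).read())

def _lookup(stats, *keys):
    # e.g. _lookup(stats, "announcement_summary", "storage")
    value = stats
    for key in keys:
        value = value[key]
    return value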
tahoe_lafs-1.20.0/misc/operations_helpers/munin/tahoe_nodememory0000644000000000000000000000420113615410400022063 0ustar00#!/usr/bin/env python # This munin plugin isolates processes by looking for the 'pid' file created # by 'allmydata start', then extracts the amount of memory they consume (both # VmSize and VmRSS) from /proc import os, sys, re # for testing # os.environ["nodememory_warner1"] = "run/warner1" # os.environ["nodememory_warner2"] = "run/warner2" nodedirs = [] for k,v in os.environ.items(): if k.startswith("nodememory_"): nodename = k[len("nodememory_"):] nodedirs.append((nodename, v)) nodedirs.sort(lambda a,b: cmp(a[0],b[0])) pids = {} for node,nodedir in nodedirs: pidfile = os.path.join(nodedir, "twistd.pid") if os.path.exists(pidfile): pid = int(open(pidfile,"r").read()) pids[node] = pid fields = ["VmSize", "VmRSS"] if len(sys.argv) > 1: if sys.argv[1] == "config": configinfo = \ """graph_title Memory Consumed by Nodes graph_vlabel bytes graph_category Tahoe graph_info This graph shows the memory used by specific processes """ for nodename,nodedir in nodedirs: for f in fields: configinfo += "%s_%s.label %s used by %s\n" % (nodename, f, f, nodename) linetype = "LINE1" if f == "VmSize": linetype = "LINE2" configinfo += "%s_%s.draw %s\n" % (nodename, f, linetype) if f == "VmData": configinfo += "%s_%s.graph no\n" % (nodename, f) print(configinfo) sys.exit(0) nodestats = {} for node,pid in pids.items(): stats = {} statusfile = "/proc/%s/status" % pid if not os.path.exists(statusfile): continue for line in open(statusfile,"r").readlines(): for f in fields: if line.startswith(f + ":"): m = re.search(r'(\d+)', line) stats[f] = int(m.group(1)) nodestats[node] = stats for node,stats in nodestats.items(): for f,value in stats.items(): # TODO: not sure if /proc/%d/status means 1000 or 1024 when it says # 'kB' print("%s_%s.value %d" % (node, f, 1024*value)) tahoe_lafs-1.20.0/misc/operations_helpers/munin/tahoe_overhead0000644000000000000000000000561513615410400021514 0ustar00#!/usr/bin/env python # This is a munin plugin which pulls total-used data from the server in # misc/operations_helpers/spacetime/diskwatcher.tac, and a total-deep-size number from custom # PHP database-querying scripts on a different server. It produces a graph of # how much garbage/overhead is present in the grid: the ratio of total-used # over (total-deep-size*N/k), expressed as a percentage. No overhead would be # 0, using twice as much space as we'd prefer would be 100. This is the # percentage which could be saved if we made GC work perfectly and reduced # other forms of overhead to zero. This script assumes 3-of-10. # A second graph is produced with how much of the total-deep-size number # would be saved if we removed data from inactive accounts. This is also on a # percentage scale. # A separate number (without a graph) is produced with the "effective # expansion factor". If there were no overhead, with 3-of-10, this would be # 3.33 . # Overhead is caused by the following problems (in order of size): # uncollected garbage: files that are no longer referenced but not yet deleted # inactive accounts: files that are referenced by cancelled accounts # share storage overhead: bucket directories # filesystem overhead: 4kB minimum block sizes # share overhead: hashes, pubkeys, lease information # This plugin should be configured with env_diskwatcher_url= pointing at the # diskwatcher.tac webport, and env_deepsize_url= pointing at the PHP script. 
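# Illustrative sketch, defined but not called here: the "overhead" number
# described above, assuming the 3-of-10 encoding this script hard-codes.
# Ideal usage is deep_size * N / k bytes of share data; overhead is how far
# the usage measured by the diskwatcher exceeds that ideal, as a fraction
# (the graph below reports it multiplied by 100).
def _overhead_fraction(total_used_bytes, deep_size_bytes, k=3, n=10):
    ideal = (float(n) / k) * deep_size_bytes
    return (total_used_bytes - ideal) / ideal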
import os, sys, urllib, json if len(sys.argv) > 1 and sys.argv[1] == "config": print("""\ graph_title Tahoe Overhead Calculator graph_vlabel Percentage graph_category tahoe graph_info This graph shows the estimated amount of storage overhead (ratio of actual disk usage to ideal disk usage). The 'overhead' number is how much space we could save if we implemented GC, and the 'inactive' number is how much additional space we could save if we could delete data for cancelled accounts. overhead.label disk usage overhead overhead.draw LINE2 inactive.label inactive account usage inactive.draw LINE1 effective_expansion.label Effective Expansion Factor effective_expansion.graph no""") sys.exit(0) diskwatcher_url = os.environ["diskwatcher_url"] total = json.load(urllib.urlopen(diskwatcher_url))["used"] deepsize_url = os.environ["deepsize_url"] deepsize = json.load(urllib.urlopen(deepsize_url)) k = 3; N = 10 expansion = float(N) / k ideal = expansion * deepsize["all"] overhead = (total - ideal) / ideal if overhead > 0: # until all the storage-servers come online, this number will be nonsense print("overhead.value %f" % (100.0 * overhead)) # same for this one effective_expansion = total / deepsize["all"] print("effective_expansion.value %f" % effective_expansion) # this value remains valid, though inactive_savings = (deepsize["all"] - deepsize["active"]) / deepsize["active"] print("inactive.value %f" % (100.0 * inactive_savings)) tahoe_lafs-1.20.0/misc/operations_helpers/munin/tahoe_rootdir_space0000644000000000000000000000077213615410400022553 0ustar00#!/usr/bin/env python import os, sys import urllib configinfo = """\ graph_title Tahoe Root Directory Size graph_vlabel bytes graph_category tahoe graph_info This graph shows the amount of space consumed by all files reachable from a given directory space.label Space space.draw LINE2 """ if len(sys.argv) > 1: if sys.argv[1] == "config": print(configinfo.rstrip()) sys.exit(0) url = os.environ["url"] data = int(urllib.urlopen(url).read().strip()) print("space.value %d" % data) tahoe_lafs-1.20.0/misc/operations_helpers/munin/tahoe_server_latency_0000644000000000000000000000601313615410400023074 0ustar00#!/usr/bin/env python # retrieve a latency statistic for a given operation and percentile from a # set of storage servers. # the OPERATION value should come from the following list: # allocate: allocate_buckets, first step to upload an immutable file # write: write data to an immutable share # close: finish writing to an immutable share # cancel: abandon a partial immutable share # get: get_buckets, first step to download an immutable file # read: read data from an immutable share # writev: slot_testv_and_readv_and_writev, modify/create a directory # readv: read a directory (or mutable file) # the PERCENTILE value should come from the following list: # 01_0: 1% # 10_0: 10% # 50_0: 50% (median) # 90_0: 90% # 99_0: 99% # 99_9: 99.9% # mean: # To use this, create a symlink from # /etc/munin/plugins/tahoe_server_latency_OPERATION_PERCENTILE to this # script. 
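# (The OPERATION and PERCENTILE parts of the symlink name are parsed back out
# of the plugin's own filename and turned into a stats key of the form
# "storage_server.latencies.OPERATION.PERCENTILE_percentile" -- or
# "storage_server.latencies.OPERATION.mean" -- which is then looked up in the
# "stats" section of each node's statistics JSON.)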
For example: # ln -s /usr/share/munin/plugins/tahoe_server_latency_ \ # /etc/munin/plugins/tahoe_server_latency_allocate_99_9 # Also, you will need to put a list of node statistics URLs in the plugin's # environment, by adding a stanza like the following to a file in # /etc/munin/plugin-conf.d/, such as /etc/munin/plugin-conf.d/tahoe_latencies: # # [tahoe_server_latency*] # env.url_storage1 http://localhost:9011/statistics?t=json # env.url_storage2 http://localhost:9012/statistics?t=json # env.url_storage3 http://localhost:9013/statistics?t=json # env.url_storage4 http://localhost:9014/statistics?t=json # of course, these URLs must match the webports you have configured into the # storage nodes. import os, sys import urllib import json node_urls = [] for k,v in os.environ.items(): if k.startswith("url_"): nodename = k[len("url_"):] node_urls.append( (nodename, v) ) node_urls.sort() my_name = os.path.basename(sys.argv[0]) PREFIX = "tahoe_server_latency_" assert my_name.startswith(PREFIX) my_name = my_name[len(PREFIX):] (operation, percentile) = my_name.split("_", 1) if percentile == "mean": what = "mean" else: what = percentile.replace("_", ".") + "th percentile" configinfo = \ """graph_title Tahoe Server '%(operation)s' Latency (%(what)s) graph_vlabel seconds graph_category tahoe graph_info This graph shows how long '%(operation)s' operations took on the storage server, the %(what)s delay between message receipt and response generation, calculated over the last thousand operations. """ % {'operation': operation, 'what': what} for nodename, url in node_urls: configinfo += "%s.label %s\n" % (nodename, nodename) configinfo += "%s.draw LINE2\n" % (nodename,) if len(sys.argv) > 1: if sys.argv[1] == "config": print(configinfo.rstrip()) sys.exit(0) for nodename, url in node_urls: data = json.loads(urllib.urlopen(url).read()) if percentile == "mean": p_key = "mean" else: p_key = percentile + "_percentile" key = "storage_server.latencies.%s.%s" % (operation, p_key) value = data["stats"][key] print("%s.value %s" % (nodename, value)) tahoe_lafs-1.20.0/misc/operations_helpers/munin/tahoe_server_operations_0000644000000000000000000000500613615410400023621 0ustar00#!/usr/bin/env python # graph operations-per-second from a set of storage servers. # the OPERATION value should come from the following list: # allocate: allocate_buckets, first step to upload an immutable file # write: write data to an immutable share # close: finish writing to an immutable share # cancel: abandon a partial immutable share # get: get_buckets, first step to download an immutable file # read: read data from an immutable share # writev: slot_testv_and_readv_and_writev, modify/create a directory # readv: read a directory (or mutable file) # To use this, create a symlink from # /etc/munin/plugins/tahoe_server_operations_OPERATION to this script. 
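# (The storage-server counters these operations come from are cumulative
# totals; each field below is declared "type DERIVE" with "min 0", which asks
# munin/RRDtool to graph the per-second rate of change instead of the raw
# counter, and to discard the negative jump that appears when a node restarts
# and its counters reset.)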
For # example: # ln -s /usr/share/munin/plugins/tahoe_server_operations_ \ # /etc/munin/plugins/tahoe_server_operations_allocate # Also, you will need to put a list of node statistics URLs in the plugin's # environment, by adding a stanza like the following to a file in # /etc/munin/plugin-conf.d/, such as /etc/munin/plugin-conf.d/tahoe_operations: # # [tahoe_server_operations*] # env.url_storage1 http://localhost:9011/statistics?t=json # env.url_storage2 http://localhost:9012/statistics?t=json # env.url_storage3 http://localhost:9013/statistics?t=json # env.url_storage4 http://localhost:9014/statistics?t=json # of course, these URLs must match the webports you have configured into the # storage nodes. import os, sys import urllib import json node_urls = [] for k,v in os.environ.items(): if k.startswith("url_"): nodename = k[len("url_"):] node_urls.append( (nodename, v) ) node_urls.sort() my_name = os.path.basename(sys.argv[0]) PREFIX = "tahoe_server_operations_" assert my_name.startswith(PREFIX) operation = my_name[len(PREFIX):] configinfo = \ """graph_title Tahoe Server '%(operation)s' Operations graph_vlabel ops per second graph_category tahoe graph_info This graph shows how many '%(operation)s' operations take place on the storage server """ % {'operation': operation} for nodename, url in node_urls: configinfo += "%s.label %s\n" % (nodename, nodename) configinfo += "%s.type DERIVE\n" % (nodename,) configinfo += "%s.min 0\n" % (nodename,) configinfo += "%s.draw LINE2\n" % (nodename,) if len(sys.argv) > 1: if sys.argv[1] == "config": print(configinfo.rstrip()) sys.exit(0) for nodename, url in node_urls: data = json.loads(urllib.urlopen(url).read()) key = "storage_server.%s" % operation value = data["counters"][key] print("%s.value %s" % (nodename, value)) tahoe_lafs-1.20.0/misc/operations_helpers/munin/tahoe_spacetime0000644000000000000000000000703013615410400021662 0ustar00#!/usr/bin/env python # copy .rrd files from a remote munin master host, sum the 'df' stats from a # list of hosts, use them to estimate a rate-of-change for the past month, # then extrapolate to guess how many weeks/months/years of storage space we # have left, and output it to another munin graph import sys, os, time import rrdtool MUNIN_HOST = "munin.allmydata.com" PREFIX = "%s:/var/lib/munin/prodtahoe/" % MUNIN_HOST FILES = [ "prodtahoe%d.allmydata.com-df-_dev_sd%s3-g.rrd" % (a,b) for a in (1,2,3,4,5) for b in ("a", "b", "c", "d") ] REMOTEFILES = [ PREFIX + f for f in FILES ] LOCALFILES = ["/var/lib/munin/prodtahoe/" + f for f in FILES ] WEBFILE = "/var/www/tahoe/spacetime.json" def rsync_rrd(): # copy the RRD files from your munin master host to a local one cmd = "rsync %s rrds/" % (" ".join(REMOTEFILES)) rc = os.system(cmd) assert rc == 0, rc def format_time(t): return time.strftime("%b %d %H:%M", time.localtime(t)) def predict_future(past_s): start_df = [] end_df = [] durations = [] for fn in LOCALFILES: d = rrdtool.fetch(fn, "AVERAGE", "-s", "-"+past_s, "-e", "-1hr") # ((start, end, step), (name1, name2, ...), [(data1, data2, ..), ...]) (start_time, end_time ,step) = d[0] #print format_time(start_time), " - ", format_time(end_time), step #for points in d[2]: # point = points[0] # print point start_space = d[2][0][0] if start_space is None: return None # I don't know why, but the last few points are always bogus. Running # 'rrdtool fetch' on the command line is usually ok.. I blame the python # bindinds. 
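# (Descriptive note: d[2] is the list of data rows returned by
# rrdtool.fetch(); indexing with [-4] discards the last few rows -- the ones
# observed to be bogus -- and end_time is pulled back by 4*step below so the
# elapsed-time calculation matches the sample actually used.)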
end_space = d[2][-4][0] if end_space is None: return None end_time = end_time - (4*step) start_df.append(start_space) end_df.append(end_space) durations.append(end_time - start_time) avg_start_df = sum(start_df) / len(start_df) avg_end_df = sum(end_df) / len(end_df) avg_duration = sum(durations) / len(durations) #print avg_start_df, avg_end_df, avg_duration rate = (avg_end_df - avg_start_df) / avg_duration #print "Rate", rate, " %/s" #print "measured over", avg_duration / 86400, "days" remaining = 100 - avg_end_df remaining_seconds = remaining / rate #print "remaining seconds", remaining_seconds remaining_days = remaining_seconds / 86400 #print "remaining days", remaining_days return remaining_days def write_to_file(samples): # write a JSON-formatted dictionary f = open(WEBFILE + ".tmp", "w") f.write("{ ") f.write(", ".join(['"%s": %s' % (k, samples[k]) for k in sorted(samples.keys())])) f.write("}\n") f.close() os.rename(WEBFILE + ".tmp", WEBFILE) if len(sys.argv) > 1 and sys.argv[1] == "config": print("""\ graph_title Tahoe Remaining Space Predictor graph_vlabel days remaining graph_category tahoe graph_info This graph shows the estimated number of days left until storage space is exhausted days_2wk.label days left (2wk sample) days_2wk.draw LINE2 days_4wk.label days left (4wk sample) days_4wk.draw LINE2""") sys.exit(0) #rsync_rrd() samples = {} remaining_4wk = predict_future("4wk") if remaining_4wk is not None: print("days_4wk.value", remaining_4wk) samples["remaining_4wk"] = remaining_4wk remaining_2wk = predict_future("2wk") if remaining_2wk is not None: print("days_2wk.value", remaining_2wk) samples["remaining_2wk"] = remaining_2wk write_to_file(samples) tahoe_lafs-1.20.0/misc/operations_helpers/munin/tahoe_stats0000644000000000000000000005613613615410400021061 0ustar00#!/usr/bin/env python import os import json import re import sys import time STAT_VALIDITY = 300 # 5min limit on reporting stats PLUGINS = { # LOAD AVERAGE 'tahoe_runtime_load_avg': { 'statid': 'load_monitor.avg_load', 'category': 'stats', 'configheader': '\n'.join(['graph_title Tahoe Runtime Load Average', 'graph_vlabel load', 'graph_category tahoe', 'graph_info This graph shows average reactor delay', ]), 'graph_config': '\n'.join(['%(name)s.label %(name)s', '%(name)s.draw LINE1', ]), 'graph_render': '\n'.join(['%(name)s.value %(value)s', ]), }, 'tahoe_runtime_load_peak': { 'statid': 'load_monitor.max_load', 'category': 'stats', 'configheader': '\n'.join(['graph_title Tahoe Runtime Load Peak', 'graph_vlabel load', 'graph_category tahoe', 'graph_info This graph shows peak reactor delay', ]), 'graph_config': '\n'.join(['%(name)s.label %(name)s', '%(name)s.draw LINE1', ]), 'graph_render': '\n'.join(['%(name)s.value %(value)s', ]), }, # STORAGE ALLOCATION (BYTES) 'tahoe_storage_consumed': { 'statid': 'storage_server.consumed', 'category': 'stats', 'configheader': '\n'.join(['graph_title Tahoe Storage Server Space Consumed', 'graph_vlabel bytes', 'graph_category tahoe_storage_server', 'graph_info This graph shows space consumed', 'graph_args --base 1024', ]), 'graph_config': '\n'.join(['%(name)s.label %(name)s', '%(name)s.draw LINE1', ]), 'graph_render': '\n'.join(['%(name)s.value %(value)s', ]), }, 'tahoe_storage_allocated': { 'statid': 'storage_server.allocated', 'category': 'stats', 'configheader': '\n'.join(['graph_title Tahoe Storage Server Space Allocated', 'graph_vlabel bytes', 'graph_category tahoe_storage_server', 'graph_info This graph shows space allocated', 'graph_args --base 1024', ]), 'graph_config': 
'\n'.join(['%(name)s.label %(name)s', '%(name)s.draw LINE1', ]), 'graph_render': '\n'.join(['%(name)s.value %(value)s', ]), }, 'tahoe_storage_bytes_added': { 'statid': 'storage_server.bytes_added', 'category': 'counters', 'configheader': '\n'.join(['graph_title Tahoe Storage Server Bytes Added', 'graph_vlabel bytes', 'graph_category tahoe_storage_server', 'graph_info This graph shows cummulative bytes added', ]), 'graph_config': '\n'.join(['%(name)s.label %(name)s', '%(name)s.draw LINE1', ]), 'graph_render': '\n'.join(['%(name)s.value %(value)s', ]), }, 'tahoe_storage_bytes_freed': { 'statid': 'storage_server.bytes_freed', 'category': 'counters', 'configheader': '\n'.join(['graph_title Tahoe Storage Server Bytes Removed', 'graph_vlabel bytes', 'graph_category tahoe_storage_server', 'graph_info This graph shows cummulative bytes removed', ]), 'graph_config': '\n'.join(['%(name)s.label %(name)s', '%(name)s.draw LINE1', ]), 'graph_render': '\n'.join(['%(name)s.value %(value)s', ]), }, 'tahoe_storage_operations_allocate': { 'statid': 'storage_server.allocate', 'category': 'counters', 'configheader': '\n'.join(['graph_title Tahoe Storage Server Allocate_Bucket Operations', 'graph_vlabel operations per second', 'graph_category tahoe_storage_server', 'graph_info This graph shows how many allocate_buckets operations occured per second. Each immutable file upload causes one such operation per server.', ]), 'graph_config': '\n'.join(['%(name)s.label %(name)s', '%(name)s.type DERIVE', '%(name)s.min 0', '%(name)s.draw LINE1', ]), 'graph_render': '\n'.join(['%(name)s.value %(value)s', ]), }, 'tahoe_storage_operations_get': { 'statid': 'storage_server.get', 'category': 'counters', 'configheader': '\n'.join(['graph_title Tahoe Storage Server get_bucket Operations', 'graph_vlabel operations per second', 'graph_category tahoe_storage_server', 'graph_info This graph shows how many get_bucket operations occured per second. Each immutable file download/check causes one such operation per server.', ]), 'graph_config': '\n'.join(['%(name)s.label %(name)s', '%(name)s.type DERIVE', '%(name)s.min 0', '%(name)s.draw LINE1', ]), 'graph_render': '\n'.join(['%(name)s.value %(value)s', ]), }, 'tahoe_storage_operations_writev': { 'statid': 'storage_server.writev', 'category': 'counters', 'configheader': '\n'.join(['graph_title Tahoe Storage Server writev Operations', 'graph_vlabel operations per second', 'graph_category tahoe_storage_server', 'graph_info This graph shows how many writev operations occured per second. Each mutable file / dirnode write causes one such operation per server.', ]), 'graph_config': '\n'.join(['%(name)s.label %(name)s', '%(name)s.type DERIVE', '%(name)s.min 0', '%(name)s.draw LINE1', ]), 'graph_render': '\n'.join(['%(name)s.value %(value)s', ]), }, 'tahoe_storage_operations_readv': { 'statid': 'storage_server.readv', 'category': 'counters', 'configheader': '\n'.join(['graph_title Tahoe Storage Server readv Operations', 'graph_vlabel operations per second', 'graph_category tahoe_storage_server', 'graph_info This graph shows how many readv operations occured per second. 
Each dirnode read causes one such operation per server.', ]), 'graph_config': '\n'.join(['%(name)s.label %(name)s', '%(name)s.type DERIVE', '%(name)s.min 0', '%(name)s.draw LINE1', ]), 'graph_render': '\n'.join(['%(name)s.value %(value)s', ]), }, # HELPER 'tahoe_helper_incoming_files': { 'statid': 'chk_upload_helper.incoming_count', 'category': 'stats', 'configheader': '\n'.join(['graph_title Tahoe Upload Helper Incoming File Count', 'graph_vlabel n files', 'graph_category tahoe_helper', 'graph_info This graph shows number of incoming files', ]), 'graph_config': '\n'.join(['%(name)s.label %(name)s', '%(name)s.draw LINE1', ]), 'graph_render': '\n'.join(['%(name)s.value %(value)s', ]), }, 'tahoe_helper_incoming_filesize': { 'statid': 'chk_upload_helper.incoming_size', 'category': 'stats', 'configheader': '\n'.join(['graph_title Tahoe Upload Helper Incoming File Size', 'graph_vlabel bytes', 'graph_category tahoe_helper', 'graph_info This graph shows total size of incoming files', ]), 'graph_config': '\n'.join(['%(name)s.label %(name)s', '%(name)s.draw LINE1', ]), 'graph_render': '\n'.join(['%(name)s.value %(value)s', ]), }, 'tahoe_helper_incoming_files_old': { 'statid': 'chk_upload_helper.incoming_size_old', 'category': 'stats', 'configheader': '\n'.join(['graph_title Tahoe Upload Helper Incoming Old Files', 'graph_vlabel bytes', 'graph_category tahoe_helper', 'graph_info This graph shows total size of old incoming files', ]), 'graph_config': '\n'.join(['%(name)s.label %(name)s', '%(name)s.draw LINE1', ]), 'graph_render': '\n'.join(['%(name)s.value %(value)s', ]), }, 'tahoe_helper_encoding_files': { 'statid': 'chk_upload_helper.encoding_count', 'category': 'stats', 'configheader': '\n'.join(['graph_title Tahoe Upload Helper Encoding File Count', 'graph_vlabel n files', 'graph_category tahoe_helper', 'graph_info This graph shows number of encoding files', ]), 'graph_config': '\n'.join(['%(name)s.label %(name)s', '%(name)s.draw LINE1', ]), 'graph_render': '\n'.join(['%(name)s.value %(value)s', ]), }, 'tahoe_helper_encoding_filesize': { 'statid': 'chk_upload_helper.encoding_size', 'category': 'stats', 'configheader': '\n'.join(['graph_title Tahoe Upload Helper Encoding File Size', 'graph_vlabel bytes', 'graph_category tahoe_helper', 'graph_info This graph shows total size of encoding files', ]), 'graph_config': '\n'.join(['%(name)s.label %(name)s', '%(name)s.draw LINE1', ]), 'graph_render': '\n'.join(['%(name)s.value %(value)s', ]), }, 'tahoe_helper_encoding_files_old': { 'statid': 'chk_upload_helper.encoding_size_old', 'category': 'stats', 'configheader': '\n'.join(['graph_title Tahoe Upload Helper Encoding Old Files', 'graph_vlabel bytes', 'graph_category tahoe_helper', 'graph_info This graph shows total size of old encoding files', ]), 'graph_config': '\n'.join(['%(name)s.label %(name)s', '%(name)s.draw LINE1', ]), 'graph_render': '\n'.join(['%(name)s.value %(value)s', ]), }, 'tahoe_helper_active_uploads': { 'statid': 'chk_upload_helper.active_uploads', 'category': 'stats', 'configheader': '\n'.join(['graph_title Tahoe Upload Helper Active Files', 'graph_vlabel n files', 'graph_category tahoe_helper', 'graph_info This graph shows number of files actively being processed by the helper', ]), 'graph_config': '\n'.join(['%(name)s.label %(name)s', '%(name)s.draw LINE1', ]), 'graph_render': '\n'.join(['%(name)s.value %(value)s', ]), }, 'tahoe_helper_upload_requests': { 'statid': 'chk_upload_helper.upload_requests', 'category': 'counters', 'configheader': '\n'.join(['graph_title Tahoe Upload Helper 
Upload Requests', 'graph_vlabel requests', 'graph_category tahoe_helper', 'graph_info This graph shows the number of upload requests arriving at the helper', ]), 'graph_config': '\n'.join(['%(name)s.label %(name)s', '%(name)s.type DERIVE', '%(name)s.min 0', '%(name)s.draw LINE1', ]), 'graph_render': '\n'.join(['%(name)s.value %(value)s', ]), }, 'tahoe_helper_upload_already_present': { 'statid': 'chk_upload_helper.upload_already_present', 'category': 'counters', 'configheader': '\n'.join(['graph_title Tahoe Upload Helper Uploads Already Present', 'graph_vlabel requests', 'graph_category tahoe_helper', 'graph_info This graph shows the number of uploads whose files are already present in the grid', ]), 'graph_config': '\n'.join(['%(name)s.label %(name)s', '%(name)s.type DERIVE', '%(name)s.min 0', '%(name)s.draw LINE1', ]), 'graph_render': '\n'.join(['%(name)s.value %(value)s', ]), }, 'tahoe_helper_upload_need_upload': { 'statid': 'chk_upload_helper.upload_need_upload', 'category': 'counters', 'configheader': '\n'.join(['graph_title Tahoe Upload Helper Uploads Needing Upload', 'graph_vlabel requests', 'graph_category tahoe_helper', 'graph_info This graph shows the number of uploads whose files are not already present in the grid', ]), 'graph_config': '\n'.join(['%(name)s.label %(name)s', '%(name)s.type DERIVE', '%(name)s.min 0', '%(name)s.draw LINE1', ]), 'graph_render': '\n'.join(['%(name)s.value %(value)s', ]), }, 'tahoe_helper_encoded_bytes': { 'statid': 'chk_upload_helper.encoded_bytes', 'category': 'counters', 'configheader': '\n'.join(['graph_title Tahoe Upload Helper Encoded Bytes', 'graph_vlabel bytes', 'graph_category tahoe_helper', 'graph_info This graph shows the number of bytes encoded by the helper', ]), 'graph_config': '\n'.join(['%(name)s.label %(name)s', '%(name)s.type DERIVE', '%(name)s.min 0', '%(name)s.draw LINE1', ]), 'graph_render': '\n'.join(['%(name)s.value %(value)s', ]), }, 'tahoe_helper_fetched_bytes': { 'statid': 'chk_upload_helper.fetched_bytes', 'category': 'counters', 'configheader': '\n'.join(['graph_title Tahoe Upload Helper Fetched Bytes', 'graph_vlabel bytes', 'graph_category tahoe_helper', 'graph_info This graph shows the number of bytes fetched by the helper', ]), 'graph_config': '\n'.join(['%(name)s.label %(name)s', '%(name)s.type DERIVE', '%(name)s.min 0', '%(name)s.draw LINE1', ]), 'graph_render': '\n'.join(['%(name)s.value %(value)s', ]), }, # WEBAPI 'tahoe_uploader_bytes_uploaded': { 'statid': 'uploader.bytes_uploaded', 'category': 'counters', 'configheader': '\n'.join(['graph_title Tahoe Uploader Bytes Uploaded', 'graph_vlabel bytes', 'graph_category tahoe_traffic', 'graph_info This graph shows the number of bytes uploaded', ]), 'graph_config': '\n'.join(['%(name)s.label %(name)s', '%(name)s.type DERIVE', '%(name)s.min 0', '%(name)s.draw LINE1', ]), 'graph_render': '\n'.join(['%(name)s.value %(value)s', ]), }, 'tahoe_uploader_files_uploaded': { 'statid': 'uploader.files_uploaded', 'category': 'counters', 'configheader': '\n'.join(['graph_title Tahoe Uploader Bytes Uploaded', 'graph_vlabel files', 'graph_category tahoe_traffic', 'graph_info This graph shows the number of files uploaded', ]), 'graph_config': '\n'.join(['%(name)s.label %(name)s', '%(name)s.type DERIVE', '%(name)s.min 0', '%(name)s.draw LINE1', ]), 'graph_render': '\n'.join(['%(name)s.value %(value)s', ]), }, 'tahoe_mutable_files_published': { 'statid': 'mutable.files_published', 'category': 'counters', 'configheader': '\n'.join(['graph_title Tahoe Mutable Files Published', 'graph_vlabel 
files', 'graph_category tahoe_traffic', 'graph_info This graph shows the number of mutable files published', ]), 'graph_config': '\n'.join(['%(name)s.label %(name)s', '%(name)s.type DERIVE', '%(name)s.min 0', '%(name)s.draw LINE1', ]), 'graph_render': '\n'.join(['%(name)s.value %(value)s', ]), }, 'tahoe_mutable_files_retrieved': { 'statid': 'mutable.files_retrieved', 'category': 'counters', 'configheader': '\n'.join(['graph_title Tahoe Mutable Files Retrieved', 'graph_vlabel files', 'graph_category tahoe_traffic', 'graph_info This graph shows the number of files retrieved', ]), 'graph_config': '\n'.join(['%(name)s.label %(name)s', '%(name)s.type DERIVE', '%(name)s.min 0', '%(name)s.draw LINE1', ]), 'graph_render': '\n'.join(['%(name)s.value %(value)s', ]), }, } def smash_name(name): return re.sub('[^a-zA-Z0-9]', '_', name) def open_stats(fname): f = open(fname, 'rb') stats = json.load(f) f.close() return stats def main(argv): graph_name = os.path.basename(argv[0]) if graph_name.endswith('.py'): graph_name = graph_name[:-3] plugin_conf = PLUGINS.get(graph_name) for k,v in os.environ.items(): if k.startswith('statsfile'): stats_file = v break else: raise RuntimeError("No 'statsfile' env var found") stats = open_stats(stats_file) now = time.time() def output_nodes(output_section, check_time): for tubid, nodestats in stats.items(): if check_time and (now - nodestats.get('timestamp', 0)) > STAT_VALIDITY: continue name = smash_name("%s_%s" % (nodestats['nickname'], tubid[:4])) #value = nodestats['stats'][plugin_conf['category']].get(plugin_conf['statid']) category = plugin_conf['category'] statid = plugin_conf['statid'] value = nodestats['stats'][category].get(statid) if value is not None: args = { 'name': name, 'value': value } print(plugin_conf[output_section] % args) if len(argv) > 1: if sys.argv[1] == 'config': print(plugin_conf['configheader']) output_nodes('graph_config', False) sys.exit(0) output_nodes('graph_render', True) if __name__ == '__main__': main(sys.argv) tahoe_lafs-1.20.0/misc/operations_helpers/munin/tahoe_storagespace0000644000000000000000000000334313615410400022373 0ustar00#!/usr/bin/env python # This is a munin plugin to track the amount of disk space each node's # StorageServer is consuming on behalf of other nodes. This is where the # shares are kept. If there are N nodes present in the mesh, the total space # consumed by the entire mesh will be about N times the space reported by # this plugin. # Copy this plugin into /etc/munun/plugins/tahoe_storagespace and then put # the following in your /etc/munin/plugin-conf.d/foo file to let it know # where to find the basedirectory for each node: # # [tahoe_storagespace] # env.basedir_NODE1 /path/to/node1 # env.basedir_NODE2 /path/to/node2 # env.basedir_NODE3 /path/to/node3 # # Allmydata-tahoe must be installed on the system where this plugin is used, # since it imports a utility module from allmydata.utils . 
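# Illustrative sketch, defined but not called: the plugin below shells out to
# `du --bytes --summarize` via the Python 2 `commands` module.  A roughly
# equivalent pure-Python measurement (the totals may differ slightly from
# du's accounting) could walk the storage tree instead:
def _storage_bytes(basedir):
    import os
    total = 0
    for dirpath, dirnames, filenames in os.walk(os.path.join(basedir, "storage")):
        for name in filenames:
            try:
                total += os.path.getsize(os.path.join(dirpath, name))
            except OSError:
                pass  # a share may be removed while we are walking
    return total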
import os, sys import commands nodedirs = [] for k,v in os.environ.items(): if k.startswith("basedir_"): nodename = k[len("basedir_"):] nodedirs.append( (nodename, v) ) nodedirs.sort() seriesname = "storage" configinfo = \ """graph_title Allmydata Tahoe Shareholder Space graph_vlabel bytes graph_category tahoe graph_info This graph shows the space consumed by this node's StorageServer """ for nodename, basedir in nodedirs: configinfo += "%s.label %s\n" % (nodename, nodename) configinfo += "%s.draw LINE2\n" % (nodename,) if len(sys.argv) > 1: if sys.argv[1] == "config": print(configinfo.rstrip()) sys.exit(0) for nodename, basedir in nodedirs: cmd = "du --bytes --summarize %s" % os.path.join(basedir, "storage") rc,out = commands.getstatusoutput(cmd) if rc != 0: sys.exit(rc) bytes, extra = out.split() usage = int(bytes) print("%s.value %d" % (nodename, usage)) tahoe_lafs-1.20.0/misc/operations_helpers/provisioning/provisioning.py0000644000000000000000000010720013615410400023305 0ustar00 from nevow import inevow, rend, loaders, tags as T import math import util # factorial and binomial copied from # http://mail.python.org/pipermail/python-list/2007-April/435718.html def div_ceil(n, d): """ The smallest integer k such that k*d >= n. """ return (n/d) + (n%d != 0) def factorial(n): """factorial(n): return the factorial of the integer n. factorial(0) = 1 factorial(n) with n<0 is -factorial(abs(n)) """ result = 1 for i in range(1, abs(n)+1): result *= i assert n >= 0 return result def binomial(n, k): assert 0 <= k <= n if k == 0 or k == n: return 1 # calculate n!/k! as one product, avoiding factors that # just get canceled P = k+1 for i in range(k+2, n+1): P *= i # if you are paranoid: # C, rem = divmod(P, factorial(n-k)) # assert rem == 0 # return C return P//factorial(n-k) class ProvisioningTool(rend.Page): addSlash = True docFactory = loaders.xmlfile(util.sibling("provisioning.xhtml")) def render_forms(self, ctx, data): req = inevow.IRequest(ctx) def getarg(name, astype=int): if req.method != b"POST": return None if name in req.fields: return astype(req.fields[name].value) return None return self.do_forms(getarg) def do_forms(self, getarg): filled = getarg("filled", bool) def get_and_set(name, options, default=None, astype=int): current_value = getarg(name, astype) i_select = T.select(name=name) for (count, description) in options: count = astype(count) if ((current_value is not None and count == current_value) or (current_value is None and count == default)): o = T.option(value=str(count), selected="true")[description] else: o = T.option(value=str(count))[description] i_select = i_select[o] if current_value is None: current_value = default return current_value, i_select sections = {} def add_input(section, text, entry): if section not in sections: sections[section] = [] sections[section].extend([T.div[text, ": ", entry], "\n"]) def add_output(section, entry): if section not in sections: sections[section] = [] sections[section].extend([entry, "\n"]) def build_section(section): return T.fieldset[T.legend[section], sections[section]] def number(value, suffix=""): scaling = 1 if value < 1: fmt = "%1.2g%s" elif value < 100: fmt = "%.1f%s" elif value < 1000: fmt = "%d%s" elif value < 1e6: fmt = "%.2fk%s"; scaling = 1e3 elif value < 1e9: fmt = "%.2fM%s"; scaling = 1e6 elif value < 1e12: fmt = "%.2fG%s"; scaling = 1e9 elif value < 1e15: fmt = "%.2fT%s"; scaling = 1e12 elif value < 1e18: fmt = "%.2fP%s"; scaling = 1e15 else: fmt = "huge! 
%g%s" return fmt % (value / scaling, suffix) user_counts = [(5, "5 users"), (50, "50 users"), (200, "200 users"), (1000, "1k users"), (10000, "10k users"), (50000, "50k users"), (100000, "100k users"), (500000, "500k users"), (1000000, "1M users"), ] num_users, i_num_users = get_and_set("num_users", user_counts, 50000) add_input("Users", "How many users are on this network?", i_num_users) files_per_user_counts = [(100, "100 files"), (1000, "1k files"), (10000, "10k files"), (100000, "100k files"), (1e6, "1M files"), ] files_per_user, i_files_per_user = get_and_set("files_per_user", files_per_user_counts, 1000) add_input("Users", "How many files for each user? (avg)", i_files_per_user) space_per_user_sizes = [(1e6, "1MB"), (10e6, "10MB"), (100e6, "100MB"), (200e6, "200MB"), (1e9, "1GB"), (2e9, "2GB"), (5e9, "5GB"), (10e9, "10GB"), (100e9, "100GB"), (1e12, "1TB"), (2e12, "2TB"), (5e12, "5TB"), ] # Estimate ~5gb per user as a more realistic case space_per_user, i_space_per_user = get_and_set("space_per_user", space_per_user_sizes, 5e9) add_input("Users", "How much data for each user? (avg)", i_space_per_user) sharing_ratios = [(1.0, "1.0x"), (1.1, "1.1x"), (2.0, "2.0x"), ] sharing_ratio, i_sharing_ratio = get_and_set("sharing_ratio", sharing_ratios, 1.0, float) add_input("Users", "What is the sharing ratio? (1.0x is no-sharing and" " no convergence)", i_sharing_ratio) # Encoding parameters encoding_choices = [("3-of-10-5", "3.3x (3-of-10, repair below 5)"), ("3-of-10-8", "3.3x (3-of-10, repair below 8)"), ("5-of-10-7", "2x (5-of-10, repair below 7)"), ("8-of-10-9", "1.25x (8-of-10, repair below 9)"), ("27-of-30-28", "1.1x (27-of-30, repair below 28"), ("25-of-100-50", "4x (25-of-100, repair below 50)"), ] encoding_parameters, i_encoding_parameters = \ get_and_set("encoding_parameters", encoding_choices, "3-of-10-5", str) encoding_pieces = encoding_parameters.split("-") k = int(encoding_pieces[0]) assert encoding_pieces[1] == "of" n = int(encoding_pieces[2]) # we repair the file when the number of available shares drops below # this value repair_threshold = int(encoding_pieces[3]) add_input("Servers", "What are the default encoding parameters?", i_encoding_parameters) # Server info num_server_choices = [ (5, "5 servers"), (10, "10 servers"), (15, "15 servers"), (30, "30 servers"), (50, "50 servers"), (100, "100 servers"), (200, "200 servers"), (300, "300 servers"), (500, "500 servers"), (1000, "1k servers"), (2000, "2k servers"), (5000, "5k servers"), (10e3, "10k servers"), (100e3, "100k servers"), (1e6, "1M servers"), ] num_servers, i_num_servers = \ get_and_set("num_servers", num_server_choices, 30, int) add_input("Servers", "How many servers are there?", i_num_servers) # availability is measured in dBA = -dBF, where 0dBF is 100% failure, # 10dBF is 10% failure, 20dBF is 1% failure, etc server_dBA_choices = [ (10, "90% [10dBA] (2.4hr/day)"), (13, "95% [13dBA] (1.2hr/day)"), (20, "99% [20dBA] (14min/day or 3.5days/year)"), (23, "99.5% [23dBA] (7min/day or 1.75days/year)"), (30, "99.9% [30dBA] (87sec/day or 9hours/year)"), (40, "99.99% [40dBA] (60sec/week or 53min/year)"), (50, "99.999% [50dBA] (5min per year)"), ] server_dBA, i_server_availability = \ get_and_set("server_availability", server_dBA_choices, 20, int) add_input("Servers", "What is the server availability?", i_server_availability) drive_MTBF_choices = [ (40, "40,000 Hours"), ] drive_MTBF, i_drive_MTBF = \ get_and_set("drive_MTBF", drive_MTBF_choices, 40, int) add_input("Drives", "What is the hard drive MTBF?", i_drive_MTBF) # 
http://www.tgdaily.com/content/view/30990/113/ # http://labs.google.com/papers/disk_failures.pdf # google sees: # 1.7% of the drives they replaced were 0-1 years old # 8% of the drives they repalced were 1-2 years old # 8.6% were 2-3 years old # 6% were 3-4 years old, about 8% were 4-5 years old drive_size_choices = [ (100, "100 GB"), (250, "250 GB"), (500, "500 GB"), (750, "750 GB"), (1000, "1000 GB"), (2000, "2000 GB"), (3000, "3000 GB"), ] drive_size, i_drive_size = \ get_and_set("drive_size", drive_size_choices, 3000, int) drive_size = drive_size * 1e9 add_input("Drives", "What is the capacity of each hard drive?", i_drive_size) drive_failure_model_choices = [ ("E", "Exponential"), ("U", "Uniform"), ] drive_failure_model, i_drive_failure_model = \ get_and_set("drive_failure_model", drive_failure_model_choices, "E", str) add_input("Drives", "How should we model drive failures?", i_drive_failure_model) # drive_failure_rate is in failures per second if drive_failure_model == "E": drive_failure_rate = 1.0 / (drive_MTBF * 1000 * 3600) else: drive_failure_rate = 0.5 / (drive_MTBF * 1000 * 3600) # deletion/gc/ownership mode ownership_choices = [ ("A", "no deletion, no gc, no owners"), ("B", "deletion, no gc, no owners"), ("C", "deletion, share timers, no owners"), ("D", "deletion, no gc, yes owners"), ("E", "deletion, owner timers"), ] ownership_mode, i_ownership_mode = \ get_and_set("ownership_mode", ownership_choices, "A", str) add_input("Servers", "What is the ownership mode?", i_ownership_mode) # client access behavior access_rates = [ (1, "one file per day"), (10, "10 files per day"), (100, "100 files per day"), (1000, "1k files per day"), (10e3, "10k files per day"), (100e3, "100k files per day"), ] download_files_per_day, i_download_rate = \ get_and_set("download_rate", access_rates, 100, int) add_input("Users", "How many files are downloaded per day?", i_download_rate) download_rate = 1.0 * download_files_per_day / (24*60*60) upload_files_per_day, i_upload_rate = \ get_and_set("upload_rate", access_rates, 10, int) add_input("Users", "How many files are uploaded per day?", i_upload_rate) upload_rate = 1.0 * upload_files_per_day / (24*60*60) delete_files_per_day, i_delete_rate = \ get_and_set("delete_rate", access_rates, 10, int) add_input("Users", "How many files are deleted per day?", i_delete_rate) delete_rate = 1.0 * delete_files_per_day / (24*60*60) # the value is in days lease_timers = [ (1, "one refresh per day"), (7, "one refresh per week"), ] lease_timer, i_lease = \ get_and_set("lease_timer", lease_timers, 7, int) add_input("Users", "How frequently do clients refresh files or accounts? 
" "(if necessary)", i_lease) seconds_per_lease = 24*60*60*lease_timer check_timer_choices = [ (1, "every week"), (4, "every month"), (8, "every two months"), (16, "every four months"), ] check_timer, i_check_timer = \ get_and_set("check_timer", check_timer_choices, 4, int) add_input("Users", "How frequently should we check on each file?", i_check_timer) file_check_interval = check_timer * 7 * 24 * 3600 if filled: add_output("Users", T.div["Total users: %s" % number(num_users)]) add_output("Users", T.div["Files per user: %s" % number(files_per_user)]) file_size = 1.0 * space_per_user / files_per_user add_output("Users", T.div["Average file size: ", number(file_size)]) total_files = num_users * files_per_user / sharing_ratio add_output("Grid", T.div["Total number of files in grid: ", number(total_files)]) total_space = num_users * space_per_user / sharing_ratio add_output("Grid", T.div["Total volume of plaintext in grid: ", number(total_space, "B")]) total_shares = n * total_files add_output("Grid", T.div["Total shares in grid: ", number(total_shares)]) expansion = float(n) / float(k) total_usage = expansion * total_space add_output("Grid", T.div["Share data in grid: ", number(total_usage, "B")]) if n > num_servers: # silly configuration, causes Tahoe2 to wrap and put multiple # shares on some servers. add_output("Servers", T.div["non-ideal: more shares than servers" " (n=%d, servers=%d)" % (n, num_servers)]) # every file has at least one share on every server buckets_per_server = total_files shares_per_server = total_files * ((1.0 * n) / num_servers) else: # if nobody is full, then no lease requests will be turned # down for lack of space, and no two shares for the same file # will share a server. Therefore the chance that any given # file has a share on any given server is n/num_servers. buckets_per_server = total_files * ((1.0 * n) / num_servers) # since each such represented file only puts one share on a # server, the total number of shares per server is the same. shares_per_server = buckets_per_server add_output("Servers", T.div["Buckets per server: ", number(buckets_per_server)]) add_output("Servers", T.div["Shares per server: ", number(shares_per_server)]) # how much space is used on the storage servers for the shares? # the share data itself share_data_per_server = total_usage / num_servers add_output("Servers", T.div["Share data per server: ", number(share_data_per_server, "B")]) # this is determined empirically. H=hashsize=32, for a one-segment # file and 3-of-10 encoding share_validation_per_server = 266 * shares_per_server # this could be 423*buckets_per_server, if we moved the URI # extension into a separate file, but that would actually consume # *more* space (minimum filesize is 4KiB), unless we moved all # shares for a given bucket into a single file. share_uri_extension_per_server = 423 * shares_per_server # ownership mode adds per-bucket data H = 32 # depends upon the desired security of delete/refresh caps # bucket_lease_size is the amount of data needed to keep track of # the delete/refresh caps for each bucket. 
bucket_lease_size = 0 client_bucket_refresh_rate = 0 owner_table_size = 0 if ownership_mode in ("B", "C", "D", "E"): bucket_lease_size = sharing_ratio * 1.0 * H if ownership_mode in ("B", "C"): # refreshes per second per client client_bucket_refresh_rate = (1.0 * n * files_per_user / seconds_per_lease) add_output("Users", T.div["Client share refresh rate (outbound): ", number(client_bucket_refresh_rate, "Hz")]) server_bucket_refresh_rate = (client_bucket_refresh_rate * num_users / num_servers) add_output("Servers", T.div["Server share refresh rate (inbound): ", number(server_bucket_refresh_rate, "Hz")]) if ownership_mode in ("D", "E"): # each server must maintain a bidirectional mapping from # buckets to owners. One way to implement this would be to # put a list of four-byte owner numbers into each bucket, and # a list of four-byte share numbers into each owner (although # of course we'd really just throw it into a database and let # the experts take care of the details). owner_table_size = 2*(buckets_per_server * sharing_ratio * 4) if ownership_mode in ("E",): # in this mode, clients must refresh one timer per server client_account_refresh_rate = (1.0 * num_servers / seconds_per_lease) add_output("Users", T.div["Client account refresh rate (outbound): ", number(client_account_refresh_rate, "Hz")]) server_account_refresh_rate = (client_account_refresh_rate * num_users / num_servers) add_output("Servers", T.div["Server account refresh rate (inbound): ", number(server_account_refresh_rate, "Hz")]) # TODO: buckets vs shares here is a bit wonky, but in # non-wrapping grids it shouldn't matter share_lease_per_server = bucket_lease_size * buckets_per_server share_ownertable_per_server = owner_table_size share_space_per_server = (share_data_per_server + share_validation_per_server + share_uri_extension_per_server + share_lease_per_server + share_ownertable_per_server) add_output("Servers", T.div["Share space per server: ", number(share_space_per_server, "B"), " (data ", number(share_data_per_server, "B"), ", validation ", number(share_validation_per_server, "B"), ", UEB ", number(share_uri_extension_per_server, "B"), ", lease ", number(share_lease_per_server, "B"), ", ownertable ", number(share_ownertable_per_server, "B"), ")", ]) # rates client_download_share_rate = download_rate * k client_download_byte_rate = download_rate * file_size add_output("Users", T.div["download rate: shares = ", number(client_download_share_rate, "Hz"), " , bytes = ", number(client_download_byte_rate, "Bps"), ]) total_file_check_rate = 1.0 * total_files / file_check_interval client_check_share_rate = total_file_check_rate / num_users add_output("Users", T.div["file check rate: shares = ", number(client_check_share_rate, "Hz"), " (interval = %s)" % number(1 / client_check_share_rate, "s"), ]) client_upload_share_rate = upload_rate * n # TODO: doesn't include overhead client_upload_byte_rate = upload_rate * file_size * expansion add_output("Users", T.div["upload rate: shares = ", number(client_upload_share_rate, "Hz"), " , bytes = ", number(client_upload_byte_rate, "Bps"), ]) client_delete_share_rate = delete_rate * n server_inbound_share_rate = (client_upload_share_rate * num_users / num_servers) server_inbound_byte_rate = (client_upload_byte_rate * num_users / num_servers) add_output("Servers", T.div["upload rate (inbound): shares = ", number(server_inbound_share_rate, "Hz"), " , bytes = ", number(server_inbound_byte_rate, "Bps"), ]) add_output("Servers", T.div["share check rate (inbound): ", 
number(total_file_check_rate * n / num_servers, "Hz"), ]) server_share_modify_rate = ((client_upload_share_rate + client_delete_share_rate) * num_users / num_servers) add_output("Servers", T.div["share modify rate: shares = ", number(server_share_modify_rate, "Hz"), ]) server_outbound_share_rate = (client_download_share_rate * num_users / num_servers) server_outbound_byte_rate = (client_download_byte_rate * num_users / num_servers) add_output("Servers", T.div["download rate (outbound): shares = ", number(server_outbound_share_rate, "Hz"), " , bytes = ", number(server_outbound_byte_rate, "Bps"), ]) total_share_space = num_servers * share_space_per_server add_output("Grid", T.div["Share space consumed: ", number(total_share_space, "B")]) add_output("Grid", T.div[" %% validation: %.2f%%" % (100.0 * share_validation_per_server / share_space_per_server)]) add_output("Grid", T.div[" %% uri-extension: %.2f%%" % (100.0 * share_uri_extension_per_server / share_space_per_server)]) add_output("Grid", T.div[" %% lease data: %.2f%%" % (100.0 * share_lease_per_server / share_space_per_server)]) add_output("Grid", T.div[" %% owner data: %.2f%%" % (100.0 * share_ownertable_per_server / share_space_per_server)]) add_output("Grid", T.div[" %% share data: %.2f%%" % (100.0 * share_data_per_server / share_space_per_server)]) add_output("Grid", T.div["file check rate: ", number(total_file_check_rate, "Hz")]) total_drives = max(div_ceil(int(total_share_space), int(drive_size)), num_servers) add_output("Drives", T.div["Total drives: ", number(total_drives), " drives"]) drives_per_server = div_ceil(total_drives, num_servers) add_output("Servers", T.div["Drives per server: ", drives_per_server]) # costs if drive_size == 3000 * 1e9: add_output("Servers", T.div["3000GB drive: $250 each"]) drive_cost = 250 else: add_output("Servers", T.div[T.b["unknown cost per drive, assuming $100"]]) drive_cost = 100 if drives_per_server <= 4: add_output("Servers", T.div["1U box with <= 4 drives: $1500"]) server_cost = 1500 # typical 1U box elif drives_per_server <= 12: add_output("Servers", T.div["2U box with <= 12 drives: $2500"]) server_cost = 2500 # 2U box else: add_output("Servers", T.div[T.b["Note: too many drives per server, " "assuming $3000"]]) server_cost = 3000 server_capital_cost = (server_cost + drives_per_server * drive_cost) total_server_cost = float(num_servers * server_capital_cost) add_output("Servers", T.div["Capital cost per server: $", server_capital_cost]) add_output("Grid", T.div["Capital cost for all servers: $", number(total_server_cost)]) # $70/Mbps/mo # $44/server/mo power+space server_bandwidth = max(server_inbound_byte_rate, server_outbound_byte_rate) server_bandwidth_mbps = div_ceil(int(server_bandwidth*8), int(1e6)) server_monthly_cost = 70*server_bandwidth_mbps + 44 add_output("Servers", T.div["Monthly cost per server: $", server_monthly_cost]) add_output("Users", T.div["Capital cost per user: $", number(total_server_cost / num_users)]) # reliability any_drive_failure_rate = total_drives * drive_failure_rate any_drive_MTBF = 1 // any_drive_failure_rate # in seconds any_drive_MTBF_days = any_drive_MTBF / 86400 add_output("Drives", T.div["MTBF (any drive): ", number(any_drive_MTBF_days), " days"]) drive_replacement_monthly_cost = (float(drive_cost) * any_drive_failure_rate *30*86400) add_output("Grid", T.div["Monthly cost of replacing drives: $", number(drive_replacement_monthly_cost)]) total_server_monthly_cost = float(num_servers * server_monthly_cost + drive_replacement_monthly_cost) 
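# (Worked example of the cost model above, purely illustrative: a server
# moving 0.5 MB/s -- the larger of its inbound and outbound rates -- is
# 4 Mbps, which div_ceil keeps at 4, so its monthly cost would be
# 70*4 + 44 = $324; drive replacement is then added on top at the grid
# level.)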
add_output("Grid", T.div["Monthly cost for all servers: $", number(total_server_monthly_cost)]) add_output("Users", T.div["Monthly cost per user: $", number(total_server_monthly_cost / num_users)]) # availability file_dBA = self.file_availability(k, n, server_dBA) user_files_dBA = self.many_files_availability(file_dBA, files_per_user) all_files_dBA = self.many_files_availability(file_dBA, total_files) add_output("Users", T.div["availability of: ", "arbitrary file = %d dBA, " % file_dBA, "all files of user1 = %d dBA, " % user_files_dBA, "all files in grid = %d dBA" % all_files_dBA, ], ) time_until_files_lost = (n-k+1) / any_drive_failure_rate add_output("Grid", T.div["avg time until files are lost: ", number(time_until_files_lost, "s"), ", ", number(time_until_files_lost/86400, " days"), ]) share_data_loss_rate = any_drive_failure_rate * drive_size add_output("Grid", T.div["share data loss rate: ", number(share_data_loss_rate,"Bps")]) # the worst-case survival numbers occur when we do a file check # and the file is just above the threshold for repair (so we # decide to not repair it). The question is then: what is the # chance that the file will decay so badly before the next check # that we can't recover it? The resulting probability is per # check interval. # Note that the chances of us getting into this situation are low. P_disk_failure_during_interval = (drive_failure_rate * file_check_interval) disk_failure_dBF = 10*math.log10(P_disk_failure_during_interval) disk_failure_dBA = -disk_failure_dBF file_survives_dBA = self.file_availability(k, repair_threshold, disk_failure_dBA) user_files_survives_dBA = self.many_files_availability( \ file_survives_dBA, files_per_user) all_files_survives_dBA = self.many_files_availability( \ file_survives_dBA, total_files) add_output("Users", T.div["survival of: ", "arbitrary file = %d dBA, " % file_survives_dBA, "all files of user1 = %d dBA, " % user_files_survives_dBA, "all files in grid = %d dBA" % all_files_survives_dBA, " (per worst-case check interval)", ]) all_sections = [] all_sections.append(build_section("Users")) all_sections.append(build_section("Servers")) all_sections.append(build_section("Drives")) if "Grid" in sections: all_sections.append(build_section("Grid")) f = T.form(action=".", method="post", enctype="multipart/form-data") if filled: action = "Recompute" else: action = "Compute" f = f[T.input(type="hidden", name="filled", value="true"), T.input(type="submit", value=action), all_sections, ] try: from allmydata import reliability # we import this just to test to see if the page is available _hush_pyflakes = reliability del _hush_pyflakes f = [T.div[T.a(href="../reliability")["Reliability Math"]], f] except ImportError: pass return f def file_availability(self, k, n, server_dBA): """ The full formula for the availability of a specific file is:: 1 - sum([choose(N,i) * p**i * (1-p)**(N-i)] for i in range(k)]) Where choose(N,i) = N! / ( i! * (N-i)! ) . Note that each term of this summation is the probability that there are exactly 'i' servers available, and what we're doing is adding up the cases where i is too low. This is a nuisance to calculate at all accurately, especially once N gets large, and when p is close to unity. So we make an engineering approximation: if (1-p) is very small, then each [i] term is much larger than the [i-1] term, and the sum is dominated by the i=k-1 term. This only works for (1-p) < 10%, and when the choose() function doesn't rise fast enough to compensate. 
For high-expansion encodings (3-of-10, 25-of-100), the choose() function is rising at the same time as the (1-p)**(N-i) term, so that's not an issue. For low-expansion encodings (7-of-10, 75-of-100) the two values are moving in opposite directions, so more care must be taken. Note that the p**i term has only a minor effect as long as (1-p)*N is small, and even then the effect is attenuated by the 1-p term. """ assert server_dBA > 9 # >=90% availability to use the approximation factor = binomial(n, k-1) factor_dBA = 10 * math.log10(factor) exponent = n - k + 1 file_dBA = server_dBA * exponent - factor_dBA return file_dBA def many_files_availability(self, file_dBA, num_files): """The probability that 'num_files' independent bernoulli trials will succeed (i.e. we can recover all files in the grid at any given moment) is p**num_files . Since p is close to unity, we express in p in dBA instead, so we can get useful precision on q (=1-p), and then the formula becomes:: P_some_files_unavailable = 1 - (1 - q)**num_files That (1-q)**n expands with the usual binomial sequence, 1 - nq + Xq**2 ... + Xq**n . We use the same approximation as before, since we know q is close to zero, and we get to ignore all the terms past -nq. """ many_files_dBA = file_dBA - 10 * math.log10(num_files) return many_files_dBA tahoe_lafs-1.20.0/misc/operations_helpers/provisioning/provisioning.xhtml0000644000000000000000000000104413615410400024010 0ustar00 Tahoe-LAFS - Provisioning Tool

Tahoe-LAFS Provisioning Tool

This page will help you determine how much disk space and network bandwidth will be required by various sizes and types of Tahoe-LAFS networks.
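As a cross-check on the dBA availability approximation used by file_availability() and many_files_availability() in provisioning.py above, here is a small standalone sketch. It is not part of the tree, the 3-of-10 / 99%-available-server numbers are made up for illustration, and it takes dBA = -10*log10(1 - p), as the surrounding code does; it compares the exact binomial unavailability against the i=k-1 shortcut.

import math

def binomial(n, k):
    return math.factorial(n) // (math.factorial(k) * math.factorial(n - k))

def exact_unavailability(k, n, p):
    # chance that fewer than k of the n servers holding shares are up
    return sum(binomial(n, i) * p**i * (1 - p)**(n - i) for i in range(k))

k, n, p = 3, 10, 0.99
server_dBA = -10 * math.log10(1 - p)          # 20 dBA per server
exact_dBA = -10 * math.log10(exact_unavailability(k, n, p))
approx_dBA = server_dBA * (n - k + 1) - 10 * math.log10(binomial(n, k - 1))
print(exact_dBA, approx_dBA)                  # both come out near 143.5 dBA
# For 1000 independent such files, availability drops by 10*log10(1000) = 30 dBA,
# which is what many_files_availability() computes.
print(approx_dBA - 10 * math.log10(1000))

The two values agree to within about 0.1 dBA here, which is what the docstring's argument about the i=k-1 term dominating predicts once (1-p) is small.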

tahoe_lafs-1.20.0/misc/operations_helpers/provisioning/reliability.py0000644000000000000000000002615013615410400023074 0ustar00#! /usr/bin/python import math from allmydata.util import statistics from numpy import array, matrix, dot DAY=24*60*60 MONTH=31*DAY YEAR=365*DAY class ReliabilityModel(object): """Generate a model of system-wide reliability, given several input parameters. This runs a simulation in which time is quantized down to 'delta' seconds (default is one month): a smaller delta will result in a more accurate simulation, but will take longer to run. 'report_span' simulated seconds will be run. The encoding parameters are provided as 'k' (minimum number of shares needed to recover the file) and 'N' (total number of shares generated). The default parameters are 3-of-10. The first step is to build a probability of individual drive loss during any given delta. This uses a simple exponential model, in which the average drive lifetime is specified by the 'drive_lifetime' parameter (default is 8 years). The second step is to calculate a 'transition matrix': a table of probabilities that shows, given A shares at the start of the delta, what the chances are of having B shares left at the end of the delta. The current code optimistically assumes all drives are independent. A subclass could override that assumption. An additional 'repair matrix' is created to show what happens when the Checker/Repairer is run. In the simulation, the Checker will be run every 'check_period' seconds (default is one month), and the Repairer will be run if it sees fewer than 'R' shares (default 7). The third step is to finally run the simulation. An initial probability vector is created (with a 100% chance of N shares and a 0% chance of fewer than N shares), then it is multiplied by the transition matrix for every delta of time. Each time the Checker is to be run, the repair matrix is multiplied in, and some additional stats are accumulated (average number of repairs that occur, average number of shares regenerated per repair). The output is a ReliabilityReport instance, which contains a table that samples the state of the simulation once each 'report_period' seconds (defaults to 3 months). Each row of this table will contain the probability vector for one sample period (chance of having X shares, from 0 to N, at the end of the period). The report will also contain other information. 
""" @classmethod def run(klass, drive_lifetime=8*YEAR, k=3, R=7, N=10, delta=1*MONTH, check_period=1*MONTH, report_period=3*MONTH, report_span=5*YEAR, ): self = klass() check_period = check_period-1 P = self.p_in_period(drive_lifetime, delta) decay = self.build_decay_matrix(N, P) repair = self.build_repair_matrix(k, N, R) #print("DECAY:", decay) #print("OLD-POST-REPAIR:", old_post_repair) #print("NEW-POST-REPAIR:", decay * repair) #print("REPAIR:", repair) #print("DIFF:", (old_post_repair - decay * repair)) START = array([0]*N + [1]) DEAD = array([1]*k + [0]*(1+N-k)) REPAIRp = array([0]*k + [1]*(R-k) + [0]*(1+N-R)) REPAIR_newshares = array([0]*k + [N-i for i in range(k, R)] + [0]*(1+N-R)) assert REPAIR_newshares.shape[0] == N+1 #print("START", START) #print("REPAIRp", REPAIRp) #print("REPAIR_newshares", REPAIR_newshares) unmaintained_state = START maintained_state = START last_check = 0 last_report = 0 P_repaired_last_check_period = 0.0 needed_repairs = [] needed_new_shares = [] report = ReliabilityReport() for t in range(0, report_span+delta, delta): # the .A[0] turns the one-row matrix back into an array unmaintained_state = (unmaintained_state * decay).A[0] maintained_state = (maintained_state * decay).A[0] if (t-last_check) > check_period: last_check = t # we do a check-and-repair this frequently need_repair = dot(maintained_state, REPAIRp) P_repaired_last_check_period = need_repair new_shares = dot(maintained_state, REPAIR_newshares) needed_repairs.append(need_repair) needed_new_shares.append(new_shares) maintained_state = (maintained_state * repair).A[0] if (t-last_report) > report_period: last_report = t P_dead_unmaintained = dot(unmaintained_state, DEAD) P_dead_maintained = dot(maintained_state, DEAD) cumulative_number_of_repairs = sum(needed_repairs) cumulative_number_of_new_shares = sum(needed_new_shares) report.add_sample(t, unmaintained_state, maintained_state, P_repaired_last_check_period, cumulative_number_of_repairs, cumulative_number_of_new_shares, P_dead_unmaintained, P_dead_maintained) # record one more sample at the end of the run P_dead_unmaintained = dot(unmaintained_state, DEAD) P_dead_maintained = dot(maintained_state, DEAD) cumulative_number_of_repairs = sum(needed_repairs) cumulative_number_of_new_shares = sum(needed_new_shares) report.add_sample(t, unmaintained_state, maintained_state, P_repaired_last_check_period, cumulative_number_of_repairs, cumulative_number_of_new_shares, P_dead_unmaintained, P_dead_maintained) #def yandm(seconds): # return "%dy.%dm" % (int(seconds/YEAR), int( (seconds%YEAR)/MONTH)) #needed_repairs_total = sum(needed_repairs) #needed_new_shares_total = sum(needed_new_shares) #print("at 2y:") #print(" unmaintained", unmaintained_state) #print(" maintained", maintained_state) #print(" number of repairs", needed_repairs_total) #print(" new shares generated", needed_new_shares_total) #repair_rate_inv = report_span / needed_repairs_total #print(" avg repair rate: once every %s" % yandm(repair_rate_inv)) #print(" avg repair download: one share every %s" % yandm(repair_rate_inv/k)) #print(" avg repair upload: one share every %s" % yandm(report_span / needed_new_shares_total)) return report def p_in_period(self, avg_lifetime, period): """Given an average lifetime of a disk (using an exponential model), what is the chance that a live disk will survive the next 'period' seconds?""" # eg p_in_period(8*YEAR, MONTH) = 98.94% return math.exp(-1.0*period/avg_lifetime) def build_decay_matrix(self, N, P): """Return a decay matrix. 
decay[start_shares][end_shares] is the conditional probability of finishing with end_shares, given that we started with start_shares.""" decay_rows = [] decay_rows.append( [0.0]*(N+1) ) for start_shares in range(1, (N+1)): end_shares = self.build_decay_row(start_shares, P) decay_row = end_shares + [0.0] * (N-start_shares) assert len(decay_row) == (N+1), len(decay_row) decay_rows.append(decay_row) decay = matrix(decay_rows) return decay def build_decay_row(self, start_shares, P): """Return a decay row 'end_shares'. end_shares[i] is the chance that we finish with i shares, given that we started with start_shares, for all i between 0 and start_shares, inclusive. This implementation assumes that all shares are independent (IID), but a more complex model could incorporate inter-share failure correlations like having two shares on the same server.""" end_shares = statistics.binomial_distribution_pmf(start_shares, P) return end_shares def build_repair_matrix(self, k, N, R): """Return a repair matrix. repair[start][end]: is the conditional probability of the repairer finishing with 'end' shares, given that it began with 'start' shares (repair if fewer than R shares). The repairer's behavior is deterministic, so all values in this matrix are either 0 or 1. This matrix should be applied *after* the decay matrix.""" new_repair_rows = [] for start_shares in range(0, N+1): new_repair_row = [0] * (N+1) if start_shares < k: new_repair_row[start_shares] = 1 elif start_shares < R: new_repair_row[N] = 1 else: new_repair_row[start_shares] = 1 new_repair_rows.append(new_repair_row) repair = matrix(new_repair_rows) return repair class ReliabilityReport(object): def __init__(self): self.samples = [] def add_sample(self, when, unmaintained_shareprobs, maintained_shareprobs, P_repaired_last_check_period, cumulative_number_of_repairs, cumulative_number_of_new_shares, P_dead_unmaintained, P_dead_maintained): """ when: the timestamp at the end of the report period unmaintained_shareprobs: a vector of probabilities, element[S] is the chance that there are S shares left at the end of the report period. This tracks what happens if no repair is ever done. maintained_shareprobs: same, but for 'maintained' grids, where check and repair is done at the end of each check period P_repaired_last_check_period: a float, with the probability that a repair was performed at the end of the most recent check period. cumulative_number_of_repairs: a float, with the average number of repairs that will have been performed by the end of the report period cumulative_number_of_new_shares: a float, with the average number of new shares that repair proceses generated by the end of the report period P_dead_unmaintained: a float, with the chance that the file will be unrecoverable at the end of the period P_dead_maintained: same, but for maintained grids """ row = (when, unmaintained_shareprobs, maintained_shareprobs, P_repaired_last_check_period, cumulative_number_of_repairs, cumulative_number_of_new_shares, P_dead_unmaintained, P_dead_maintained) self.samples.append(row) tahoe_lafs-1.20.0/misc/operations_helpers/provisioning/reliability.xhtml0000644000000000000000000000402313615410400023573 0ustar00 Tahoe-LAFS - Reliability Tool

Tahoe-LAFS Reliability Tool

Given certain assumptions, this page calculates the probability of share loss over time, to help make informed decisions about how much redundancy and repair bandwidth to configure on a Tahoe-LAFS grid.

Simulation Results

At the end of the report span (elapsed time ), the simulated file had the following properties:

  • Probability of loss (no maintenance):
  • Probability of loss (with maintenance):
  • Average repair frequency: once every secs
  • Average shares generated per repair:

This table shows how the following properties change over time:

  • P_repair: the chance that a repair was performed in the most recent check period.
  • P_dead (unmaintained): the chance that the file will be unrecoverable without periodic check+repair.
  • P_dead (maintained): the chance that the file will be unrecoverable even with periodic check+repair.
t P_repair P_dead (unmaintained) P_dead (maintained)
no simulation data!
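To make the matrix bookkeeping in reliability.py above concrete, here is a minimal standalone sketch of a single simulation step: one month of decay followed by one check-and-repair pass, using the module's default 3-of-10 encoding, repair threshold R=7, one-month delta, and 8-year drive lifetime. It needs NumPy, and it rebuilds the binomial decay row inline instead of importing allmydata.util.statistics.

import math
from numpy import array, matrix

DAY = 24 * 60 * 60
MONTH = 31 * DAY
YEAR = 365 * DAY

def binomial_pmf(n, p):
    # chance of exactly i survivors out of n independent shares, for i = 0..n
    def comb(n, i):
        return math.factorial(n) // (math.factorial(i) * math.factorial(n - i))
    return [comb(n, i) * p**i * (1 - p)**(n - i) for i in range(n + 1)]

N, k, R = 10, 3, 7
P = math.exp(-1.0 * MONTH / (8 * YEAR))   # per-share survival over one delta, ~98.9%

# decay[start][end]: chance of ending the month with 'end' shares given 'start'
decay_rows = [[0.0] * (N + 1)]
for start in range(1, N + 1):
    decay_rows.append(binomial_pmf(start, P) + [0.0] * (N - start))
decay = matrix(decay_rows)

# repair[start][end]: deterministic; between k and R-1 shares, regenerate up to N
repair_rows = []
for start in range(N + 1):
    row = [0] * (N + 1)
    row[start if (start < k or start >= R) else N] = 1
    repair_rows.append(row)
repair = matrix(repair_rows)

state = array([0] * N + [1])        # start with all N shares present
state = (state * decay).A[0]        # one month of independent drive failures
state = (state * repair).A[0]       # then the checker/repairer runs
print("P(still has all N shares):", state[N])
print("P(unrecoverable, < k shares):", sum(state[:k]))

Iterating the decay step once per delta, and folding in the repair matrix once per check period, for a report_span's worth of steps is essentially what ReliabilityModel.run() does to produce the table above.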
tahoe_lafs-1.20.0/misc/operations_helpers/provisioning/run.py0000644000000000000000000000235313615410400021366 0ustar00#!/usr/bin/env python # this depends upon Twisted and Nevow, but not upon Tahoe itself import webbrowser from twisted.application import strports from twisted.internet import reactor from nevow import appserver, rend, loaders from twisted.web import static import web_reliability, provisioning class Root(rend.Page): docFactory = loaders.xmlstr('''\ Tahoe-LAFS Provisioning/Reliability Calculator

Reliability Tool

Provisioning Tool

''') child_reliability = web_reliability.ReliabilityTool() child_provisioning = provisioning.ProvisioningTool() def run(portnum): root = Root() root.putChild(b"tahoe.css", static.File("tahoe.css")) site = appserver.NevowSite(root) s = strports.service("tcp:%d" % portnum, site) s.startService() reactor.callLater(1.0, webbrowser.open, "http://localhost:%d/" % portnum) reactor.run() if __name__ == '__main__': import sys portnum = 8070 if len(sys.argv) > 1: portnum = int(sys.argv[1]) run(portnum) tahoe_lafs-1.20.0/misc/operations_helpers/provisioning/tahoe.css0000644000000000000000000000571213615410400022024 0ustar00 pre.overflow { background: #f7f7f7; border: 1px solid #d7d7d7; margin: 1em 1.75em; padding: .25em; overflow: auto; } /* ----------------------------------------------------------------------- */ /* colors borrowed from the Allmydata logo */ /* general style */ h1 { text-align: center; } table { margin: 1em auto; border: .2em solid #3289b4; border-spacing: 1px; } th { color: white; background-color: #58a1c3; } td { padding: .3em .3em; } th { padding: .3em .3em; } .table-headings-top th { text-align: center; } .table-headings-left th { text-align: right; vertical-align: top; } legend { font-weight: bold; } .connected-yes, .connected-True { border: 1px solid #75d24a; background-color: #EFE; } .connected-no, .connected-False { border: 1px solid #F00; background-color: #FBB; } .encoded, .nodeid { font-family: monospace; font-size: 80%; } .empty-marker { background-color: white; color: gray; } table td.empty-marker { padding: 6em 10em; text-align: center; vertical-align: center; } /* styles for server listings in tables (nickname above nodeid) */ th.nickname-and-peerid { text-align: left; } .nickname { font: inherit; font-family: sans-serif; font-weight: bold; } /* just in case, make sure floats don't stomp on big tables etc. 
*/ #section { clear: both; } /* section-specific styles - turn this client info into a sidebar */ #this-client { font-size: 60%; border: .2em solid #3289b4; float: right; width: 40%; margin: 0 0 .5em .5em; padding: 3px; } #this-client .nodeid { font-size: inherit; } #this-client h2 { text-align: center; background: #3289b4; color: white; margin: -2px -2px 0 -2px; /* matches padding */ padding: .3em; } #this-client table { font-size: inherit; margin: 0 -3px -3px -3px; /* matches padding */ } #this-client td > ul { list-style-type: outside; margin: 0 0 0 2.3em; padding-left: 0; } /* services table */ .services { } /* --- Directory page styles --- */ body.tahoe-directory-page { color: black; background: #c0d9e6; margin: 1em 0; /* zero margin so the table can be flush */ } table.tahoe-directory { color: black; background: white; width: 100%; /*border-left-color: #D7E0E5; border-right-color: #D7E0E5;*/ border-left: 0; border-right: 0; } .tahoe-directory-footer { color: black; background: #c0d9e6; margin: 0 1em; /* compensate for page 0 margin */ } /* directory-screen toolbar */ .toolbar { display: table; margin: .2em auto; text-align: center; /*width: 100%;*/ } .toolbar .toolbar-item { display: inline; text-align: center; padding: 0 1em; } /* recent upload/download status pages */ table.status-download-events { #border: 1px solid #aaa; margin: 1em auto; border: .2em solid #3289b4; border-spacing: 1px; } table.status-download-events td { border: 1px solid #a00; padding: 2px } tahoe_lafs-1.20.0/misc/operations_helpers/provisioning/test_provisioning.py0000644000000000000000000001017113615410400024344 0ustar00 import unittest from allmydata import provisioning ReliabilityModel = None try: from allmydata.reliability import ReliabilityModel except ImportError: pass # might not be importable, since it needs NumPy from nevow import inevow from zope.interface import implements class MyRequest(object): implements(inevow.IRequest) pass class Provisioning(unittest.TestCase): def getarg(self, name, astype=int): if name in self.fields: return astype(self.fields[name]) return None def test_load(self): pt = provisioning.ProvisioningTool() self.fields = {} #r = MyRequest() #r.fields = self.fields #ctx = RequestContext() #unfilled = pt.renderSynchronously(ctx) lots_of_stan = pt.do_forms(self.getarg) self.failUnless(lots_of_stan is not None) self.fields = {'filled': True, "num_users": 50e3, "files_per_user": 1000, "space_per_user": 1e9, "sharing_ratio": 1.0, "encoding_parameters": "3-of-10-5", "num_servers": 30, "ownership_mode": "A", "download_rate": 100, "upload_rate": 10, "delete_rate": 10, "lease_timer": 7, } #filled = pt.renderSynchronously(ctx) more_stan = pt.do_forms(self.getarg) self.failUnless(more_stan is not None) # trigger the wraparound configuration self.fields["num_servers"] = 5 #filled = pt.renderSynchronously(ctx) more_stan = pt.do_forms(self.getarg) # and other ownership modes self.fields["ownership_mode"] = "B" more_stan = pt.do_forms(self.getarg) self.fields["ownership_mode"] = "E" more_stan = pt.do_forms(self.getarg) def test_provisioning_math(self): self.failUnlessEqual(provisioning.binomial(10, 0), 1) self.failUnlessEqual(provisioning.binomial(10, 1), 10) self.failUnlessEqual(provisioning.binomial(10, 2), 45) self.failUnlessEqual(provisioning.binomial(10, 9), 10) self.failUnlessEqual(provisioning.binomial(10, 10), 1) DAY=24*60*60 MONTH=31*DAY YEAR=365*DAY class Reliability(unittest.TestCase): def test_basic(self): if ReliabilityModel is None: raise unittest.SkipTest("reliability model 
requires NumPy") # test that numpy math works the way I think it does import numpy decay = numpy.matrix([[1,0,0], [.1,.9,0], [.01,.09,.9], ]) start = numpy.array([0,0,1]) g2 = (start * decay).A[0] self.failUnlessEqual(repr(g2), repr(numpy.array([.01,.09,.9]))) g3 = (g2 * decay).A[0] self.failUnlessEqual(repr(g3), repr(numpy.array([.028,.162,.81]))) # and the dot product recoverable = numpy.array([0,1,1]) P_recoverable_g2 = numpy.dot(g2, recoverable) self.failUnlessAlmostEqual(P_recoverable_g2, .9 + .09) P_recoverable_g3 = numpy.dot(g3, recoverable) self.failUnlessAlmostEqual(P_recoverable_g3, .81 + .162) r = ReliabilityModel.run(delta=100000, report_period=3*MONTH, report_span=5*YEAR) self.failUnlessEqual(len(r.samples), 20) last_row = r.samples[-1] #print(last_row) (when, unmaintained_shareprobs, maintained_shareprobs, P_repaired_last_check_period, cumulative_number_of_repairs, cumulative_number_of_new_shares, P_dead_unmaintained, P_dead_maintained) = last_row self.failUnless(isinstance(P_repaired_last_check_period, float)) self.failUnless(isinstance(P_dead_unmaintained, float)) self.failUnless(isinstance(P_dead_maintained, float)) self.failUnlessAlmostEqual(P_dead_unmaintained, 0.033591004555395272) self.failUnlessAlmostEqual(P_dead_maintained, 3.2983995819177542e-08) if __name__=='__main__': unittest.main() tahoe_lafs-1.20.0/misc/operations_helpers/provisioning/util.py0000644000000000000000000000016613615410400021537 0ustar00 import os.path def sibling(filename): return os.path.join(os.path.dirname(os.path.abspath(__file__)), filename) tahoe_lafs-1.20.0/misc/operations_helpers/provisioning/web_reliability.py0000644000000000000000000001434613615410400023735 0ustar00 from nevow import rend, loaders, tags as T from nevow.inevow import IRequest import reliability # requires NumPy import util def get_arg(ctx_or_req, argname, default=None, multiple=False): """Extract an argument from either the query args (req.args) or the form body fields (req.fields). If multiple=False, this returns a single value (or the default, which defaults to None), and the query args take precedence. If multiple=True, this returns a tuple of arguments (possibly empty), starting with all those in the query args. 
""" req = IRequest(ctx_or_req) results = [] if argname in req.args: results.extend(req.args[argname]) if req.fields and argname in req.fields: results.append(req.fields[argname].value) if multiple: return tuple(results) if results: return results[0] return default DAY=24*60*60 MONTH=31*DAY YEAR=365*DAY def is_available(): if reliability: return True return False def yandm(seconds): return "%dy.%dm" % (int(seconds/YEAR), int( (seconds%YEAR)/MONTH)) class ReliabilityTool(rend.Page): addSlash = True docFactory = loaders.xmlfile(util.sibling("reliability.xhtml")) DEFAULT_PARAMETERS = [ ("drive_lifetime", "8Y", "time", "Average drive lifetime"), ("k", 3, "int", "Minimum number of shares needed to recover the file"), ("R", 7, "int", "Repair threshold: repair will not occur until fewer than R shares " "are left"), ("N", 10, "int", "Total number of shares of the file generated"), ("delta", "1M", "time", "Amount of time between each simulation step"), ("check_period", "1M", "time", "How often to run the checker and repair if fewer than R shares"), ("report_period", "3M", "time", "Amount of time between result rows in this report"), ("report_span", "5Y", "time", "Total amount of time covered by this report"), ] def parse_time(self, s): if s.endswith("M"): return int(s[:-1]) * MONTH if s.endswith("Y"): return int(s[:-1]) * YEAR return int(s) def format_time(self, s): if s%YEAR == 0: return "%dY" % (s/YEAR) if s%MONTH == 0: return "%dM" % (s/MONTH) return "%d" % s def get_parameters(self, ctx): parameters = {} for (name,default,argtype,description) in self.DEFAULT_PARAMETERS: v = get_arg(ctx, name, default) if argtype == "time": value = self.parse_time(v) else: value = int(v) parameters[name] = value return parameters def renderHTTP(self, ctx): self.parameters = self.get_parameters(ctx) self.results = reliability.ReliabilityModel.run(**self.parameters) return rend.Page.renderHTTP(self, ctx) def make_input(self, name, old_value): return T.input(name=name, type="text", size="5", value=self.format_time(old_value)) def render_forms(self, ctx, data): f = T.form(action=".", method="get") table = [] for (name,default_value,argtype,description) in self.DEFAULT_PARAMETERS: old_value = self.parameters[name] i = self.make_input(name, old_value) table.append(T.tr[T.td[name+":"], T.td[i], T.td[description]]) go = T.input(type="submit", value="Recompute") return [T.h2["Simulation Parameters:"], f[T.table[table], go], ] def data_simulation_table(self, ctx, data): for row in self.results.samples: yield row def render_simulation_row(self, ctx, row): (when, unmaintained_shareprobs, maintained_shareprobs, P_repaired_last_check_period, cumulative_number_of_repairs, cumulative_number_of_new_shares, P_dead_unmaintained, P_dead_maintained) = row ctx.fillSlots("t", yandm(when)) ctx.fillSlots("P_repair", "%.6f" % P_repaired_last_check_period) ctx.fillSlots("P_dead_unmaintained", "%.6g" % P_dead_unmaintained) ctx.fillSlots("P_dead_maintained", "%.6g" % P_dead_maintained) return ctx.tag def render_report_span(self, ctx, row): (when, unmaintained_shareprobs, maintained_shareprobs, P_repaired_last_check_period, cumulative_number_of_repairs, cumulative_number_of_new_shares, P_dead_unmaintained, P_dead_maintained) = self.results.samples[-1] return ctx.tag[yandm(when)] def render_P_loss_unmaintained(self, ctx, row): (when, unmaintained_shareprobs, maintained_shareprobs, P_repaired_last_check_period, cumulative_number_of_repairs, cumulative_number_of_new_shares, P_dead_unmaintained, P_dead_maintained) = self.results.samples[-1] 
return ctx.tag["%.6g (%1.8f%%)" % (P_dead_unmaintained, 100*P_dead_unmaintained)] def render_P_loss_maintained(self, ctx, row): (when, unmaintained_shareprobs, maintained_shareprobs, P_repaired_last_check_period, cumulative_number_of_repairs, cumulative_number_of_new_shares, P_dead_unmaintained, P_dead_maintained) = self.results.samples[-1] return ctx.tag["%.6g (%1.8f%%)" % (P_dead_maintained, 100*P_dead_maintained)] def render_P_repair_rate(self, ctx, row): (when, unmaintained_shareprobs, maintained_shareprobs, P_repaired_last_check_period, cumulative_number_of_repairs, cumulative_number_of_new_shares, P_dead_unmaintained, P_dead_maintained) = self.results.samples[-1] freq = when / cumulative_number_of_repairs return ctx.tag["%.6g" % freq] def render_P_repair_shares(self, ctx, row): (when, unmaintained_shareprobs, maintained_shareprobs, P_repaired_last_check_period, cumulative_number_of_repairs, cumulative_number_of_new_shares, P_dead_unmaintained, P_dead_maintained) = self.results.samples[-1] generated_shares = cumulative_number_of_new_shares / cumulative_number_of_repairs return ctx.tag["%1.2f" % generated_shares] tahoe_lafs-1.20.0/misc/operations_helpers/spacetime/diskwatcher.py0000644000000000000000000000175513615410400022323 0ustar00 from axiom.item import Item from axiom.attributes import text, integer, timestamp class Sample(Item): # we didn't originally set typeName, so it was generated from the # fully-qualified classname ("diskwatcher.Sample"), then Axiom # automatically lowercases and un-dot-ifies it to get # "diskwatcher_sample". Now we explicitly provide a name. typeName = "diskwatcher_sample" # version 2 added the 'total' field schemaVersion = 2 url = text(indexed=True) when = timestamp(indexed=True) total = integer() used = integer() avail = integer() def upgradeSample1to2(old): return old.upgradeVersion("diskwatcher_sample", 1, 2, url=old.url, when=old.when, total=0, used=old.used, avail=old.avail) from axiom.upgrade import registerUpgrader registerUpgrader(upgradeSample1to2, "diskwatcher_sample", 1, 2) tahoe_lafs-1.20.0/misc/operations_helpers/spacetime/diskwatcher.tac0000644000000000000000000003516313615410400022442 0ustar00# -*- python -*- """ Run this tool with twistd in its own directory, with a file named 'urls.txt' describing which nodes to query. Make sure to copy diskwatcher.py into the same directory. It will request disk-usage numbers from the nodes once per hour (or slower), and store them in a local database. It will compute usage-per-unit time values over several time ranges and make them available through an HTTP query (using ./webport). It will also provide an estimate of how much time is left before the grid's storage is exhausted. There are munin plugins (named tahoe_doomsday and tahoe_diskusage) to graph the values this tool computes. Each line of urls.txt points to a single node. Each node should have its own dedicated disk: if multiple nodes share a disk, only list one of them in urls.txt (otherwise that space will be double-counted, confusing the results). 
Each line should be in the form: http://host:webport/statistics?t=json """ # TODO: # built-in graphs on web interface import os.path, urllib, time from datetime import timedelta from twisted.application import internet, service, strports from twisted.web import server, resource, http, client from twisted.internet import defer from twisted.python import log import json from axiom.attributes import AND from axiom.store import Store from epsilon import extime from diskwatcher import Sample #from axiom.item import Item #from axiom.attributes import text, integer, timestamp #class Sample(Item): # url = text() # when = timestamp() # used = integer() # avail = integer() #s = Store("history.axiom") #ns = Store("new-history.axiom") #for sa in s.query(Sample): # diskwatcher.Sample(store=ns, # url=sa.url, when=sa.when, used=sa.used, avail=sa.avail) #print "done" HOUR = 3600 DAY = 24*3600 WEEK = 7*DAY MONTH = 30*DAY YEAR = 365*DAY class DiskWatcher(service.MultiService, resource.Resource): POLL_INTERVAL = 1*HOUR AVERAGES = {#"60s": 60, #"5m": 5*60, #"30m": 30*60, "1hr": 1*HOUR, "1day": 1*DAY, "2wk": 2*WEEK, "4wk": 4*WEEK, } def __init__(self): assert os.path.exists("diskwatcher.tac") # run from the right directory self.growth_cache = {} service.MultiService.__init__(self) resource.Resource.__init__(self) self.store = Store("history.axiom") self.store.whenFullyUpgraded().addCallback(self._upgrade_complete) service.IService(self.store).setServiceParent(self) # let upgrader run ts = internet.TimerService(self.POLL_INTERVAL, self.poll) ts.setServiceParent(self) def _upgrade_complete(self, ignored): print("Axiom store upgrade complete") def startService(self): service.MultiService.startService(self) try: desired_webport = open("webport", "r").read().strip() except EnvironmentError: desired_webport = None webport = desired_webport or "tcp:0" root = self serv = strports.service(webport, server.Site(root)) serv.setServiceParent(self) if not desired_webport: got_port = serv._port.getHost().port open("webport", "w").write("tcp:%d\n" % got_port) def get_urls(self): for url in open("urls.txt","r").readlines(): if "#" in url: url = url[:url.find("#")] url = url.strip() if not url: continue yield url def poll(self): log.msg("polling..") #return self.poll_synchronous() return self.poll_asynchronous() def poll_asynchronous(self): # this didn't actually seem to work any better than poll_synchronous: # logs are more noisy, and I got frequent DNS failures. But with a # lot of servers to query, this is probably the better way to go. A # significant advantage of this approach is that we can use a # timeout= argument to tolerate hanging servers. 
dl = [] for url in self.get_urls(): when = extime.Time() d = client.getPage(url, timeout=60) d.addCallback(self.got_response, when, url) dl.append(d) d = defer.DeferredList(dl) def _done(res): fetched = len([1 for (success, value) in res if success]) log.msg("fetched %d of %d" % (fetched, len(dl))) d.addCallback(_done) return d def poll_synchronous(self): attempts = 0 fetched = 0 for url in self.get_urls(): attempts += 1 try: when = extime.Time() # if a server accepts the connection and then hangs, this # will block forever data_json = urllib.urlopen(url).read() self.got_response(data_json, when, url) fetched += 1 except: log.msg("error while fetching: %s" % url) log.err() log.msg("fetched %d of %d" % (fetched, attempts)) def got_response(self, data_json, when, url): data = json.loads(data_json) total = data[u"stats"][u"storage_server.disk_total"] used = data[u"stats"][u"storage_server.disk_used"] avail = data[u"stats"][u"storage_server.disk_avail"] print("%s : total=%s, used=%s, avail=%s" % (url, total, used, avail)) Sample(store=self.store, url=unicode(url), when=when, total=total, used=used, avail=avail) def calculate_growth_timeleft(self): timespans = [] total_avail_space = self.find_total_available_space() pairs = [ (timespan,name) for name,timespan in self.AVERAGES.items() ] pairs.sort() for (timespan,name) in pairs: growth = self.growth(timespan) print(name, total_avail_space, growth) if growth is not None: timeleft = None if growth > 0: timeleft = total_avail_space / growth timespans.append( (name, timespan, growth, timeleft) ) return timespans def find_total_space(self): # this returns the sum of disk-avail stats for all servers that 1) # are listed in urls.txt and 2) have responded recently. now = extime.Time() recent = now - timedelta(seconds=2*self.POLL_INTERVAL) total_space = 0 for url in self.get_urls(): url = unicode(url) latest = list(self.store.query(Sample, AND(Sample.url == url, Sample.when > recent), sort=Sample.when.descending, limit=1)) if latest: total_space += latest[0].total return total_space def find_total_available_space(self): # this returns the sum of disk-avail stats for all servers that 1) # are listed in urls.txt and 2) have responded recently. now = extime.Time() recent = now - timedelta(seconds=2*self.POLL_INTERVAL) total_avail_space = 0 for url in self.get_urls(): url = unicode(url) latest = list(self.store.query(Sample, AND(Sample.url == url, Sample.when > recent), sort=Sample.when.descending, limit=1)) if latest: total_avail_space += latest[0].avail return total_avail_space def find_total_used_space(self): # this returns the sum of disk-used stats for all servers that 1) are # listed in urls.txt and 2) have responded recently. now = extime.Time() recent = now - timedelta(seconds=2*self.POLL_INTERVAL) total_used_space = 0 for url in self.get_urls(): url = unicode(url) latest = list(self.store.query(Sample, AND(Sample.url == url, Sample.when > recent), sort=Sample.when.descending, limit=1)) if latest: total_used_space += latest[0].used return total_used_space def growth(self, timespan): """Calculate the bytes-per-second growth of the total disk-used stat, over a period of TIMESPAN seconds (i.e. between the most recent sample and the latest one that's at least TIMESPAN seconds ago), summed over all nodes which 1) are listed in urls.txt, 2) have responded recently, and 3) have a response at least as old as TIMESPAN. 
If there are no nodes which meet these criteria, we'll return None; this is likely to happen for the longer timespans (4wk) until the gatherer has been running and collecting data for that long.""" # a note about workload: for our oldest storage servers, as of # 25-Jan-2009, the first DB query here takes about 40ms per server # URL (some take as little as 10ms). There are about 110 servers, and # two queries each, so the growth() function takes about 7s to run # for each timespan. We track 4 timespans, and find_total_*_space() # takes about 2.3s to run, so calculate_growth_timeleft() takes about # 27s. Each HTTP query thus takes 27s, and we have six munin plugins # which perform HTTP queries every 5 minutes. By adding growth_cache(), # I hope to reduce this: the first HTTP query will still take 27s, # but the subsequent five should be about 2.3s each. # we're allowed to cache this value for 3 minutes if timespan in self.growth_cache: (when, value) = self.growth_cache[timespan] if time.time() - when < 3*60: return value td = timedelta(seconds=timespan) now = extime.Time() then = now - td recent = now - timedelta(seconds=2*self.POLL_INTERVAL) total_growth = 0.0 num_nodes = 0 for url in self.get_urls(): url = unicode(url) latest = list(self.store.query(Sample, AND(Sample.url == url, Sample.when > recent), sort=Sample.when.descending, limit=1)) if not latest: #print "no latest sample from", url continue # skip this node latest = latest[0] old = list(self.store.query(Sample, AND(Sample.url == url, Sample.when < then), sort=Sample.when.descending, limit=1)) if not old: #print "no old sample from", url continue # skip this node old = old[0] duration = latest.when.asPOSIXTimestamp() - old.when.asPOSIXTimestamp() if not duration: print("only one sample from", url) continue rate = float(latest.used - old.used) / duration #print url, rate total_growth += rate num_nodes += 1 if not num_nodes: return None self.growth_cache[timespan] = (time.time(), total_growth) return total_growth def getChild(self, path, req): if path == "": return self return resource.Resource.getChild(self, path, req) def abbreviate_time(self, s): def _plural(count, unit): count = int(count) if count == 1: return "%d %s" % (count, unit) return "%d %ss" % (count, unit) if s is None: return "unknown" if s < 120: return _plural(s, "second") if s < 3*HOUR: return _plural(s/60, "minute") if s < 2*DAY: return _plural(s/HOUR, "hour") if s < 2*MONTH: return _plural(s/DAY, "day") if s < 4*YEAR: return _plural(s/MONTH, "month") return _plural(s/YEAR, "year") def abbreviate_space2(self, s, SI=True): if s is None: return "unknown" if SI: U = 1000.0 isuffix = "B" else: U = 1024.0 isuffix = "iB" def r(count, suffix): return "%.2f %s%s" % (count, suffix, isuffix) if s < 1024: # 1000-1023 get emitted as bytes, even in SI mode return r(s, "") if s < U*U: return r(s/U, "k") if s < U*U*U: return r(s/(U*U), "M") if s < U*U*U*U: return r(s/(U*U*U), "G") if s < U*U*U*U*U: return r(s/(U*U*U*U), "T") return r(s/(U*U*U*U*U), "P") def abbreviate_space(self, s): return "(%s, %s)" % (self.abbreviate_space2(s, True), self.abbreviate_space2(s, False)) def render(self, req): t = req.args.get("t", ["html"])[0] ctype = "text/plain" data = "" if t == "html": data = "" for (name, timespan, growth, timeleft) in self.calculate_growth_timeleft(): data += "%f bytes per second (%sps), %s remaining (over %s)\n" % \ (growth, self.abbreviate_space2(growth, True), self.abbreviate_time(timeleft), name) used = self.find_total_used_space() data += "total used: %d bytes %s\n" % 
(used, self.abbreviate_space(used)) total = self.find_total_space() data += "total space: %d bytes %s\n" % (total, self.abbreviate_space(total)) elif t == "json": current = {"rates": self.calculate_growth_timeleft(), "total": self.find_total_space(), "used": self.find_total_used_space(), "available": self.find_total_available_space(), } data = json.dumps(current, indent=True) else: req.setResponseCode(http.BAD_REQUEST) data = "Unknown t= %s\n" % t req.setHeader("content-type", ctype) return data application = service.Application("disk-watcher") DiskWatcher().setServiceParent(application) tahoe_lafs-1.20.0/misc/python3/audit-dict-for-loops.py0000644000000000000000000000243713615410400017503 0ustar00""" The following code is valid in Python 2: for x in my_dict.keys(): if something(x): del my_dict[x] But broken in Python 3. One solution is: for x in list(my_dict.keys()): if something(x): del my_dict[x] Some but not all code in Tahoe has been changed to that. In other cases, the code was left unchanged since there was no `del`. However, some mistakes may have slept through. To help catch cases that were incorrectly ported, this script runs futurize on all ported modules, which should convert it into the `list()` form. You can then look at git diffs to see if any of the impacted would be buggy without the newly added `list()`. """ import os from subprocess import check_call from allmydata.util import _python3 def fix_potential_issue(): for module in _python3.PORTED_MODULES + _python3.PORTED_TEST_MODULES: filename = "src/" + module.replace(".", "/") + ".py" if not os.path.exists(filename): # Package, probably filename = "src/" + module.replace(".", "/") + "/__init__.py" check_call(["futurize", "-f", "lib2to3.fixes.fix_dict", "-w", filename]) print( "All loops converted. Check diff to see if there are any that need to be commitedd." ) if __name__ == "__main__": fix_potential_issue() tahoe_lafs-1.20.0/misc/python3/depgraph.sh0000755000000000000000000000140713615410400015311 0ustar00#!/usr/bin/env bash set -x set -eo pipefail TAHOE="${PWD}" git clone -b gh-pages git@github.com:tahoe-lafs/tahoe-depgraph.git cd tahoe-depgraph # Generate the maybe-changed data. python "${TAHOE}"/misc/python3/tahoe-depgraph.py "${TAHOE}" if git diff-index --quiet HEAD; then echo "Declining to commit without any changes." exit 0 fi git config user.name 'Build Automation' git config user.email 'tahoe-dev@lists.tahoe-lafs.org' git add tahoe-deps.json tahoe-ported.json git commit -m "\ Built from ${CIRCLE_REPOSITORY_URL}@${CIRCLE_SHA1} tahoe-depgraph was $(git rev-parse HEAD) " if [ "${CIRCLE_BRANCH}" != "master" ]; then echo "Declining to update dependency graph for non-master build." exit 0 fi # Publish it on GitHub. git push -q origin gh-pages tahoe_lafs-1.20.0/misc/python3/tahoe-depgraph.py0000644000000000000000000001102413615410400016416 0ustar00# Copyright 2004, 2009 Toby Dickenson # Copyright 2014-2015 Aaron Gallagher # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject # to the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. 
# # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. # IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY # CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, # TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. import collections import functools import json import os import modulefinder import sys import tempfile from twisted.python import reflect class mymf(modulefinder.ModuleFinder): def __init__(self, *args, **kwargs): self._depgraph = collections.defaultdict(set) self._types = {} self._last_caller = None modulefinder.ModuleFinder.__init__(self, *args, **kwargs) def import_hook(self, name, caller=None, fromlist=None, level=None): old_last_caller = self._last_caller try: self._last_caller = caller return modulefinder.ModuleFinder.import_hook( self, name, caller, fromlist) finally: self._last_caller = old_last_caller def import_module(self, partnam, fqname, parent): if partnam.endswith('_py3'): return None r = modulefinder.ModuleFinder.import_module( self, partnam, fqname, parent) last_caller = self._last_caller if r is not None and 'allmydata' in r.__name__: if last_caller is None or last_caller.__name__ == '__main__': self._depgraph[fqname] else: self._depgraph[last_caller.__name__].add(fqname) return r def load_module(self, fqname, fp, pathname, additional_info): (suffix, mode, type) = additional_info r = modulefinder.ModuleFinder.load_module( self, fqname, fp, pathname, (suffix, mode, type)) if r is not None: self._types[r.__name__] = type return r def as_json(self): return { 'depgraph': { name: dict.fromkeys(deps, 1) for name, deps in self._depgraph.items()}, 'types': self._types, } json_dump = functools.partial( json.dump, indent=4, separators=(',', ': '), sort_keys=True) def main(target): mf = mymf(sys.path[:], 0, []) moduleNames = [] for path, dirnames, filenames in os.walk(os.path.join(target, 'src', 'allmydata')): if 'test' in dirnames: dirnames.remove('test') for filename in filenames: if not filename.endswith('.py'): continue if filename in ('setup.py',): continue if '-' in filename: # a script like update-documentation.py continue if filename != '__init__.py': filepath = os.path.join(path, filename) else: filepath = path moduleNames.append(reflect.filenameToModuleName(filepath)) with tempfile.NamedTemporaryFile("w") as tmpfile: for moduleName in moduleNames: tmpfile.write('import %s\n' % moduleName) tmpfile.flush() mf.run_script(tmpfile.name) with open('tahoe-deps.json', 'w') as outfile: json_dump(mf.as_json(), outfile) outfile.write('\n') ported_modules_path = os.path.join(target, "src", "allmydata", "util", "_python3.py") with open(ported_modules_path) as f: ported_modules = {} exec(f.read(), ported_modules, ported_modules) port_status = dict.fromkeys( ported_modules["PORTED_MODULES"] + ported_modules["PORTED_TEST_MODULES"], "ported" ) with open('tahoe-ported.json', 'w') as outfile: json_dump(port_status, outfile) outfile.write('\n') if __name__ == '__main__': main(*sys.argv[1:]) tahoe_lafs-1.20.0/misc/simulators/bench_spans.py0000644000000000000000000000561213615410400016615 0ustar00 """ To use this, get a trace file such as this one: wget http://tahoe-lafs.org/trac/tahoe-lafs/raw-attachment/ticket/1170/run-112-above28-flog-dump-sh8-on-nsziz.txt And run this command passing that trace file's name: python 
bench_spans.py run-112-above28-flog-dump-sh8-on-nsziz.txt """ from pyutil import benchutil from allmydata.util.spans import DataSpans import re, sys DUMP_S='_received spans trace .dump()' GET_R=re.compile('_received spans trace .get\(([0-9]*), ([0-9]*)\)') POP_R=re.compile('_received spans trace .pop\(([0-9]*), ([0-9]*)\)') REMOVE_R=re.compile('_received spans trace .remove\(([0-9]*), ([0-9]*)\)') GET_SPANS_S='_received spans trace .get_spans()' ADD_R=re.compile('_received spans trace .add\(([0-9]*), len=([0-9]*)\)') INIT_S='_received spans trace = DataSpans' class B(object): def __init__(self, inf): self.inf = inf def init(self, N): self.s = DataSpans() # self.stats = {} def run(self, N): count = 0 inline = self.inf.readline() while count < N and inline != '': if DUMP_S in inline: self.s.dump() # self.stats['dump'] = self.stats.get('dump', 0) + 1 elif GET_SPANS_S in inline: self.s.get_spans() # self.stats['get_spans'] = self.stats.get('get_spans', 0) + 1 elif ADD_R.search(inline): mo = ADD_R.search(inline) start = int(mo.group(1)) length = int(mo.group(2)) self.s.add(start, 'x'*length) # self.stats['add'] = self.stats.get('add', 0) + 1 elif GET_R.search(inline): mo = GET_R.search(inline) start = int(mo.group(1)) length = int(mo.group(2)) self.s.get(start, length) # self.stats['get'] = self.stats.get('get', 0) + 1 elif REMOVE_R.search(inline): mo = REMOVE_R.search(inline) start = int(mo.group(1)) length = int(mo.group(2)) self.s.remove(start, length) # self.stats['remove'] = self.stats.get('remove', 0) + 1 elif POP_R.search(inline): mo = POP_R.search(inline) start = int(mo.group(1)) length = int(mo.group(2)) self.s.pop(start, length) # self.stats['pop'] = self.stats.get('pop', 0) + 1 elif INIT_S in inline: pass else: print("Warning, didn't recognize this line: %r" % (inline,)) count += 1 inline = self.inf.readline() # print(self.stats) benchutil.print_bench_footer(UNITS_PER_SECOND=1000000) print("(microseconds)") for N in [600, 6000, 60000]: b = B(open(sys.argv[1], 'rU')) print("%7d" % N, end=' ') benchutil.rep_bench(b.run, N, b.init, UNITS_PER_SECOND=1000000) tahoe_lafs-1.20.0/misc/simulators/count_dirs.py0000644000000000000000000001057513615410400016507 0ustar00#!/usr/bin/env python """ This tool estimates how much space would be consumed by a filetree into which a native directory was copied. One open question is how we should encode directories. One approach is to put a block of data on a server, one per directory, which effectively contains a dictionary that maps child names to targets (URIs for children which are files, slotnames for children which are directories). To prevent the server which hosts this data from either learning its contents or corrupting them, we can add encryption and integrity checks to the data, at the cost of storage overhead. This program is intended to estimate the size of these data blocks using real-world filenames and directories. You point it at a real directory, and it does a recursive walk of the filesystem, adding up the size of the filetree data structures that would be required to represent it. MODES: A: no confidentiality or integrity checking. Directories are serialized plaintext dictionaries which map file/subdir names to targets (either URIs or slotnames). Each entry can be changed independently. B1: child names and targets are encrypted. No integrity checks, so the server can still corrupt the contents undetectably. Each entry can still be changed independently. 
B2: same security properties as B1, but the dictionary is serialized before encryption. This reduces overhead at the cost of preventing independent updates of entries (all entries must be updated at the same time, so test-and-set operations are required to avoid data-losing races) C1: like B1, but adding HMACs to each entry to guarantee data integrity C2: like B2, but adding a single block-wide HMAC for data integrity """ import sys, os.path #URI:7jzbza6iwdsk5xbxsvdgjaugyrhetw64zpflp4gihmyh5krjblra====:a5qdejwbimu5b2wfke7xwexxlq======:gzeub5v42rjbgd7ccawnahu2evqd42lpdpzd447c6zkmdvjkpowq====:25:100:219889 # that's a printable representation of two 32-byte hashes (storage index, URI # extension block hash) and a 16-byte AES read-capability key, and some # share-count and size information URI_SIZE = 164 #pb://xextf3eap44o3wi27mf7ehiur6wvhzr6@207.7.153.180:56677,127.0.0.1:56677/zilcw5uz2yyyo=== # that's a FURL which points at the slot. Modes that need to add a # read-capability AES key will need more space. SLOTNAME_SIZE = 90 def slotsize(mode, numfiles, numdirs): # URI_sizes is the total space taken up by the target (dict keys) strings # for all of the targets that are files, instead of directories target_sizes_for_files = numfiles * URI_SIZE slotname_size = SLOTNAME_SIZE if mode in ("B1", "B2", "C1", "C2"): slotname_size += 16 # slotname_sizes is the total space taken up by the target strings for # all the targets that are directories, instead of files. These are # bigger when the read+write-cap slotname is larger than the store-cap, # which happens as soon as we seek to prevent the slot's host from # reading or corrupting it. target_sizes_for_subdirs = numdirs * slotname_size # now how much overhead is there for each entry? per_slot, per_entry = 0, 0 if mode == "B1": per_entry = 16+12+12 elif mode == "C1": per_entry = 16+12+12 + 32+32 elif mode == "B2": per_slot = 12 elif mode == "C2": per_slot = 12+32 num_entries = numfiles + numdirs total = (target_sizes_for_files + target_sizes_for_subdirs + per_slot + per_entry * num_entries ) return total MODES = ("A", "B1", "B2", "C1", "C2") def scan(root): total = dict([(mode,0) for mode in MODES]) num_files = 0 num_dirs = 0 for absroot, dirs, files in os.walk(root): #print(absroot) #print(" %d files" % len(files)) #print(" %d subdirs" % len(dirs)) num_files += len(files) num_dirs += len(dirs) stringsize = len(''.join(files) + ''.join(dirs)) for mode in MODES: total[mode] += slotsize(mode, len(files), len(dirs)) + stringsize print("%d directories" % num_dirs) print("%d files" % num_files) for mode in sorted(total.keys()): print("%s: %d bytes" % (mode, total[mode])) if __name__ == '__main__': scan(sys.argv[1]) """ 260:warner@monolith% ./count_dirs.py ~ 70925 directories 457199 files A: 90042361 bytes B1: 112302121 bytes B2: 92027061 bytes C1: 146102057 bytes C2: 94293461 bytes """ tahoe_lafs-1.20.0/misc/simulators/hashbasedsig.py0000644000000000000000000003351513615410400016762 0ustar00#!python # range of hash output lengths range_L_hash = [128] lg_M = 53 # lg(required number of signatures before losing security) limit_bytes = 480000 # limit on signature length limit_cost = 500 # limit on Mcycles_Sig + weight_ver*Mcycles_ver weight_ver = 1 # how important verification cost is relative to signature cost # (note: setting this too high will just exclude useful candidates) L_block = 512 # bitlength of hash input blocks L_pad = 64 # bitlength of hash padding overhead (for M-D hashes) L_label = 80 # bitlength of hash position label L_prf = 256 # bitlength 
of hash output when used as a PRF cycles_per_byte = 15.8 # cost of hash Mcycles_per_block = cycles_per_byte * L_block / (8 * 1000000.0) from math import floor, ceil, log, log1p, pow, e from sys import stderr from gc import collect def lg(x): return log(x, 2) def ln(x): return log(x, e) def ceil_log(x, B): return int(ceil(log(x, B))) def ceil_div(x, y): return int(ceil(float(x) / float(y))) def floor_div(x, y): return int(floor(float(x) / float(y))) # number of compression function evaluations to hash k bits # we assume that there is a label in each block def compressions(k): return ceil_div(k + L_pad, L_block - L_label) # sum of power series sum([pow(p, i) for i in range(n)]) def sum_powers(p, n): if p == 1: return n return (pow(p, n) - 1)/(p - 1) def make_candidate(B, K, K1, K2, q, T, T_min, L_hash, lg_N, sig_bytes, c_sign, c_ver, c_ver_pm): Mcycles_sign = c_sign * Mcycles_per_block Mcycles_ver = c_ver * Mcycles_per_block Mcycles_ver_pm = c_ver_pm * Mcycles_per_block cost = Mcycles_sign + weight_ver*Mcycles_ver if sig_bytes >= limit_bytes or cost > limit_cost: return [] return [{ 'B': B, 'K': K, 'K1': K1, 'K2': K2, 'q': q, 'T': T, 'T_min': T_min, 'L_hash': L_hash, 'lg_N': lg_N, 'sig_bytes': sig_bytes, 'c_sign': c_sign, 'Mcycles_sign': Mcycles_sign, 'c_ver': c_ver, 'c_ver_pm': c_ver_pm, 'Mcycles_ver': Mcycles_ver, 'Mcycles_ver_pm': Mcycles_ver_pm, 'cost': cost, }] # K1 = size of root Merkle tree # K = size of middle Merkle trees # K2 = size of leaf Merkle trees # q = number of revealed private keys per signed message # Winternitz with B < 4 is never optimal. For example, going from B=4 to B=2 halves the # chain depth, but that is cancelled out by doubling (roughly) the number of digits. range_B = range(4, 33) M = pow(2, lg_M) def calculate(K, K1, K2, q_max, L_hash, trees): candidates = [] lg_K = lg(K) lg_K1 = lg(K1) lg_K2 = lg(K2) # We want the optimal combination of q and T. That takes too much time and memory # to search for directly, so we start by calculating the lowest possible value of T # for any q. Then for potential values of T, we calculate the smallest q such that we # will have at least L_hash bits of security against forgery using revealed private keys # (i.e. this method of forgery is no easier than finding a hash preimage), provided # that fewer than 2^lg_S_min messages are signed. # min height of certification tree (excluding root and bottom layer) T_min = ceil_div(lg_M - lg_K1, lg_K) last_q = None for T in range(T_min, T_min+21): # lg(total number of leaf private keys) lg_S = lg_K1 + lg_K*T lg_N = lg_S + lg_K2 # Suppose that m signatures have been made. The number of times X that a given bucket has # been chosen follows a binomial distribution B(m, p) where p = 1/S and S is the number of # buckets. I.e. Pr(X = x) = C(m, x) * p^x * (1-p)^(m-x). # # If an attacker picks a random seed and message that falls into a bucket that has been # chosen x times, then at most q*x private values in that bucket have been revealed, so # (ignoring the possibility of guessing private keys, which is negligable) the attacker's # success probability for a forgery using the revealed values is at most min(1, q*x / K2)^q. # # Let j = floor(K2/q). Conditioning on x, we have # # Pr(forgery) = sum_{x = 0..j}(Pr(X = x) * (q*x / K2)^q) + Pr(x > j) # = sum_{x = 1..j}(Pr(X = x) * (q*x / K2)^q) + Pr(x > j) # # We lose nothing by approximating (q*x / K2)^q as 1 for x > 4, i.e. ignoring the resistance # of the HORS scheme to forgery when a bucket has been chosen 5 or more times. 
# # Pr(forgery) < sum_{x = 1..4}(Pr(X = x) * (q*x / K2)^q) + Pr(x > 4) # # where Pr(x > 4) = 1 - sum_{x = 0..4}(Pr(X = x)) # # We use log arithmetic here because values very close to 1 cannot be represented accurately # in floating point, but their logarithms can (provided we use appropriate functions such as # log1p). lg_p = -lg_S lg_1_p = log1p(-pow(2, lg_p))/ln(2) # lg(1-p), computed accurately j = 5 lg_px = [lg_1_p * M]*j # We approximate lg(M-x) as lg(M) lg_px_step = lg_M + lg_p - lg_1_p for x in range(1, j): lg_px[x] = lg_px[x-1] - lg(x) + lg_px_step q = None # Find the minimum acceptable value of q. for q_cand in range(1, q_max+1): lg_q = lg(q_cand) lg_pforge = [lg_px[x] + (lg_q*x - lg_K2)*q_cand for x in range(1, j)] if max(lg_pforge) < -L_hash + lg(j) and lg_px[j-1] + 1.0 < -L_hash: #print("K = %d, K1 = %d, K2 = %d, L_hash = %d, lg_K2 = %.3f, q = %d, lg_pforge_1 = %.3f, lg_pforge_2 = %.3f, lg_pforge_3 = %.3f" # % (K, K1, K2, L_hash, lg_K2, q, lg_pforge_1, lg_pforge_2, lg_pforge_3)) q = q_cand break if q is None or q == last_q: # if q hasn't decreased, this will be strictly worse than the previous candidate continue last_q = q # number of compressions to compute the Merkle hashes (h_M, c_M, _) = trees[K] (h_M1, c_M1, _) = trees[K1] (h_M2, c_M2, (dau, tri)) = trees[K2] # B = generalized Winternitz base for B in range_B: # n is the number of digits needed to sign the message representative and checksum. # The representation is base-B, except that we allow the most significant digit # to be up to 2B-1. n_L = ceil_div(L_hash-1, lg(B)) firstL_max = floor_div(pow(2, L_hash)-1, pow(B, n_L-1)) C_max = firstL_max + (n_L-1)*(B-1) n_C = ceil_log(ceil_div(C_max, 2), B) n = n_L + n_C firstC_max = floor_div(C_max, pow(B, n_C-1)) # Total depth of Winternitz hash chains. The chains for the most significant # digit of the message representative and of the checksum may be a different # length to those for the other digits. c_D = (n-2)*(B-1) + firstL_max + firstC_max # number of compressions to hash a Winternitz public key c_W = compressions(n*L_hash) # bitlength of a single Winternitz signature and authentication path L_MW = (n + h_M ) * L_hash L_MW1 = (n + h_M1) * L_hash # bitlength of the HORS signature and authentication paths # For all but one of the q authentication paths, one of the sibling elements in # another path is made redundant where they intersect. This cancels out the hash # that would otherwise be needed at the bottom of the path, making the total # length of the signature q*h_M2 + 1 hashes, rather than q*(h_M2 + 1). L_leaf = (q*h_M2 + 1) * L_hash # length of the overall GMSS+HORS signature and seeds sig_bytes = ceil_div(L_MW1 + T*L_MW + L_leaf + L_prf + ceil(lg_N), 8) c_MW = K *(c_D + c_W) + c_M + ceil_div(K *n*L_hash, L_prf) c_MW1 = K1*(c_D + c_W) + c_M1 + ceil_div(K1*n*L_hash, L_prf) # For simplicity, c_sign and c_ver don't take into account compressions saved # as a result of intersecting authentication paths in the HORS signature, so # are slight overestimates. c_sign = c_MW1 + T*c_MW + q*(c_M2 + 1) + ceil_div(K2*L_hash, L_prf) # *expected* number of compressions to verify a signature c_ver = c_D/2.0 + c_W + c_M1 + T*(c_D/2.0 + c_W + c_M) + q*(c_M2 + 1) c_ver_pm = (1 + T)*c_D/2.0 candidates += make_candidate(B, K, K1, K2, q, T, T_min, L_hash, lg_N, sig_bytes, c_sign, c_ver, c_ver_pm) return candidates def search(): for L_hash in range_L_hash: print("collecting... \r", end=' ', file=stderr) collect() print("precomputing... 
\r", end=' ', file=stderr) """ # d/dq (lg(q+1) + L_hash/q) = 1/(ln(2)*(q+1)) - L_hash/q^2 # Therefore lg(q+1) + L_hash/q is at a minimum when 1/(ln(2)*(q+1)) = L_hash/q^2. # Let alpha = L_hash*ln(2), then from the quadratic formula, the integer q that # minimizes lg(q+1) + L_hash/q is the floor or ceiling of (alpha + sqrt(alpha^2 - 4*alpha))/2. # (We don't want the other solution near 0.) alpha = floor(L_hash*ln(2)) # float q = floor((alpha + sqrt(alpha*(alpha-4)))/2) if lg(q+2) + L_hash/(q+1) < lg(q+1) + L_hash/q: q += 1 lg_S_margin = lg(q+1) + L_hash/q q_max = int(q) q = floor(L_hash*ln(2)) # float if lg(q+1) + L_hash/(q+1) < lg(q) + L_hash/q: q += 1 lg_S_margin = lg(q) + L_hash/q q_max = int(q) """ q_max = 4000 # find optimal Merkle tree shapes for this L_hash and each K trees = {} K_max = 50 c2 = compressions(2*L_hash) c3 = compressions(3*L_hash) for dau in range(0, 10): a = pow(2, dau) for tri in range(0, ceil_log(30-dau, 3)): x = int(a*pow(3, tri)) h = dau + 2*tri c_x = int(sum_powers(2, dau)*c2 + a*sum_powers(3, tri)*c3) for y in range(1, x+1): if tri > 0: # If the bottom level has arity 3, then for every 2 nodes by which the tree is # imperfect, we can save c3 compressions by pruning 3 leaves back to their parent. # If the tree is imperfect by an odd number of nodes, we can prune one extra leaf, # possibly saving a compression if c2 < c3. c_y = c_x - floor_div(x-y, 2)*c3 - ((x-y) % 2)*(c3-c2) else: # If the bottom level has arity 2, then for each node by which the tree is # imperfect, we can save c2 compressions by pruning 2 leaves back to their parent. c_y = c_x - (x-y)*c2 if y not in trees or (h, c_y, (dau, tri)) < trees[y]: trees[y] = (h, c_y, (dau, tri)) #for x in range(1, K_max+1): # print(x, trees[x]) candidates = [] progress = 0 fuzz = 0 complete = (K_max-1)*(2200-200)/100 for K in range(2, K_max+1): for K2 in range(200, 2200, 100): for K1 in range(max(2, K-fuzz), min(K_max, K+fuzz)+1): candidates += calculate(K, K1, K2, q_max, L_hash, trees) progress += 1 print("searching: %3d %% \r" % (100.0 * progress / complete,), end=' ', file=stderr) print("filtering... \r", end=' ', file=stderr) step = 2.0 bins = {} limit = floor_div(limit_cost, step) for bin in range(0, limit+2): bins[bin] = [] for c in candidates: bin = floor_div(c['cost'], step) bins[bin] += [c] del candidates # For each in a range of signing times, find the best candidate. best = [] for bin in range(0, limit): candidates = bins[bin] + bins[bin+1] + bins[bin+2] if len(candidates) > 0: best += [min(candidates, key=lambda c: c['sig_bytes'])] def format_candidate(candidate): return ("%(B)3d %(K)3d %(K1)3d %(K2)5d %(q)4d %(T)4d " "%(L_hash)4d %(lg_N)5.1f %(sig_bytes)7d " "%(c_sign)7d (%(Mcycles_sign)7.2f) " "%(c_ver)7d +/-%(c_ver_pm)5d (%(Mcycles_ver)5.2f +/-%(Mcycles_ver_pm)5.2f) " ) % candidate print(" \r", end=' ', file=stderr) if len(best) > 0: print(" B K K1 K2 q T L_hash lg_N sig_bytes c_sign (Mcycles) c_ver ( Mcycles )") print("---- ---- ---- ------ ---- ---- ------ ------ --------- ------------------ --------------------------------") best.sort(key=lambda c: (c['sig_bytes'], c['cost'])) last_sign = None last_ver = None for c in best: if last_sign is None or c['c_sign'] < last_sign or c['c_ver'] < last_ver: print(format_candidate(c)) last_sign = c['c_sign'] last_ver = c['c_ver'] print() else: print("No candidates found for L_hash = %d or higher." 
% (L_hash)) return del bins del best print("Maximum signature size: %d bytes" % (limit_bytes,)) print("Maximum (signing + %d*verification) cost: %.1f Mcycles" % (weight_ver, limit_cost)) print("Hash parameters: %d-bit blocks with %d-bit padding and %d-bit labels, %.2f cycles per byte" \ % (L_block, L_pad, L_label, cycles_per_byte)) print("PRF output size: %d bits" % (L_prf,)) print("Security level given by L_hash is maintained for up to 2^%d signatures.\n" % (lg_M,)) search() tahoe_lafs-1.20.0/misc/simulators/ringsim.py0000644000000000000000000002002513615410400015775 0ustar00#! /usr/bin/python # used to discuss ticket #302: "stop permuting peerlist?" # import time import math from hashlib import md5 # sha1, sha256 myhash = md5 # md5: 1520 "uploads" per second # sha1: 1350 ups # sha256: 930 ups from itertools import count from twisted.python import usage def abbreviate_space(s, SI=True): if s is None: return "unknown" if SI: U = 1000.0 isuffix = "B" else: U = 1024.0 isuffix = "iB" def r(count, suffix): return "%.2f %s%s" % (count, suffix, isuffix) if s < 1024: # 1000-1023 get emitted as bytes, even in SI mode return "%d B" % s if s < U*U: return r(s/U, "k") if s < U*U*U: return r(s/(U*U), "M") if s < U*U*U*U: return r(s/(U*U*U), "G") if s < U*U*U*U*U: return r(s/(U*U*U*U), "T") return r(s/(U*U*U*U*U), "P") def make_up_a_file_size(seed): h = int(myhash(seed).hexdigest(),16) # exponential distribution e = 8 + (h % (31-8)) return 2 ** e # uniform distribution #max=2**31 #return h % max # avg 1GB sizes = [make_up_a_file_size(str(i)) for i in range(10000)] avg_filesize = sum(sizes)/len(sizes) print("average file size:", abbreviate_space(avg_filesize)) SERVER_CAPACITY = 10**12 class Server(object): def __init__(self, nodeid, capacity): self.nodeid = nodeid self.used = 0 self.capacity = capacity self.numshares = 0 self.full_at_tick = None def upload(self, sharesize): if self.used + sharesize < self.capacity: self.used += sharesize self.numshares += 1 return True return False def __repr__(self): if self.full_at_tick is not None: return "<%s %s full at %d>" % (self.__class__.__name__, self.nodeid, self.full_at_tick) else: return "<%s %s>" % (self.__class__.__name__, self.nodeid) class Ring(object): SHOW_MINMAX = False def __init__(self, numservers, seed, permute): self.servers = [] for i in range(numservers): nodeid = myhash(str(seed)+str(i)).hexdigest() capacity = SERVER_CAPACITY s = Server(nodeid, capacity) self.servers.append(s) self.servers.sort(key=lambda s: s.nodeid) self.permute = permute #self.list_servers() def list_servers(self): for i in range(len(self.servers)): s = self.servers[i] next_s = self.servers[(i+1)%len(self.servers)] diff = "%032x" % (int(next_s.nodeid,16) - int(s.nodeid,16)) s.next_diff = diff prev_s = self.servers[(i-1)%len(self.servers)] diff = "%032x" % (int(s.nodeid,16) - int(prev_s.nodeid,16)) s.prev_diff = diff print(s, s.prev_diff) print("sorted by delta") for s in sorted(self.servers, key=lambda s:s.prev_diff): print(s, s.prev_diff) def servers_for_si(self, si): if self.permute: def sortkey(s): return myhash(s.nodeid+si).digest() return sorted(self.servers, key=sortkey) for i in range(len(self.servers)): if self.servers[i].nodeid >= si: return self.servers[i:] + self.servers[:i] return list(self.servers) def show_servers(self, picked): bits = [] for s in self.servers: if s in picked: bits.append("1") else: bits.append("0") #d = [s in picked and "1" or "0" for s in self.servers] return "".join(bits) def dump_usage(self, numfiles, avg_space_per_file): print("uploaded", 
numfiles) # avg_space_per_file measures expected grid-wide ciphertext per file used = list(reversed(sorted([s.used for s in self.servers]))) # used is actual per-server ciphertext usedpf = [1.0*u/numfiles for u in used] # usedpf is actual per-server-per-file ciphertext #print("min/max usage: %s/%s" % (abbreviate_space(used[-1]), # abbreviate_space(used[0]))) avg_usage_per_file = avg_space_per_file/len(self.servers) # avg_usage_per_file is expected per-server-per-file ciphertext spreadpf = usedpf[0] - usedpf[-1] average_usagepf = sum(usedpf) / len(usedpf) variance = sum([(u-average_usagepf)**2 for u in usedpf])/(len(usedpf)-1) std_deviation = math.sqrt(variance) sd_of_total = std_deviation / avg_usage_per_file print("min/max/(exp) usage-pf-ps %s/%s/(%s):" % ( abbreviate_space(usedpf[-1]), abbreviate_space(usedpf[0]), abbreviate_space(avg_usage_per_file) ), end=' ') print("spread-pf: %s (%.2f%%)" % ( abbreviate_space(spreadpf), 100.0*spreadpf/avg_usage_per_file), end=' ') #print("average_usage:", abbreviate_space(average_usagepf)) print("stddev: %s (%.2f%%)" % (abbreviate_space(std_deviation), 100.0*sd_of_total)) if self.SHOW_MINMAX: s2 = sorted(self.servers, key=lambda s: s.used) print("least:", s2[0].nodeid) print("most:", s2[-1].nodeid) class Options(usage.Options): optParameters = [ ("k", "k", 3, "required shares", int), ("N", "N", 10, "total shares", int), ("servers", None, 100, "number of servers", int), ("seed", None, None, "seed to use for creating ring"), ("fileseed", None, "blah", "seed to use for creating files"), ("permute", "p", 1, "1 to permute, 0 to use flat ring", int), ] def postOptions(self): assert self["seed"] def do_run(ring, opts): avg_space_per_file = avg_filesize * opts["N"] / opts["k"] fileseed = opts["fileseed"] all_servers_have_room = True no_files_have_wrapped = True for filenum in count(0): #used = list(reversed(sorted([s.used for s in ring.servers]))) #used = [s.used for s in ring.servers] #print(used) si = myhash(fileseed+str(filenum)).hexdigest() filesize = make_up_a_file_size(si) sharesize = filesize / opts["k"] if filenum%4000==0 and filenum > 1: ring.dump_usage(filenum, avg_space_per_file) servers = ring.servers_for_si(si) #print(ring.show_servers(servers[:opts["N"]])) remaining_shares = opts["N"] index = 0 server_was_full = False file_was_wrapped = False remaining_servers = set(servers) while remaining_shares: if index >= len(servers): index = 0 file_was_wrapped = True s = servers[index] accepted = s.upload(sharesize) if not accepted: server_was_full = True remaining_servers.discard(s) if not remaining_servers: print("-- GRID IS FULL") ring.dump_usage(filenum, avg_space_per_file) return filenum index += 1 continue remaining_shares -= 1 index += 1 # file is done being uploaded if server_was_full and all_servers_have_room: all_servers_have_room = False print("-- FIRST SERVER FULL") ring.dump_usage(filenum, avg_space_per_file) if file_was_wrapped and no_files_have_wrapped: no_files_have_wrapped = False print("-- FIRST FILE WRAPPED") ring.dump_usage(filenum, avg_space_per_file) def do_ring(opts): total_capacity = opts["servers"]*SERVER_CAPACITY avg_space_per_file = avg_filesize * opts["N"] / opts["k"] avg_files = total_capacity / avg_space_per_file print("expected number of uploads:", avg_files) if opts["permute"]: print(" PERMUTED") else: print(" LINEAR") seed = opts["seed"] ring = Ring(opts["servers"], seed, opts["permute"]) do_run(ring, opts) def run(opts): do_ring(opts) if __name__ == "__main__": opts = Options() opts.parseOptions() run(opts) 
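#
# The script above (ringsim.py) compares two share-placement orderings for a
# given storage index "si": the hash-permuted server list that Tahoe uses
# today, and a flat ring walked from the first nodeid >= si (the alternative
# debated in ticket #302).  The short standalone sketch below is only an
# illustration of the logic in Ring.servers_for_si(); it is not part of
# ringsim.py, and the five node ids and the storage index are made-up values.
#
#     from hashlib import md5
#
#     # toy ring of five servers, identified by hex node ids, kept sorted
#     nodeids = sorted(md5(str(i).encode()).hexdigest() for i in range(5))
#     si = md5(b"example-storage-index").hexdigest()
#
#     # permuted ordering: each file sees its own pseudo-random server list,
#     # sorted by hash(nodeid + si) as servers_for_si() does with --permute=1
#     permuted = sorted(nodeids, key=lambda n: md5((n + si).encode()).digest())
#
#     # flat-ring ordering: start at the first nodeid at or after si and wrap
#     # around, matching the --permute=0 branch of servers_for_si()
#     start = next((i for i, n in enumerate(nodeids) if n >= si), 0)
#     linear = nodeids[start:] + nodeids[:start]
#
#     print("permuted:", permuted)
#     print("linear:  ", linear)
#
# The permuted ordering spreads each file's shares across a different subset
# of servers, while the flat ring concentrates consecutive storage indexes on
# the same run of servers; dump_usage() above measures how that difference
# shows up as per-server usage spread.
#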
tahoe_lafs-1.20.0/misc/simulators/simulate_load.py0000644000000000000000000001154013615410400017151 0ustar00#!/usr/bin/env python # WARNING. There is a bug in this script so that it does not simulate the actual Tahoe Two server selection algorithm that it was intended to simulate. See http://allmydata.org/trac/tahoe-lafs/ticket/302 (stop permuting peerlist, use SI as offset into ring instead?) from past.builtins import cmp import random SERVER_CAPACITY = 10**12 class Server(object): def __init__(self): self.si = random.randrange(0, 2**31) self.used = 0 self.max = SERVER_CAPACITY self.full_at_tick = None def __repr__(self): if self.full_at_tick is not None: return "<%s %s full at %d>" % (self.__class__.__name__, self.si, self.full_at_tick) else: return "<%s %s>" % (self.__class__.__name__, self.si) SERVERS = 4 K = 3 N = 10 def make_up_a_file_size(): return (2 ** random.randrange(8, 31)) def go(permutedpeerlist): servers = [ Server() for x in range(SERVERS) ] servers.sort(cmp=lambda x,y: cmp(x.si, y.si)) doubled_up_shares = 0 tick = 0 fullservers = 0 while True: nextsharesize = make_up_a_file_size() / K if permutedpeerlist: random.shuffle(servers) else: # rotate a random number rot = random.randrange(0, len(servers)) servers = servers[rot:] + servers[:rot] i = 0 wrapped = False sharestoput = N while sharestoput: server = servers[i] if server.used + nextsharesize < server.max: server.used += nextsharesize sharestoput -= 1 if wrapped: doubled_up_shares += 1 else: if server.full_at_tick is None: server.full_at_tick = tick fullservers += 1 if fullservers == len(servers): # print("Couldn't place share -- all servers full. Stopping.") return (servers, doubled_up_shares) i += 1 if i == len(servers): wrapped = True i = 0 tick += 1 def div_ceil(n, d): """ The smallest integer k such that k*d >= n. """ return (n/d) + (n%d != 0) DESIRED_COLUMNS = 70 START_FILES = 137000 STOP_FILES = 144000 def test(permutedpeerlist, iters): # The i'th element of the filledat list is how many servers got full when the i'th file was uploaded. filledat = [] for test in range(iters): (servers, doubled_up_shares) = go(permutedpeerlist) print("doubled_up_shares: ", doubled_up_shares) for server in servers: fidx = server.full_at_tick filledat.extend([0]*(fidx-len(filledat)+1)) filledat[fidx] += 1 startfiles = 0 while filledat[startfiles] == 0: startfiles += 1 filespercolumn = div_ceil(len(filledat) - startfiles, (DESIRED_COLUMNS - 3)) # to make comparisons between runs line up: # startfiles = START_FILES # filespercolumn = div_ceil(STOP_FILES - startfiles, (DESIRED_COLUMNS - 3)) # The i'th element of the compressedfilledat list is how many servers got full when the filespercolumn files starting at startfiles + i were uploaded. compressedfilledat = [] idx = startfiles while idx < len(filledat): compressedfilledat.append(0) for i in range(filespercolumn): compressedfilledat[-1] += filledat[idx] idx += 1 if idx >= len(filledat): break # The i'th element of the fullat list is how many servers were full by the tick numbered startfiles + i * filespercolumn (on average). fullat = [0] * len(compressedfilledat) for idx, num in enumerate(compressedfilledat): for fidx in range(idx, len(fullat)): fullat[fidx] += num for idx in range(len(fullat)): fullat[idx] = fullat[idx] / float(iters) # Now print it out as an ascii art graph. 
import sys for serversfull in range(40, 0, -1): sys.stdout.write("%2d " % serversfull) for numfull in fullat: if int(numfull) == serversfull: sys.stdout.write("*") else: sys.stdout.write(" ") sys.stdout.write("\n") sys.stdout.write(" ^-- servers full\n") idx = 0 while idx < len(fullat): nextmark = "%d--^ " % (startfiles + idx * filespercolumn) sys.stdout.write(nextmark) idx += len(nextmark) sys.stdout.write("\nfiles uploaded --> \n") if __name__ == "__main__": import sys iters = 16 for arg in sys.argv: if arg.startswith("--iters="): iters = int(arg[8:]) if "--permute" in sys.argv: print("doing permuted peerlist, iterations: %d" % iters) test(True, iters) else: print("doing simple ring, iterations: %d" % iters) test(False, iters) tahoe_lafs-1.20.0/misc/simulators/simulator.py0000644000000000000000000002171313615410400016351 0ustar00#! /usr/bin/env python import hashlib import os, random from pkg_resources import require require('PyRRD') from pyrrd import graph from pyrrd.rrd import DataSource, RRD, RRA def sha(s): return hashlib.sha1(s).digest() def randomid(): return os.urandom(20) class Node(object): def __init__(self, nid, introducer, simulator): self.nid = nid self.introducer = introducer self.simulator = simulator self.shares = {} self.capacity = random.randrange(1000) self.utilization = 0 self.files = [] def permute_peers(self, fileid): permuted = [(sha(fileid+n.nid),n) for n in self.introducer.get_all_nodes()] permuted.sort() return permuted def publish_file(self, fileid, size, numshares=100): sharesize = 4 * size / numshares permuted = self.permute_peers(fileid) last_givento = None tried = 0 givento = [] while numshares and permuted: pid,node = permuted.pop(0) tried += 1 last_givento = pid if node.accept_share(fileid, sharesize): givento.append((pid,node)) numshares -= 1 if numshares: # couldn't push, should delete for pid,node in givento: node.delete_share(fileid) return False self.files.append((fileid, numshares)) self.introducer.please_preserve(fileid, size, tried, last_givento) return (True, tried) def accept_share(self, fileid, sharesize): if self.utilization < self.capacity: # we have room! yay! 
self.shares[fileid] = sharesize self.utilization += sharesize return True if self.decide(sharesize): # we don't, but we'll make room self.make_space(sharesize) self.shares[fileid] = sharesize self.utilization += sharesize return True else: # we're full, try elsewhere return False def decide(self, sharesize): return False def make_space(self, sharesize): assert sharesize <= self.capacity while self.capacity - self.utilization < sharesize: victim = random.choice(self.shares.keys()) self.simulator.lost_data(self.shares[victim]) self.delete_share(victim) def delete_share(self, fileid): if fileid in self.shares: self.utilization -= self.shares[fileid] del self.shares[fileid] return True return False def retrieve_file(self): if not self.files: return fileid,numshares = random.choice(self.files) needed = numshares / 4 peers = [] for pid,node in self.permute_peers(fileid): if random.random() > self.simulator.P_NODEAVAIL: continue # node isn't available right now if node.has_share(fileid): peers.append(node) if len(peers) >= needed: return True return False def delete_file(self): if not self.files: return False which = random.choice(self.files) self.files.remove(which) fileid,numshares = which self.introducer.delete(fileid) return True class Introducer(object): def __init__(self, simulator): self.living_files = {} self.utilization = 0 # total size of all active files self.simulator = simulator self.simulator.stamp_utilization(self.utilization) def get_all_nodes(self): return self.all_nodes def please_preserve(self, fileid, size, tried, last_givento): self.living_files[fileid] = (size, tried, last_givento) self.utilization += size self.simulator.stamp_utilization(self.utilization) def please_delete(self, fileid): self.delete(fileid) def permute_peers(self, fileid): permuted = [(sha(fileid+n.nid),n) for n in self.get_all_nodes()] permuted.sort() return permuted def delete(self, fileid): permuted = self.permute_peers(fileid) size, tried, last_givento = self.living_files[fileid] pid = "" while tried and pid < last_givento: pid,node = permuted.pop(0) had_it = node.delete_share(fileid) if had_it: tried -= 1 self.utilization -= size self.simulator.stamp_utilization(self.utilization) del self.living_files[fileid] class Simulator(object): NUM_NODES = 1000 EVENTS = ["ADDFILE", "DELFILE", "ADDNODE", "DELNODE"] RATE_ADDFILE = 1.0 / 10 RATE_DELFILE = 1.0 / 20 RATE_ADDNODE = 1.0 / 3000 RATE_DELNODE = 1.0 / 4000 P_NODEAVAIL = 1.0 def __init__(self): self.time = 1164783600 # small numbers of seconds since the epoch confuse rrdtool self.prevstamptime = int(self.time) ds = DataSource(ds_name='utilizationds', ds_type='GAUGE', heartbeat=1) rra = RRA(cf='AVERAGE', xff=0.1, steps=1, rows=1200) self.rrd = RRD("/tmp/utilization.rrd", ds=[ds], rra=[rra], start=self.time) self.rrd.create() self.introducer = q = Introducer(self) self.all_nodes = [Node(randomid(), q, self) for i in range(self.NUM_NODES)] q.all_nodes = self.all_nodes self.next = [] self.schedule_events() self.verbose = False self.added_files = 0 self.added_data = 0 self.deleted_files = 0 self.published_files = [] self.failed_files = 0 self.lost_data_bytes = 0 # bytes deleted to make room for new shares def stamp_utilization(self, utilization): if int(self.time) > (self.prevstamptime+1): self.rrd.bufferValue(self.time, utilization) self.prevstamptime = int(self.time) def write_graph(self): self.rrd.update() self.rrd = None import gc gc.collect() def1 = graph.DataDefinition(vname="a", rrdfile='/tmp/utilization.rrd', ds_name='utilizationds') area1 = 
graph.Area(value="a", color="#990033", legend='utilizationlegend') g = graph.Graph('/tmp/utilization.png', imgformat='PNG', width=540, height=100, vertical_label='utilizationverticallabel', title='utilizationtitle', lower_limit=0) g.data.append(def1) g.data.append(area1) g.write() def add_file(self): size = random.randrange(1000) n = random.choice(self.all_nodes) if self.verbose: print("add_file(size=%d, from node %s)" % (size, n)) fileid = randomid() able = n.publish_file(fileid, size) if able: able, tried = able self.added_files += 1 self.added_data += size self.published_files.append(tried) else: self.failed_files += 1 def lost_data(self, size): self.lost_data_bytes += size def delete_file(self): all_nodes = self.all_nodes[:] random.shuffle(all_nodes) for n in all_nodes: if n.delete_file(): self.deleted_files += 1 return print("no files to delete") def _add_event(self, etype): rate = getattr(self, "RATE_" + etype) next = self.time + random.expovariate(rate) self.next.append((next, etype)) self.next.sort() def schedule_events(self): types = set([e[1] for e in self.next]) for etype in self.EVENTS: if not etype in types: self._add_event(etype) def do_event(self): time, etype = self.next.pop(0) assert time > self.time # current_time = self.time self.time = time self._add_event(etype) if etype == "ADDFILE": self.add_file() elif etype == "DELFILE": self.delete_file() elif etype == "ADDNODE": pass #self.add_node() elif etype == "DELNODE": #self.del_node() pass # self.print_stats(current_time, etype) def print_stats_header(self): print("time: added failed lost avg_tried") def print_stats(self, time, etype): if not self.published_files: avg_tried = "NONE" else: avg_tried = sum(self.published_files) / len(self.published_files) print(time, etype, self.added_data, self.failed_files, self.lost_data_bytes, avg_tried, len(self.introducer.living_files), self.introducer.utilization) s = None def main(): # rrdtool.create("foo.rrd", # "--step 10", # "DS:files-added:DERIVE::0:1000", # "RRA:AVERAGE:1:1:1200", # ) global s s = Simulator() # s.print_stats_header() for i in range(1000): s.do_event() print("%d files added, %d files deleted" % (s.added_files, s.deleted_files)) return s if __name__ == '__main__': main() tahoe_lafs-1.20.0/misc/simulators/sizes.py0000644000000000000000000001724513615410400015474 0ustar00#! 
/usr/bin/env python import random, math, re from twisted.python import usage class Args(usage.Options): optParameters = [ ["mode", "m", "alpha", "validation scheme"], ["arity", "k", 2, "k (airty) for hash tree"], ] def opt_arity(self, option): self['arity'] = int(option) def parseArgs(self, *args): if len(args) > 0: self['mode'] = args[0] def charttest(): import gdchart sizes = [random.randrange(10, 20) for i in range(10)] x = gdchart.Line() x.width = 250 x.height = 250 x.xtitle = "sample" x.ytitle = "size" x.title = "Example Graph" #x.ext_color = [ "white", "yellow", "red", "blue", "green"] x.setData(sizes) #x.setLabels(["Mon", "Tue", "Wed", "Thu", "Fri"]) x.draw("simple.png") KiB=1024 MiB=1024*KiB GiB=1024*MiB TiB=1024*GiB PiB=1024*TiB class Sizes(object): def __init__(self, mode, file_size, arity=2): MAX_SEGSIZE = 128*KiB self.mode = mode self.file_size = file_size self.seg_size = seg_size = 1.0 * min(MAX_SEGSIZE, file_size) self.num_segs = num_segs = math.ceil(file_size / seg_size) self.num_blocks = num_blocks = num_segs self.num_shares = num_shares = 10 self.shares_needed = shares_needed = 3 self.block_size = block_size = seg_size / shares_needed self.share_size = share_size = block_size * num_blocks # none of this includes the share-level hash chain yet, since that is # only a function of the number of shares. All overhead numbers # assume that the share-level hash chain has already been sent, # including the root of the block-level hash tree. if mode == "alpha": # no hash tree at all self.block_arity = 0 self.block_tree_depth = 0 self.block_overhead = 0 self.bytes_until_some_data = 32 + share_size self.share_storage_overhead = 0 self.share_transmission_overhead = 0 elif mode == "beta": # k=num_blocks, d=1 # each block has a 32-byte hash self.block_arity = num_blocks self.block_tree_depth = 1 self.block_overhead = 32 # the share has a list of hashes, one for each block self.share_storage_overhead = (self.block_overhead * num_blocks) # we can get away with not sending the hash of the share that # we're sending in full, once self.share_transmission_overhead = self.share_storage_overhead - 32 # we must get the whole list (so it can be validated) before # any data can be validated self.bytes_until_some_data = (self.share_transmission_overhead + block_size) elif mode == "gamma": self.block_arity = k = arity d = math.ceil(math.log(num_blocks, k)) self.block_tree_depth = d num_leaves = k ** d # to make things easier, we make the pessimistic assumption that # we have to store hashes for all the empty places in the tree # (when the number of shares is not an exact exponent of k) self.block_overhead = 32 # the block hashes are organized into a k-ary tree, which # means storing (and eventually transmitting) more hashes. This # count includes all the low-level share hashes and the root. 
hash_nodes = (num_leaves*k - 1) / (k - 1) #print("hash_depth", d) #print("num_leaves", num_leaves) #print("hash_nodes", hash_nodes) # the storage overhead is this self.share_storage_overhead = 32 * (hash_nodes - 1) # the transmission overhead is smaller: if we actually transmit # every block, we don't have to transmit 1/k of the # lowest-level block hashes, and we don't have to transmit the # root because it was already sent with the share-level hash tree self.share_transmission_overhead = 32 * (hash_nodes - 1 # the root - num_leaves / k) # we must get a full sibling hash chain before we can validate # any data sibling_length = d * (k-1) self.bytes_until_some_data = 32 * sibling_length + block_size else: raise ValueError("unknown mode '%s" % mode) self.storage_overhead = self.share_storage_overhead * num_shares self.storage_overhead_percentage = 100.0 * self.storage_overhead / file_size def dump(self): for k in ("mode", "file_size", "seg_size", "num_segs", "num_blocks", "num_shares", "shares_needed", "block_size", "share_size", "block_arity", "block_tree_depth", "block_overhead", "share_storage_overhead", "share_transmission_overhead", "storage_overhead", "storage_overhead_percentage", "bytes_until_some_data"): print(k, getattr(self, k)) def fmt(num, trim=False): if num < KiB: #s = str(num) + "#" s = "%.2f#" % num elif num < MiB: s = "%.2fk" % (num / KiB) elif num < GiB: s = "%.2fM" % (num / MiB) elif num < TiB: s = "%.2fG" % (num / GiB) elif num < PiB: s = "%.2fT" % (num / TiB) else: s = "big" if trim: s = re.sub(r'(\.0+)([kMGT#])', lambda m: m.group(2), s) else: s = re.sub(r'(\.0+)([kMGT#])', lambda m: (" "*len(m.group(1))+m.group(2)), s) if s.endswith("#"): s = s[:-1] + " " return s def text(): opts = Args() opts.parseOptions() mode = opts["mode"] arity = opts["arity"] # 0123456789012345678901234567890123456789012345678901234567890123456 print("mode=%s" % mode, " arity=%d" % arity) print(" storage storage") print("Size sharesize overhead overhead k d alacrity") print(" (bytes) (%)") print("------- ------- -------- -------- ---- -- --------") #sizes = [2 ** i for i in range(7, 41)] #radix = math.sqrt(10); expstep = 2 radix = 2; expstep = 2 #radix = 10; expstep = 1 maxexp = int(math.ceil(math.log(1e12, radix)))+2 sizes = [radix ** i for i in range(2,maxexp,expstep)] for file_size in sizes: s = Sizes(mode, file_size, arity) out = "" out += "%7s " % fmt(file_size, trim=True) out += "%7s " % fmt(s.share_size) out += "%8s" % fmt(s.storage_overhead) out += "%10.2f " % s.storage_overhead_percentage out += " %4d" % int(s.block_arity) out += " %2d" % int(s.block_tree_depth) out += " %8s" % fmt(s.bytes_until_some_data) print(out) def graph(): # doesn't work yet import Gnuplot opts = Args() opts.parseOptions() mode = opts["mode"] arity = opts["arity"] g = Gnuplot.Gnuplot(debug=1) g.title("overhead / alacrity tradeoffs") g.xlabel("file size") g.ylabel("stuff") sizes = [2 ** i for i in range(7, 32)] series = {"overhead": {}, "alacrity": {}} for file_size in sizes: s = Sizes(mode, file_size, arity) series["overhead"][file_size] = s.storage_overhead_percentage series["alacrity"][file_size] = s.bytes_until_some_data g.plot([ (fs, series["overhead"][fs]) for fs in sizes ]) input("press return") if __name__ == '__main__': text() #graph() tahoe_lafs-1.20.0/misc/simulators/storage-overhead.py0000644000000000000000000000602113615410400017564 0ustar00#!/usr/bin/env python import sys, math from allmydata import uri, storage from allmydata.immutable import upload from allmydata.interfaces import 
DEFAULT_IMMUTABLE_MAX_SEGMENT_SIZE from allmydata.util import mathutil def roundup(size, blocksize=4096): return blocksize * mathutil.div_ceil(size, blocksize) class BigFakeString(object): def __init__(self, length): self.length = length self.fp = 0 def seek(self, offset, whence=0): if whence == 0: self.fp = offset elif whence == 1: self.fp += offset elif whence == 2: self.fp = self.length - offset def tell(self): return self.fp def calc(filesize, params=(3,7,10), segsize=DEFAULT_IMMUTABLE_MAX_SEGMENT_SIZE): num_shares = params[2] if filesize <= upload.Uploader.URI_LIT_SIZE_THRESHOLD: urisize = len(uri.LiteralFileURI("A"*filesize).to_string()) sharesize = 0 sharespace = 0 else: u = upload.FileUploader(None) # XXX changed u.set_params(params) # unfortunately, Encoder doesn't currently lend itself to answering # this question without measuring a filesize, so we have to give it a # fake one data = BigFakeString(filesize) u.set_filehandle(data) u.set_encryption_key("a"*16) sharesize, blocksize = u.setup_encoder() # how much overhead? # 0x20 bytes of offsets # 0x04 bytes of extension length # 0x1ad bytes of extension (=429) # total is 465 bytes num_segments = mathutil.div_ceil(filesize, segsize) num_share_hashes = int(math.log(mathutil.next_power_of_k(num_shares, 2), 2)) + 1 sharesize = storage.allocated_size(sharesize, num_segments, num_share_hashes, 429) sharespace = num_shares * roundup(sharesize) urisize = len(uri.pack_uri(storage_index="a"*32, key="a"*16, uri_extension_hash="a"*32, needed_shares=params[0], total_shares=params[2], size=filesize)) return urisize, sharesize, sharespace def main(): filesize = int(sys.argv[1]) urisize, sharesize, sharespace = calc(filesize) print("urisize:", urisize) print("sharesize: %10d" % sharesize) print("sharespace: %10d" % sharespace) print("desired expansion: %1.1f" % (1.0 * 10 / 3)) print("effective expansion: %1.1f" % (1.0 * sharespace / filesize)) def chart(): filesize = 2 while filesize < 2**20: urisize, sharesize, sharespace = calc(int(filesize)) expansion = 1.0 * sharespace / int(filesize) print("%d,%d,%d,%1.2f" % (int(filesize), urisize, sharespace, expansion)) filesize = filesize * 2**0.5 if __name__ == '__main__': if sys.argv[1] == "chart": chart() else: main() tahoe_lafs-1.20.0/misc/windows-enospc/passthrough.py0000644000000000000000000000174413615410400017460 0ustar00""" Writing to non-blocking pipe can result in ENOSPC when using Unix APIs on Windows. So, this program passes through data from stdin to stdout, using Windows APIs instead of Unix-y APIs. """ from twisted.internet.stdio import StandardIO from twisted.internet import reactor from twisted.internet.protocol import Protocol from twisted.internet.interfaces import IHalfCloseableProtocol from twisted.internet.error import ReactorNotRunning from zope.interface import implementer @implementer(IHalfCloseableProtocol) class Passthrough(Protocol): def readConnectionLost(self): self.transport.loseConnection() def writeConnectionLost(self): try: reactor.stop() except ReactorNotRunning: pass def dataReceived(self, data): self.transport.write(data) def connectionLost(self, reason): try: reactor.stop() except ReactorNotRunning: pass std = StandardIO(Passthrough()) reactor.run() tahoe_lafs-1.20.0/src/allmydata/__init__.py0000644000000000000000000000421713615410400015473 0ustar00""" Decentralized storage grid. 
community web site: U{https://tahoe-lafs.org/} """ __all__ = [ "__version__", "full_version", "branch", "__appname__", "__full_version__", ] __version__ = "unknown" try: # type ignored as it fails in CI # (https://app.circleci.com/pipelines/github/tahoe-lafs/tahoe-lafs/1647/workflows/60ae95d4-abe8-492c-8a03-1ad3b9e42ed3/jobs/40972) from allmydata._version import __version__ # type: ignore except ImportError: # We're running in a tree that hasn't run update_version, and didn't # come with a _version.py, so we don't know what our version is. # This should not happen very often. pass full_version = "unknown" branch = "unknown" try: # type ignored as it fails in CI # (https://app.circleci.com/pipelines/github/tahoe-lafs/tahoe-lafs/1647/workflows/60ae95d4-abe8-492c-8a03-1ad3b9e42ed3/jobs/40972) from allmydata._version import full_version, branch # type: ignore except ImportError: # We're running in a tree that hasn't run update_version, and didn't # come with a _version.py, so we don't know what our full version or # branch is. This should not happen very often. pass __appname__ = "tahoe-lafs" # __full_version__ is the one that you ought to use when identifying yourself # in the "application" part of the Tahoe versioning scheme: # https://tahoe-lafs.org/trac/tahoe-lafs/wiki/Versioning __full_version__ = __appname__ + '/' + str(__version__) # Monkey-patch 3rd party libraries: from ._monkeypatch import patch patch() del patch # On Python 3, turn BytesWarnings into exceptions. This can have potential # production impact... if BytesWarnings are actually present in the codebase. # Given that this has been enabled before Python 3 Tahoe-LAFS was publicly # released, no such code should exist, and this will ensure it doesn't get # added either. # # Also note that BytesWarnings only happen if Python is run with -b option, so # in practice this should only affect tests. import warnings # Error on BytesWarnings, to catch things like str(b""), but only for # allmydata code. warnings.filterwarnings("error", category=BytesWarning, module=".*allmydata.*") tahoe_lafs-1.20.0/src/allmydata/__main__.py0000644000000000000000000000020213615410400015442 0ustar00""" Ported to Python 3. """ import sys from allmydata.scripts.runner import run if __name__ == "__main__": sys.exit(run()) tahoe_lafs-1.20.0/src/allmydata/_monkeypatch.py0000644000000000000000000000022313615410400016406 0ustar00""" Monkey-patching of third party libraries. Ported to Python 3. """ def patch(): """Path third-party libraries to make Tahoe-LAFS work.""" tahoe_lafs-1.20.0/src/allmydata/_version.py0000644000000000000000000000063513615410400015560 0ustar00# file generated by setuptools_scm # don't change, don't track in version control TYPE_CHECKING = False if TYPE_CHECKING: from typing import Tuple, Union VERSION_TUPLE = Tuple[Union[int, str], ...] else: VERSION_TUPLE = object version: str __version__: str __version_tuple__: VERSION_TUPLE version_tuple: VERSION_TUPLE __version__ = version = '1.20.0' __version_tuple__ = version_tuple = (1, 20, 0) tahoe_lafs-1.20.0/src/allmydata/blacklist.py0000644000000000000000000001150013615410400015675 0ustar00""" Ported to Python 3. 
""" import os from zope.interface import implementer from twisted.internet import defer from twisted.python import log as twisted_log from allmydata.interfaces import IFileNode, IFilesystemNode from allmydata.util import base32 from allmydata.util.encodingutil import quote_output class FileProhibited(Exception): """This client has been configured to prohibit access to this object.""" def __init__(self, reason): Exception.__init__(self, "Access Prohibited: %s" % quote_output(reason, encoding='utf-8', quotemarks=False)) self.reason = reason class Blacklist(object): def __init__(self, blacklist_fn): self.blacklist_fn = blacklist_fn self.last_mtime = None self.entries = {} self.read_blacklist() # sets .last_mtime and .entries def read_blacklist(self): try: current_mtime = os.stat(self.blacklist_fn).st_mtime except EnvironmentError: # unreadable blacklist file means no blacklist self.entries.clear() return try: if self.last_mtime is None or current_mtime > self.last_mtime: self.entries.clear() with open(self.blacklist_fn, "rb") as f: for line in f: line = line.strip() if not line or line.startswith(b"#"): continue si_s, reason = line.split(None, 1) si = base32.a2b(si_s) # must be valid base32 self.entries[si] = reason self.last_mtime = current_mtime except Exception as e: twisted_log.err(e, "unparseable blacklist file") raise def check_storageindex(self, si): self.read_blacklist() reason = self.entries.get(si, None) if reason is not None: # log this to logs/twistd.log, since web logs go there too twisted_log.msg("blacklist prohibited access to SI %r: %r" % (base32.b2a(si), reason)) return reason @implementer(IFileNode) class ProhibitedNode(object): def __init__(self, wrapped_node, reason): assert IFilesystemNode.providedBy(wrapped_node), wrapped_node self.wrapped_node = wrapped_node self.reason = reason def get_cap(self): return self.wrapped_node.get_cap() def get_readcap(self): return self.wrapped_node.get_readcap() def is_readonly(self): return self.wrapped_node.is_readonly() def is_mutable(self): return self.wrapped_node.is_mutable() def is_unknown(self): return self.wrapped_node.is_unknown() def is_allowed_in_immutable_directory(self): return self.wrapped_node.is_allowed_in_immutable_directory() def is_alleged_immutable(self): return self.wrapped_node.is_alleged_immutable() def raise_error(self): # We don't raise an exception here because that would prevent the node from being listed. pass def get_uri(self): return self.wrapped_node.get_uri() def get_write_uri(self): return self.wrapped_node.get_write_uri() def get_readonly_uri(self): return self.wrapped_node.get_readonly_uri() def get_storage_index(self): return self.wrapped_node.get_storage_index() def get_verify_cap(self): return self.wrapped_node.get_verify_cap() def get_repair_cap(self): return self.wrapped_node.get_repair_cap() def get_size(self): return None def get_current_size(self): return defer.succeed(None) def get_size_of_best_version(self): return defer.succeed(None) def check(self, monitor, verify, add_lease): return defer.succeed(None) def check_and_repair(self, monitor, verify, add_lease): return defer.succeed(None) def get_version(self): return None # Omitting any of these methods would fail safe; they are just to ensure correct error reporting. 
def get_best_readable_version(self): raise FileProhibited(self.reason) def download_best_version(self): raise FileProhibited(self.reason) def get_best_mutable_version(self): raise FileProhibited(self.reason) def overwrite(self, new_contents): raise FileProhibited(self.reason) def modify(self, modifier_cb): raise FileProhibited(self.reason) def get_servermap(self, mode): raise FileProhibited(self.reason) def download_version(self, servermap, version): raise FileProhibited(self.reason) def upload(self, new_contents, servermap): raise FileProhibited(self.reason) def get_writekey(self): raise FileProhibited(self.reason) def read(self, consumer, offset=0, size=None): raise FileProhibited(self.reason) tahoe_lafs-1.20.0/src/allmydata/check_results.py0000644000000000000000000002723113615410400016573 0ustar00"""Ported to Python 3. """ from zope.interface import implementer from allmydata.interfaces import ICheckResults, ICheckAndRepairResults, \ IDeepCheckResults, IDeepCheckAndRepairResults, IURI, IDisplayableServer from allmydata.util import base32 @implementer(ICheckResults) class CheckResults(object): def __init__(self, uri, storage_index, healthy, recoverable, count_happiness, count_shares_needed, count_shares_expected, count_shares_good, count_good_share_hosts, count_recoverable_versions, count_unrecoverable_versions, servers_responding, sharemap, count_wrong_shares, list_corrupt_shares, count_corrupt_shares, list_incompatible_shares, count_incompatible_shares, summary, report, share_problems, servermap): assert IURI.providedBy(uri), uri self._uri = uri self._storage_index = storage_index self._summary = "" self._healthy = bool(healthy) if self._healthy: assert recoverable if not summary: summary = "healthy" else: if not summary: summary = "not healthy" self._recoverable = recoverable if not self._recoverable: assert not self._healthy self._count_happiness = count_happiness self._count_shares_needed = count_shares_needed self._count_shares_expected = count_shares_expected self._count_shares_good = count_shares_good self._count_good_share_hosts = count_good_share_hosts self._count_recoverable_versions = count_recoverable_versions self._count_unrecoverable_versions = count_unrecoverable_versions for server in servers_responding: assert IDisplayableServer.providedBy(server), server self._servers_responding = servers_responding for shnum, servers in sharemap.items(): for server in servers: assert IDisplayableServer.providedBy(server), server self._sharemap = sharemap self._count_wrong_shares = count_wrong_shares for (server, SI, shnum) in list_corrupt_shares: assert IDisplayableServer.providedBy(server), server self._list_corrupt_shares = list_corrupt_shares self._count_corrupt_shares = count_corrupt_shares for (server, SI, shnum) in list_incompatible_shares: assert IDisplayableServer.providedBy(server), server self._list_incompatible_shares = list_incompatible_shares self._count_incompatible_shares = count_incompatible_shares # On Python 2, we can mix bytes and Unicode. On Python 3, we want # unicode. 
if isinstance(summary, bytes): summary = str(summary, "utf-8") assert isinstance(summary, str) # should be a single string self._summary = summary assert not isinstance(report, str) # should be list of strings self._report = report if servermap: from allmydata.mutable.servermap import ServerMap assert isinstance(servermap, ServerMap), servermap self._servermap = servermap # mutable only self._share_problems = share_problems def get_storage_index(self): return self._storage_index def get_storage_index_string(self): return base32.b2a(self._storage_index) def get_uri(self): return self._uri def is_healthy(self): return self._healthy def is_recoverable(self): return self._recoverable def get_happiness(self): return self._count_happiness def get_encoding_needed(self): return self._count_shares_needed def get_encoding_expected(self): return self._count_shares_expected def get_share_counter_good(self): return self._count_shares_good def get_share_counter_wrong(self): return self._count_wrong_shares def get_corrupt_shares(self): return self._list_corrupt_shares def get_incompatible_shares(self): return self._list_incompatible_shares def get_servers_responding(self): return self._servers_responding def get_host_counter_good_shares(self): return self._count_good_share_hosts def get_version_counter_recoverable(self): return self._count_recoverable_versions def get_version_counter_unrecoverable(self): return self._count_unrecoverable_versions def get_sharemap(self): return self._sharemap def as_dict(self): sharemap = {} for shnum, servers in self._sharemap.items(): sharemap[shnum] = sorted([s.get_serverid() for s in servers]) responding = [s.get_serverid() for s in self._servers_responding] corrupt = [(s.get_serverid(), SI, shnum) for (s, SI, shnum) in self._list_corrupt_shares] incompatible = [(s.get_serverid(), SI, shnum) for (s, SI, shnum) in self._list_incompatible_shares] d = {"count-happiness": self._count_happiness, "count-shares-needed": self._count_shares_needed, "count-shares-expected": self._count_shares_expected, "count-shares-good": self._count_shares_good, "count-good-share-hosts": self._count_good_share_hosts, "count-recoverable-versions": self._count_recoverable_versions, "count-unrecoverable-versions": self._count_unrecoverable_versions, "servers-responding": responding, "sharemap": sharemap, "count-wrong-shares": self._count_wrong_shares, "list-corrupt-shares": corrupt, "count-corrupt-shares": self._count_corrupt_shares, "list-incompatible-shares": incompatible, "count-incompatible-shares": self._count_incompatible_shares, } return d def get_summary(self): return self._summary def get_report(self): return self._report def get_share_problems(self): return self._share_problems def get_servermap(self): return self._servermap @implementer(ICheckAndRepairResults) class CheckAndRepairResults(object): def __init__(self, storage_index): self.storage_index = storage_index self.repair_attempted = False def get_storage_index(self): return self.storage_index def get_storage_index_string(self): return base32.b2a(self.storage_index) def get_repair_attempted(self): return self.repair_attempted def get_repair_successful(self): if not self.repair_attempted: return False return self.repair_successful def get_pre_repair_results(self): return self.pre_repair_results def get_post_repair_results(self): return self.post_repair_results class DeepResultsBase(object): def __init__(self, root_storage_index): self.root_storage_index = root_storage_index if root_storage_index is None: self.root_storage_index_s = 
"" # is this correct? else: self.root_storage_index_s = base32.b2a(root_storage_index) self.objects_checked = 0 self.objects_healthy = 0 self.objects_unhealthy = 0 self.objects_unrecoverable = 0 self.corrupt_shares = [] self.all_results = {} self.all_results_by_storage_index = {} self.stats = {} def update_stats(self, new_stats): self.stats.update(new_stats) def get_root_storage_index_string(self): return self.root_storage_index_s def get_corrupt_shares(self): return self.corrupt_shares def get_all_results(self): return self.all_results def get_results_for_storage_index(self, storage_index): return self.all_results_by_storage_index[storage_index] def get_stats(self): return self.stats @implementer(IDeepCheckResults) class DeepCheckResults(DeepResultsBase): def add_check(self, r, path): if not r: return # non-distributed object, i.e. LIT file r = ICheckResults(r) assert isinstance(path, (list, tuple)) self.objects_checked += 1 if r.is_healthy(): self.objects_healthy += 1 else: self.objects_unhealthy += 1 if not r.is_recoverable(): self.objects_unrecoverable += 1 self.all_results[tuple(path)] = r self.all_results_by_storage_index[r.get_storage_index()] = r self.corrupt_shares.extend(r.get_corrupt_shares()) def get_counters(self): return {"count-objects-checked": self.objects_checked, "count-objects-healthy": self.objects_healthy, "count-objects-unhealthy": self.objects_unhealthy, "count-objects-unrecoverable": self.objects_unrecoverable, "count-corrupt-shares": len(self.corrupt_shares), } @implementer(IDeepCheckAndRepairResults) class DeepCheckAndRepairResults(DeepResultsBase): def __init__(self, root_storage_index): DeepResultsBase.__init__(self, root_storage_index) self.objects_healthy_post_repair = 0 self.objects_unhealthy_post_repair = 0 self.objects_unrecoverable_post_repair = 0 self.repairs_attempted = 0 self.repairs_successful = 0 self.repairs_unsuccessful = 0 self.corrupt_shares_post_repair = [] def add_check_and_repair(self, r, path): if not r: return # non-distributed object, i.e. 
LIT file r = ICheckAndRepairResults(r) assert isinstance(path, (list, tuple)) pre_repair = r.get_pre_repair_results() post_repair = r.get_post_repair_results() self.objects_checked += 1 if pre_repair.is_healthy(): self.objects_healthy += 1 else: self.objects_unhealthy += 1 if not pre_repair.is_recoverable(): self.objects_unrecoverable += 1 self.corrupt_shares.extend(pre_repair.get_corrupt_shares()) if r.get_repair_attempted(): self.repairs_attempted += 1 if r.get_repair_successful(): self.repairs_successful += 1 else: self.repairs_unsuccessful += 1 if post_repair.is_healthy(): self.objects_healthy_post_repair += 1 else: self.objects_unhealthy_post_repair += 1 if not post_repair.is_recoverable(): self.objects_unrecoverable_post_repair += 1 self.all_results[tuple(path)] = r self.all_results_by_storage_index[r.get_storage_index()] = r self.corrupt_shares_post_repair.extend(post_repair.get_corrupt_shares()) def get_counters(self): return {"count-objects-checked": self.objects_checked, "count-objects-healthy-pre-repair": self.objects_healthy, "count-objects-unhealthy-pre-repair": self.objects_unhealthy, "count-objects-unrecoverable-pre-repair": self.objects_unrecoverable, "count-objects-healthy-post-repair": self.objects_healthy_post_repair, "count-objects-unhealthy-post-repair": self.objects_unhealthy_post_repair, "count-objects-unrecoverable-post-repair": self.objects_unrecoverable_post_repair, "count-repairs-attempted": self.repairs_attempted, "count-repairs-successful": self.repairs_successful, "count-repairs-unsuccessful": self.repairs_unsuccessful, "count-corrupt-shares-pre-repair": len(self.corrupt_shares), "count-corrupt-shares-post-repair": len(self.corrupt_shares_post_repair), } def get_remaining_corrupt_shares(self): return self.corrupt_shares_post_repair tahoe_lafs-1.20.0/src/allmydata/client.py0000644000000000000000000013104513615410400015212 0ustar00""" Functionality related to operating a Tahoe-LAFS node (client _or_ server). 
""" from __future__ import annotations import os import stat import time import weakref from typing import Optional, Iterable from base64 import urlsafe_b64encode from functools import partial from configparser import NoSectionError from six import ensure_text from foolscap.furl import ( decode_furl, ) import attr from zope.interface import implementer from twisted.plugin import ( getPlugins, ) from twisted.internet import reactor, defer from twisted.application import service from twisted.application.internet import TimerService from twisted.python.filepath import FilePath import allmydata from allmydata import node from allmydata.crypto import rsa, ed25519 from allmydata.crypto.util import remove_prefix from allmydata.dirnode import DirectoryNode from allmydata.storage.server import StorageServer, FoolscapStorageServer from allmydata import storage_client from allmydata.immutable.upload import Uploader from allmydata.immutable.offloaded import Helper from allmydata.mutable.filenode import MutableFileNode from allmydata.introducer.client import IntroducerClient from allmydata.util import ( hashutil, base32, pollmixin, log, idlib, yamlutil, configutil, fileutil, ) from allmydata.util.encodingutil import get_filesystem_encoding from allmydata.util.abbreviate import parse_abbreviated_size from allmydata.util.time_format import parse_duration, parse_date from allmydata.util.i2p_provider import create as create_i2p_provider from allmydata.util.tor_provider import create as create_tor_provider, _Provider as TorProvider from allmydata.util.cputhreadpool import defer_to_thread from allmydata.util.deferredutil import async_to_deferred from allmydata.stats import StatsProvider from allmydata.history import History from allmydata.interfaces import ( IStatsProducer, SDMF_VERSION, MDMF_VERSION, DEFAULT_IMMUTABLE_MAX_SEGMENT_SIZE, IFoolscapStoragePlugin, IAnnounceableStorageServer, ) from allmydata.nodemaker import NodeMaker from allmydata.blacklist import Blacklist from allmydata.node import _Config KiB=1024 MiB=1024*KiB GiB=1024*MiB TiB=1024*GiB PiB=1024*TiB def _is_valid_section(section_name): """ Check for valid dynamic configuration section names. Currently considers all possible storage server plugin sections valid. """ return ( section_name.startswith("storageserver.plugins.") or section_name.startswith("storageclient.plugins.") or section_name in ("grid_managers", "grid_manager_certificates") ) _client_config = configutil.ValidConfiguration( static_valid_sections={ "client": ( "helper.furl", "introducer.furl", "key_generator.furl", "mutable.format", "peers.preferred", "shares.happy", "shares.needed", "shares.total", "shares._max_immutable_segment_size_for_testing", "storage.plugins", "force_foolscap", ), "storage": ( "debug_discard", "enabled", "anonymous", "expire.cutoff_date", "expire.enabled", "expire.immutable", "expire.mode", "expire.mode", "expire.mutable", "expire.override_lease_duration", "readonly", "reserved_space", "storage_dir", "plugins", "grid_management", "force_foolscap", ), "sftpd": ( "accounts.file", "enabled", "host_privkey_file", "host_pubkey_file", "port", ), "helper": ( "enabled", ), }, is_valid_section=_is_valid_section, # Anything in a valid section is a valid item, for now. 
is_valid_item=lambda section, ignored: _is_valid_section(section), ) def _valid_config(): cfg = node._common_valid_config() return cfg.update(_client_config) # this is put into README in new node-directories CLIENT_README = u""" This directory contains files which contain private data for the Tahoe node, such as private keys. On Unix-like systems, the permissions on this directory are set to disallow users other than its owner from reading the contents of the files. See the 'configuration.rst' documentation file for details. """ def _make_secret(): """ Returns a base32-encoded random secret of hashutil.CRYPTO_VAL_SIZE bytes. """ return base32.b2a(os.urandom(hashutil.CRYPTO_VAL_SIZE)) + b"\n" class SecretHolder(object): def __init__(self, lease_secret, convergence_secret): self._lease_secret = lease_secret self._convergence_secret = convergence_secret def get_renewal_secret(self): return hashutil.my_renewal_secret_hash(self._lease_secret) def get_cancel_secret(self): return hashutil.my_cancel_secret_hash(self._lease_secret) def get_convergence_secret(self): return self._convergence_secret class KeyGenerator(object): """I create RSA keys for mutable files. Each call to generate() returns a single keypair.""" @async_to_deferred async def generate(self) -> tuple[rsa.PublicKey, rsa.PrivateKey]: """ I return a Deferred that fires with a (verifyingkey, signingkey) pair. The returned key will be 2048 bit. """ keysize = 2048 private, public = await defer_to_thread( rsa.create_signing_keypair, keysize ) return public, private class Terminator(service.Service): def __init__(self): self._clients = weakref.WeakKeyDictionary() def register(self, c): self._clients[c] = None def stopService(self): for c in self._clients: c.stop() return service.Service.stopService(self) def read_config(basedir, portnumfile, generated_files: Iterable=()): """ Read and validate configuration for a client-style Node. See :method:`allmydata.node.read_config` for parameter meanings (the only difference here is we pass different validation data) :returns: :class:`allmydata.node._Config` instance """ return node.read_config( basedir, portnumfile, generated_files=generated_files, _valid_config=_valid_config(), ) config_from_string = partial( node.config_from_string, _valid_config=_valid_config(), ) def create_client(basedir=u".", _client_factory=None): """ Creates a new client instance (a subclass of Node). :param unicode basedir: the node directory (which may not exist yet) :param _client_factory: (for testing) a callable that returns an instance of :class:`allmydata.node.Node` (or a subclass). By default this is :class:`allmydata.client._Client` :returns: Deferred yielding an instance of :class:`allmydata.client._Client` """ try: node.create_node_dir(basedir, CLIENT_README) config = read_config(basedir, u"client.port") # following call is async return create_client_from_config( config, _client_factory=_client_factory, ) except Exception: return defer.fail() @defer.inlineCallbacks def create_client_from_config(config, _client_factory=None, _introducer_factory=None): """ Creates a new client instance (a subclass of Node). Most code should probably use `create_client` instead. :returns: Deferred yielding a _Client instance :param config: configuration instance (from read_config()) which encapsulates everything in the "node directory". 
:param _client_factory: for testing; the class to instantiate instead of _Client :param _introducer_factory: for testing; the class to instantiate instead of IntroducerClient """ if _client_factory is None: _client_factory = _Client i2p_provider = create_i2p_provider(reactor, config) tor_provider = create_tor_provider(reactor, config) handlers = node.create_connection_handlers(config, i2p_provider, tor_provider) default_connection_handlers, foolscap_connection_handlers = handlers tub_options = node.create_tub_options(config) main_tub = node.create_main_tub( config, tub_options, default_connection_handlers, foolscap_connection_handlers, i2p_provider, tor_provider, ) introducer_clients = create_introducer_clients(config, main_tub, _introducer_factory) storage_broker = create_storage_farm_broker( config, default_connection_handlers, foolscap_connection_handlers, tub_options, introducer_clients, tor_provider ) client = _client_factory( config, main_tub, i2p_provider, tor_provider, introducer_clients, storage_broker, ) # Initialize storage separately after creating the client. This is # necessary because we need to pass a reference to the client in to the # storage plugins to allow them to initialize themselves (specifically, # they may want the anonymous IStorageServer implementation so they don't # have to duplicate all of its basic storage functionality). A better way # to do this, eventually, may be to create that implementation first and # then pass it in to both storage plugin creation and the client factory. # This avoids making a partially initialized client object escape the # client factory and removes the circular dependency between these # objects. storage_plugins = yield _StoragePlugins.from_config( client.get_anonymous_storage_server, config, ) client.init_storage(storage_plugins.announceable_storage_servers) i2p_provider.setServiceParent(client) tor_provider.setServiceParent(client) for ic in introducer_clients: ic.setServiceParent(client) storage_broker.setServiceParent(client) defer.returnValue(client) @attr.s class _StoragePlugins(object): """ Functionality related to getting storage plugins set up and ready for use. :ivar list[IAnnounceableStorageServer] announceable_storage_servers: The announceable storage servers that should be used according to node configuration. """ announceable_storage_servers = attr.ib() @classmethod @defer.inlineCallbacks def from_config(cls, get_anonymous_storage_server, config): """ Load and configured storage plugins. :param get_anonymous_storage_server: A no-argument callable which returns the node's anonymous ``IStorageServer`` implementation. :param _Config config: The node's configuration. :return: A ``_StoragePlugins`` initialized from the given configuration. """ storage_plugin_names = cls._get_enabled_storage_plugin_names(config) plugins = list(cls._collect_storage_plugins(storage_plugin_names)) unknown_plugin_names = storage_plugin_names - {plugin.name for plugin in plugins} if unknown_plugin_names: raise configutil.UnknownConfigError( "Storage plugins {} are enabled but not known on this system.".format( unknown_plugin_names, ), ) announceable_storage_servers = yield cls._create_plugin_storage_servers( get_anonymous_storage_server, config, plugins, ) defer.returnValue(cls( announceable_storage_servers, )) @classmethod def _get_enabled_storage_plugin_names(cls, config): """ Get the names of storage plugins that are enabled in the configuration. 
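# Illustrative sketch: how _get_enabled_storage_plugin_names() and
# from_config() above treat the [storage]plugins value.  The plugin names
# shown here are hypothetical.
from twisted.plugin import getPlugins
from allmydata.interfaces import IFoolscapStoragePlugin
from allmydata.util import configutil

plugins_value = u"tahoe-lafs-dummy-v1,tahoe-lafs-dummy-v2"   # raw config string
enabled_names = set(plugins_value.split(u",")) - {u""}
installed_names = {plugin.name for plugin in getPlugins(IFoolscapStoragePlugin)}
missing = enabled_names - installed_names
if missing:
    # same failure mode as _StoragePlugins.from_config()
    raise configutil.UnknownConfigError(
        "Storage plugins {} are enabled but not known on this system.".format(missing),
    )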
""" return set( config.get_config( "storage", "plugins", "" ).split(u",") ) - {u""} @classmethod def _collect_storage_plugins(cls, storage_plugin_names): """ Get the storage plugins with names matching those given. """ return list( plugin for plugin in getPlugins(IFoolscapStoragePlugin) if plugin.name in storage_plugin_names ) @classmethod def _create_plugin_storage_servers(cls, get_anonymous_storage_server, config, plugins): """ Cause each storage plugin to instantiate its storage server and return them all. :return: A ``Deferred`` that fires with storage servers instantiated by all of the given storage server plugins. """ return defer.gatherResults( list( plugin.get_storage_server( cls._get_storage_plugin_configuration(config, plugin.name), get_anonymous_storage_server, ).addCallback( partial( _add_to_announcement, {u"name": plugin.name}, ), ) for plugin # The order is fairly arbitrary and it is not meant to convey # anything but providing *some* stable ordering makes the data # a little easier to deal with (mainly in tests and when # manually inspecting it). in sorted(plugins, key=lambda p: p.name) ), ) @classmethod def _get_storage_plugin_configuration(cls, config, storage_plugin_name): """ Load the configuration for a storage server plugin with the given name. :return dict[bytes, bytes]: The matching configuration. """ try: config = config.items( "storageserver.plugins." + storage_plugin_name, ) except NoSectionError: config = [] return dict(config) def _sequencer(config): """ :returns: a 2-tuple consisting of a new announcement sequence-number and random nonce (int, unicode). Reads and re-writes configuration file "announcement-seqnum" (starting at 1 if that file doesn't exist). """ seqnum_s = config.get_config_from_file("announcement-seqnum") if not seqnum_s: seqnum_s = u"0" seqnum = int(seqnum_s.strip()) seqnum += 1 # increment config.write_config_file("announcement-seqnum", "{}\n".format(seqnum)) nonce = _make_secret().strip() return seqnum, nonce def create_introducer_clients(config, main_tub, _introducer_factory=None): """ Read, validate and parse any 'introducers.yaml' configuration. 
:param _introducer_factory: for testing; the class to instantiate instead of IntroducerClient :returns: a list of IntroducerClient instances """ if _introducer_factory is None: _introducer_factory = IntroducerClient # we return this list introducer_clients = [] introducers = config.get_introducer_configuration() for petname, (furl, cache_path) in list(introducers.items()): ic = _introducer_factory( main_tub, furl.encode("ascii"), config.nickname, str(allmydata.__full_version__), str(_Client.OLDEST_SUPPORTED_VERSION), partial(_sequencer, config), cache_path, ) introducer_clients.append(ic) return introducer_clients def create_storage_farm_broker(config: _Config, default_connection_handlers, foolscap_connection_handlers, tub_options, introducer_clients, tor_provider: Optional[TorProvider]): """ Create a StorageFarmBroker object, for use by Uploader/Downloader (and everybody else who wants to use storage servers) :param config: a _Config instance :param default_connection_handlers: default Foolscap handlers :param foolscap_connection_handlers: available/configured Foolscap handlers :param dict tub_options: how to configure our Tub :param list introducer_clients: IntroducerClient instances if we're connecting to any """ storage_client_config = storage_client.StorageClientConfig.from_node_config( config, ) # ensure that we can at least load all plugins that the # configuration mentions; doing this early (i.e. before creating # storage-clients themselves) allows us to exit in case of a # problem. storage_client_config.get_configured_storage_plugins() def tub_creator(handler_overrides=None, **kwargs): return node.create_tub( tub_options, default_connection_handlers, foolscap_connection_handlers, handler_overrides={} if handler_overrides is None else handler_overrides, **kwargs ) # create the actual storage-broker sb = storage_client.StorageFarmBroker( permute_peers=True, tub_maker=tub_creator, node_config=config, storage_client_config=storage_client_config, default_connection_handlers=default_connection_handlers, tor_provider=tor_provider, ) for ic in introducer_clients: sb.use_introducer(ic) return sb def _register_reference(key, config, tub, referenceable): """ Register a referenceable in a tub with a stable fURL. Stability is achieved by storing the fURL in the configuration the first time and then reading it back on for future calls. :param bytes key: An identifier for this reference which can be used to identify its fURL in the configuration. :param _Config config: The configuration to use for fURL persistence. :param Tub tub: The tub in which to register the reference. :param Referenceable referenceable: The referenceable to register in the Tub. :return bytes: The fURL at which the object is registered. """ persisted_furl = config.get_private_config( key, default=None, ) name = None if persisted_furl is not None: _, _, name = decode_furl(persisted_furl) registered_furl = tub.registerReference( referenceable, name=name, ) if persisted_furl is None: config.write_private_config(key, registered_furl) return registered_furl @implementer(IAnnounceableStorageServer) @attr.s class AnnounceableStorageServer(object): announcement = attr.ib() storage_server = attr.ib() def _add_to_announcement(information, announceable_storage_server): """ Create a new ``AnnounceableStorageServer`` based on ``announceable_storage_server`` with ``information`` added to its ``announcement``. 
""" updated_announcement = announceable_storage_server.announcement.copy() updated_announcement.update(information) return AnnounceableStorageServer( updated_announcement, announceable_storage_server.storage_server, ) def storage_enabled(config): """ Is storage enabled according to the given configuration object? :param _Config config: The configuration to inspect. :return bool: ``True`` if storage is enabled, ``False`` otherwise. """ return config.get_config("storage", "enabled", True, boolean=True) def anonymous_storage_enabled(config): """ Is anonymous access to storage enabled according to the given configuration object? :param _Config config: The configuration to inspect. :return bool: ``True`` if storage is enabled, ``False`` otherwise. """ return ( storage_enabled(config) and config.get_config("storage", "anonymous", True, boolean=True) ) @implementer(IStatsProducer) class _Client(node.Node, pollmixin.PollMixin): """ This class should be refactored; see https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3931 """ STOREDIR = 'storage' NODETYPE = "client" EXIT_TRIGGER_FILE = "exit_trigger" # This means that if a storage server treats me as though I were a # 1.0.0 storage client, it will work as they expect. OLDEST_SUPPORTED_VERSION = "1.0.0" # This is a dictionary of (needed, desired, total, max_segment_size). 'needed' # is the number of shares required to reconstruct a file. 'desired' means # that we will abort an upload unless we can allocate space for at least # this many. 'total' is the total number of shares created by encoding. # If everybody has room then this is is how many we will upload. DEFAULT_ENCODING_PARAMETERS = {"k": 3, "happy": 7, "n": 10, "max_segment_size": DEFAULT_IMMUTABLE_MAX_SEGMENT_SIZE, } def __init__(self, config, main_tub, i2p_provider, tor_provider, introducer_clients, storage_farm_broker): """ Use :func:`allmydata.client.create_client` to instantiate one of these. """ node.Node.__init__(self, config, main_tub, i2p_provider, tor_provider) self.started_timestamp = time.time() self.logSource = "Client" self.encoding_params = self.DEFAULT_ENCODING_PARAMETERS.copy() self.introducer_clients = introducer_clients self.storage_broker = storage_farm_broker self.init_stats_provider() self.init_secrets() self.init_node_key() self._key_generator = KeyGenerator() key_gen_furl = config.get_config("client", "key_generator.furl", None) if key_gen_furl: log.msg("[client]key_generator.furl= is now ignored, see #2783") self.init_client() self.load_static_servers() self.helper = None if config.get_config("helper", "enabled", False, boolean=True): if not self._is_tub_listening(): raise ValueError("config error: helper is enabled, but tub " "is not listening ('tub.port=' is empty)") self.init_helper() self.init_sftp_server() # If the node sees an exit_trigger file, it will poll every second to see # whether the file still exists, and what its mtime is. If the file does not # exist or has not been modified for a given timeout, the node will exit. 
exit_trigger_file = config.get_config_path(self.EXIT_TRIGGER_FILE) if os.path.exists(exit_trigger_file): age = time.time() - os.stat(exit_trigger_file)[stat.ST_MTIME] self.log("%s file noticed (%ds old), starting timer" % (self.EXIT_TRIGGER_FILE, age)) exit_trigger = TimerService(1.0, self._check_exit_trigger, exit_trigger_file) exit_trigger.setServiceParent(self) # this needs to happen last, so it can use getServiceNamed() to # acquire references to StorageServer and other web-statusable things webport = config.get_config("node", "web.port", None) if webport: self.init_web(webport) # strports string # TODO this may be the wrong location for now? but as temporary measure # it allows us to get NURLs for testing in test_istorageserver.py. This # will eventually get fixed one way or another in # https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3901. See also # https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3931 for the bigger # picture issue. self.storage_nurls : Optional[set] = None def init_stats_provider(self): self.stats_provider = StatsProvider(self) self.stats_provider.setServiceParent(self) self.stats_provider.register_producer(self) def get_stats(self): return { 'node.uptime': time.time() - self.started_timestamp } def init_secrets(self): # configs are always unicode def _unicode_make_secret(): return str(_make_secret(), "ascii") lease_s = self.config.get_or_create_private_config( "secret", _unicode_make_secret).encode("utf-8") lease_secret = base32.a2b(lease_s) convergence_s = self.config.get_or_create_private_config( 'convergence', _unicode_make_secret).encode("utf-8") self.convergence = base32.a2b(convergence_s) self._secret_holder = SecretHolder(lease_secret, self.convergence) def init_node_key(self): # we only create the key once. On all subsequent runs, we re-use the # existing key def _make_key(): private_key, _ = ed25519.create_signing_keypair() # Config values are always unicode: return str(ed25519.string_from_signing_key(private_key) + b"\n", "utf-8") private_key_str = self.config.get_or_create_private_config( "node.privkey", _make_key).encode("utf-8") private_key, public_key = ed25519.signing_keypair_from_string(private_key_str) public_key_str = ed25519.string_from_verifying_key(public_key) self.config.write_config_file("node.pubkey", public_key_str + b"\n", "wb") self._node_private_key = private_key self._node_public_key = public_key def get_long_nodeid(self): # this matches what IServer.get_longname() says about us elsewhere vk_string = ed25519.string_from_verifying_key(self._node_public_key) return remove_prefix(vk_string, b"pub-") def get_long_tubid(self): return idlib.nodeid_b2a(self.nodeid) def get_web_service(self): """ :return: a reference to our web server """ return self.getServiceNamed("webish") def _init_permutation_seed(self, ss): seed = self.config.get_config_from_file("permutation-seed") if not seed: have_shares = ss.have_shares() if have_shares: # if the server has shares but not a recorded # permutation-seed, then it has been around since pre-#466 # days, and the clients who uploaded those shares used our # TubID as a permutation-seed. We should keep using that same # seed to keep the shares in the same place in the permuted # ring, so those clients don't have to perform excessive # searches. 
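# Illustrative sketch: what init_node_key() above persists and what
# get_long_nodeid() returns.  Here the keypair is freshly generated instead
# of being read back from private/node.privkey.
from allmydata.crypto import ed25519
from allmydata.crypto.util import remove_prefix

example_private, example_public = ed25519.create_signing_keypair()
example_vk_string = ed25519.string_from_verifying_key(example_public)  # b"pub-v0-..."
example_long_nodeid = remove_prefix(example_vk_string, b"pub-")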
seed = base32.b2a(self.nodeid) else: # otherwise, we're free to use the more natural seed of our # pubkey-based serverid vk_string = ed25519.string_from_verifying_key(self._node_public_key) vk_bytes = remove_prefix(vk_string, ed25519.PUBLIC_KEY_PREFIX) seed = base32.b2a(vk_bytes) self.config.write_config_file("permutation-seed", seed+b"\n", mode="wb") return seed.strip() def get_anonymous_storage_server(self): """ Get the anonymous ``IStorageServer`` implementation for this node. Note this will return an object even if storage is disabled on this node (but the object will not be exposed, peers will not be able to access it, and storage will remain disabled). The one and only instance for this node is always returned. It is created first if necessary. """ try: ss = self.getServiceNamed(StorageServer.name) except KeyError: pass else: return ss readonly = self.config.get_config("storage", "readonly", False, boolean=True) config_storedir = self.get_config( "storage", "storage_dir", self.STOREDIR, ) storedir = self.config.get_config_path(config_storedir) data = self.config.get_config("storage", "reserved_space", None) try: reserved = parse_abbreviated_size(data) except ValueError: log.msg("[storage]reserved_space= contains unparseable value %s" % data) raise if reserved is None: reserved = 0 discard = self.config.get_config("storage", "debug_discard", False, boolean=True) expire = self.config.get_config("storage", "expire.enabled", False, boolean=True) if expire: mode = self.config.get_config("storage", "expire.mode") # require a mode else: mode = self.config.get_config("storage", "expire.mode", "age") o_l_d = self.config.get_config("storage", "expire.override_lease_duration", None) if o_l_d is not None: o_l_d = parse_duration(o_l_d) cutoff_date = None if mode == "cutoff-date": cutoff_date = self.config.get_config("storage", "expire.cutoff_date") cutoff_date = parse_date(cutoff_date) sharetypes = [] if self.config.get_config("storage", "expire.immutable", True, boolean=True): sharetypes.append("immutable") if self.config.get_config("storage", "expire.mutable", True, boolean=True): sharetypes.append("mutable") expiration_sharetypes = tuple(sharetypes) ss = StorageServer( storedir, self.nodeid, reserved_space=reserved, discard_storage=discard, readonly_storage=readonly, stats_provider=self.stats_provider, expiration_enabled=expire, expiration_mode=mode, expiration_override_lease_duration=o_l_d, expiration_cutoff_date=cutoff_date, expiration_sharetypes=expiration_sharetypes, ) ss.setServiceParent(self) return ss def init_storage(self, announceable_storage_servers): # should we run a storage server (and publish it for others to use)? if not storage_enabled(self.config): return if not self._is_tub_listening(): raise ValueError("config error: storage is enabled, but tub " "is not listening ('tub.port=' is empty)") ss = self.get_anonymous_storage_server() announcement = { "permutation-seed-base32": self._init_permutation_seed(ss), } if anonymous_storage_enabled(self.config): furl_file = self.config.get_private_path("storage.furl").encode(get_filesystem_encoding()) furl = self.tub.registerReference(FoolscapStorageServer(ss), furlFile=furl_file) (_, _, swissnum) = decode_furl(furl) if hasattr(self.tub.negotiationClass, "add_storage_server"): nurls = self.tub.negotiationClass.add_storage_server(ss, swissnum.encode("ascii")) self.storage_nurls = nurls # There is code in e.g. storage_client.py that checks if an # announcement has changed. 
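# Illustrative sketch: how the [storage]reserved_space value read above is
# interpreted.  parse_abbreviated_size() accepts suffixed sizes (for example
# "5G" or "100MiB"; the exact suffix table lives in allmydata.util.abbreviate);
# unparseable text raises ValueError, which get_anonymous_storage_server()
# turns into a startup failure, and a missing value yields None, treated as 0.
from allmydata.util.abbreviate import parse_abbreviated_size

example_reserved = parse_abbreviated_size("5G")
if example_reserved is None:   # mirrors the None-handling in the caller above
    example_reserved = 0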
Since NURL order isn't meaningful, # we don't want a change in the order to count as a change, so we # send the NURLs as a set. CBOR supports sets, as does Foolscap. announcement[storage_client.ANONYMOUS_STORAGE_NURLS] = {n.to_text() for n in nurls} announcement["anonymous-storage-FURL"] = furl enabled_storage_servers = self._enable_storage_servers( announceable_storage_servers, ) storage_options = list( storage_server.announcement for storage_server in enabled_storage_servers ) plugins_announcement = {} if storage_options: # Only add the new key if there are any plugins enabled. plugins_announcement[u"storage-options"] = storage_options announcement.update(plugins_announcement) if self.config.get_config("storage", "grid_management", default=False, boolean=True): grid_manager_certificates = self.config.get_grid_manager_certificates() announcement[u"grid-manager-certificates"] = grid_manager_certificates # Note: certificates are not verified for validity here, but # that may be useful. See: # https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3977 for ic in self.introducer_clients: ic.publish("storage", announcement, self._node_private_key) def get_client_storage_plugin_web_resources(self): """ Get all of the client-side ``IResource`` implementations provided by enabled storage plugins. :return dict[bytes, IResource provider]: The implementations. """ return self.storage_broker.get_client_storage_plugin_web_resources( self.config, ) def _enable_storage_servers(self, announceable_storage_servers): """ Register and announce the given storage servers. """ for announceable in announceable_storage_servers: yield self._enable_storage_server(announceable) def _enable_storage_server(self, announceable_storage_server): """ Register a storage server. """ config_key = "storage-plugin.{}.furl".format( # Oops, why don't I have a better handle on this value? announceable_storage_server.announcement[u"name"], ) furl = _register_reference( config_key, self.config, self.tub, announceable_storage_server.storage_server, ) announceable_storage_server = _add_to_announcement( {u"storage-server-FURL": furl}, announceable_storage_server, ) return announceable_storage_server def init_client(self): helper_furl = self.config.get_config("client", "helper.furl", None) if helper_furl in ("None", ""): helper_furl = None DEP = self.encoding_params DEP["k"] = int(self.config.get_config("client", "shares.needed", DEP["k"])) DEP["n"] = int(self.config.get_config("client", "shares.total", DEP["n"])) DEP["happy"] = int(self.config.get_config("client", "shares.happy", DEP["happy"])) # At the moment this is only used for testing, thus the janky config # attribute name. DEP["max_segment_size"] = int(self.config.get_config( "client", "shares._max_immutable_segment_size_for_testing", DEP["max_segment_size"]) ) # for the CLI to authenticate to local JSON endpoints self._create_auth_token() self.history = History(self.stats_provider) self.terminator = Terminator() self.terminator.setServiceParent(self) uploader = Uploader( helper_furl, self.stats_provider, self.history, ) uploader.setServiceParent(self) self.init_blacklist() self.init_nodemaker() def get_auth_token(self): """ This returns a local authentication token, which is just some random data in "api_auth_token" which must be echoed to API calls. """ return self.config.get_private_config( 'api_auth_token').encode("ascii") def _create_auth_token(self): """ Creates new auth-token data written to 'private/api_auth_token'. This is intentionally re-created every time the node starts. 
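# Illustrative sketch: the shape of private/api_auth_token written by
# _create_auth_token() below -- 32 random bytes, URL-safe base64,
# newline-terminated.  CLI tools read it back and echo it verbatim to the
# local web API (see get_auth_token() above).
import os
from base64 import urlsafe_b64encode

example_token_file = urlsafe_b64encode(os.urandom(32)) + b"\n"  # 44 base64 chars + newline
example_token = example_token_file.strip()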
""" self.config.write_private_config( 'api_auth_token', urlsafe_b64encode(os.urandom(32)) + b'\n', ) def get_storage_broker(self): return self.storage_broker def load_static_servers(self): """ Load the servers.yaml file if it exists, and provide the static server data to the StorageFarmBroker. """ fn = self.config.get_private_path("servers.yaml") servers_filepath = FilePath(fn) try: with servers_filepath.open() as f: servers_yaml = yamlutil.safe_load(f) static_servers = servers_yaml.get("storage", {}) log.msg("found %d static servers in private/servers.yaml" % len(static_servers)) static_servers = { ensure_text(key): value for (key, value) in static_servers.items() } self.storage_broker.set_static_servers(static_servers) except EnvironmentError: pass def init_blacklist(self): fn = self.config.get_config_path("access.blacklist") self.blacklist = Blacklist(fn) def init_nodemaker(self): default = self.config.get_config("client", "mutable.format", default="SDMF") if default.upper() == "MDMF": self.mutable_file_default = MDMF_VERSION else: self.mutable_file_default = SDMF_VERSION self.nodemaker = NodeMaker(self.storage_broker, self._secret_holder, self.get_history(), self.getServiceNamed("uploader"), self.terminator, self.get_encoding_parameters(), self.mutable_file_default, self._key_generator, self.blacklist) def get_history(self): return self.history def init_helper(self): self.helper = Helper(self.config.get_config_path("helper"), self.storage_broker, self._secret_holder, self.stats_provider, self.history) # TODO: this is confusing. BASEDIR/private/helper.furl is created by # the helper. BASEDIR/helper.furl is consumed by the client who wants # to use the helper. I like having the filename be the same, since # that makes 'cp' work smoothly, but the difference between config # inputs and generated outputs is hard to see. helper_furlfile = self.config.get_private_path("helper.furl").encode(get_filesystem_encoding()) self.tub.registerReference(self.helper, furlFile=helper_furlfile) def _get_tempdir(self): """ Determine the path to the directory where temporary files for this node should be written. :return bytes: The path which will exist and be a directory. 
""" tempdir_config = self.config.get_config("node", "tempdir", "tmp") if isinstance(tempdir_config, bytes): tempdir_config = tempdir_config.decode('utf-8') tempdir = self.config.get_config_path(tempdir_config) if not os.path.exists(tempdir): fileutil.make_dirs(tempdir) return tempdir def init_web(self, webport): self.log("init_web(webport=%s)", args=(webport,)) from allmydata.webish import WebishServer, anonymous_tempfile_factory nodeurl_path = self.config.get_config_path("node.url") staticdir_config = self.config.get_config("node", "web.static", "public_html") staticdir = self.config.get_config_path(staticdir_config) ws = WebishServer( self, webport, anonymous_tempfile_factory(self._get_tempdir()), nodeurl_path, staticdir, ) ws.setServiceParent(self) def init_sftp_server(self): if self.config.get_config("sftpd", "enabled", False, boolean=True): accountfile = self.config.get_config("sftpd", "accounts.file", None) if accountfile: accountfile = self.config.get_config_path(accountfile) sftp_portstr = self.config.get_config("sftpd", "port", "tcp:8022") pubkey_file = self.config.get_config("sftpd", "host_pubkey_file") privkey_file = self.config.get_config("sftpd", "host_privkey_file") from allmydata.frontends import sftpd s = sftpd.SFTPServer(self, accountfile, sftp_portstr, pubkey_file, privkey_file) s.setServiceParent(self) def _check_exit_trigger(self, exit_trigger_file): if os.path.exists(exit_trigger_file): mtime = os.stat(exit_trigger_file)[stat.ST_MTIME] if mtime > time.time() - 120.0: return else: self.log("%s file too old, shutting down" % (self.EXIT_TRIGGER_FILE,)) else: self.log("%s file missing, shutting down" % (self.EXIT_TRIGGER_FILE,)) reactor.stop() def get_encoding_parameters(self): return self.encoding_params def introducer_connection_statuses(self): return [ic.connection_status() for ic in self.introducer_clients] def connected_to_introducer(self): return any([ic.connected_to_introducer() for ic in self.introducer_clients]) def get_renewal_secret(self): # this will go away return self._secret_holder.get_renewal_secret() def get_cancel_secret(self): return self._secret_holder.get_cancel_secret() def debug_wait_for_client_connections(self, num_clients): """Return a Deferred that fires (with None) when we have connections to the given number of peers. Useful for tests that set up a temporary test network and need to know when it is safe to proceed with an upload or download.""" def _check(): return len(self.storage_broker.get_connected_servers()) >= num_clients d = self.poll(_check, 0.5) d.addCallback(lambda res: None) return d # these four methods are the primitives for creating filenodes and # dirnodes. The first takes a URI and produces a filenode or (new-style) # dirnode. The other three create brand-new filenodes/dirnodes. def create_node_from_uri(self, write_uri, read_uri=None, deep_immutable=False, name=""): # This returns synchronously. # Note that it does *not* validate the write_uri and read_uri; instead we # may get an opaque node if there were any problems. return self.nodemaker.create_from_cap(write_uri, read_uri, deep_immutable=deep_immutable, name=name) def create_dirnode( self, initial_children: dict | None = None, version: int | None = None, *, unique_keypair: tuple[rsa.PublicKey, rsa.PrivateKey] | None = None ) -> DirectoryNode: """ Create a new directory. :param initial_children: If given, a structured dict representing the initial content of the created directory. See `docs/frontends/webapi.rst` for examples. 
:param version: If given, an int representing the mutable file format of the new object. Acceptable values are currently `SDMF_VERSION` or `MDMF_VERSION` (corresponding to 0 or 1, respectively, as defined in `allmydata.interfaces`). If no such value is provided, the default mutable format will be used (currently SDMF). :param unique_keypair: an optional tuple containing the RSA public and private key to be used for the new directory. Typically, this value is omitted (in which case a new random keypair will be generated at creation time). **Warning** This value independently determines the identity of the mutable object to create. There cannot be two different mutable objects that share a keypair. They will merge into one object (with undefined contents). :return: A Deferred which will fire with a representation of the new directory after it has been created. """ d = self.nodemaker.create_new_mutable_directory( initial_children, version=version, keypair=unique_keypair, ) return d def create_immutable_dirnode(self, children, convergence=None): return self.nodemaker.create_immutable_directory(children, convergence) def create_mutable_file( self, contents: bytes | None = None, version: int | None = None, *, unique_keypair: tuple[rsa.PublicKey, rsa.PrivateKey] | None = None, ) -> MutableFileNode: """ Create *and upload* a new mutable object. :param contents: If given, the initial contents for the new object. :param version: If given, the mutable file format for the new object (otherwise a format will be chosen automatically). :param unique_keypair: **Warning** This value independently determines the identity of the mutable object to create. There cannot be two different mutable objects that share a keypair. They will merge into one object (with undefined contents). It is common to pass a None value (or not pass a valuye) for this parameter. In these cases, a new random keypair will be generated. If non-None, the given public/private keypair will be used for the new object. The expected use-case is for implementing compliance tests. :return: A Deferred which will fire with a representation of the new mutable object after it has been uploaded. """ return self.nodemaker.create_mutable_file(contents, version=version, keypair=unique_keypair) def upload(self, uploadable, reactor=None): uploader = self.getServiceNamed("uploader") return uploader.upload(uploadable, reactor=reactor) tahoe_lafs-1.20.0/src/allmydata/codec.py0000644000000000000000000000612413615410400015010 0ustar00""" CRS encoding and decoding. Ported to Python 3. 
""" from zope.interface import implementer from allmydata.util import mathutil from allmydata.util.assertutil import precondition from allmydata.util.cputhreadpool import defer_to_thread from allmydata.util.deferredutil import async_to_deferred from allmydata.interfaces import ICodecEncoder, ICodecDecoder import zfec @implementer(ICodecEncoder) class CRSEncoder(object): ENCODER_TYPE = b"crs" def set_params(self, data_size, required_shares, max_shares): assert required_shares <= max_shares self.data_size = data_size self.required_shares = required_shares self.max_shares = max_shares self.share_size = mathutil.div_ceil(data_size, required_shares) self.last_share_padding = mathutil.pad_size(self.share_size, required_shares) self.encoder = zfec.Encoder(required_shares, max_shares) def get_encoder_type(self): return self.ENCODER_TYPE def get_params(self): return (self.data_size, self.required_shares, self.max_shares) def get_serialized_params(self): return b"%d-%d-%d" % (self.data_size, self.required_shares, self.max_shares) def get_block_size(self): return self.share_size @async_to_deferred async def encode(self, inshares, desired_share_ids=None): precondition(desired_share_ids is None or len(desired_share_ids) <= self.max_shares, desired_share_ids, self.max_shares) if desired_share_ids is None: desired_share_ids = list(range(self.max_shares)) for inshare in inshares: assert len(inshare) == self.share_size, (len(inshare), self.share_size, self.data_size, self.required_shares) shares = await defer_to_thread(self.encoder.encode, inshares, desired_share_ids) return (shares, desired_share_ids) def encode_proposal(self, data, desired_share_ids=None): raise NotImplementedError() @implementer(ICodecDecoder) class CRSDecoder(object): def set_params(self, data_size, required_shares, max_shares): self.data_size = data_size self.required_shares = required_shares self.max_shares = max_shares self.chunk_size = self.required_shares self.num_chunks = mathutil.div_ceil(self.data_size, self.chunk_size) self.share_size = self.num_chunks self.decoder = zfec.Decoder(self.required_shares, self.max_shares) def get_needed_shares(self): return self.required_shares @async_to_deferred async def decode(self, some_shares, their_shareids): precondition(len(some_shares) == len(their_shareids), len(some_shares), len(their_shareids)) precondition(len(some_shares) == self.required_shares, len(some_shares), self.required_shares) return await defer_to_thread( self.decoder.decode, some_shares, [int(s) for s in their_shareids] ) def parse_params(serializedparams): pieces = serializedparams.split(b"-") return int(pieces[0]), int(pieces[1]), int(pieces[2]) tahoe_lafs-1.20.0/src/allmydata/deep_stats.py0000644000000000000000000001146513615410400016072 0ustar00"""Implementation of the deep stats class. Ported to Python 3. """ import math from allmydata.interfaces import IImmutableFileNode from allmydata.interfaces import IMutableFileNode from allmydata.interfaces import IDirectoryNode from allmydata.unknown import UnknownNode from allmydata.uri import LiteralFileURI from allmydata.uri import from_string from allmydata.util import mathutil class DeepStats(object): """Deep stats object. Holds results of the deep-stats operation. Used for json generation in the API.""" # Json API version. # Rules: # - increment each time a field is removed or changes meaning. # - it's ok to add a new field without incrementing the version. API_VERSION = 1 def __init__(self, origin): """Initializes DeepStats object. 
Sets most of the fields to 0.""" self.monitor = None self.origin = origin self.stats = { 'api-version': self.API_VERSION } for k in ["count-immutable-files", "count-mutable-files", "count-literal-files", "count-files", "count-directories", "count-unknown", "size-immutable-files", #"size-mutable-files", "size-literal-files", "size-directories", "largest-directory", "largest-directory-children", "largest-immutable-file", #"largest-mutable-file", ]: self.stats[k] = 0 self.histograms = {} for k in ["size-files-histogram"]: self.histograms[k] = {} # maps (min,max) to count self.buckets = [(0, 0), (1, 3)] self.root = math.sqrt(10) def set_monitor(self, monitor): """Sets a new monitor.""" self.monitor = monitor monitor.origin_si = self.origin.get_storage_index() monitor.set_status(self.get_results()) def add_node(self, node, childpath): """Adds a node's stats to calculation.""" if isinstance(node, UnknownNode): self.add("count-unknown") elif IDirectoryNode.providedBy(node): self.add("count-directories") elif IMutableFileNode.providedBy(node): self.add("count-files") self.add("count-mutable-files") # TODO: update the servermap, compute a size, add it to # size-mutable-files, max it into "largest-mutable-file" elif IImmutableFileNode.providedBy(node): # CHK and LIT self.add("count-files") size = node.get_size() self.histogram("size-files-histogram", size) theuri = from_string(node.get_uri()) if isinstance(theuri, LiteralFileURI): self.add("count-literal-files") self.add("size-literal-files", size) else: self.add("count-immutable-files") self.add("size-immutable-files", size) self.max("largest-immutable-file", size) def enter_directory(self, parent, children): """Adds directory stats.""" dirsize_bytes = parent.get_size() if dirsize_bytes is not None: self.add("size-directories", dirsize_bytes) self.max("largest-directory", dirsize_bytes) dirsize_children = len(children) self.max("largest-directory-children", dirsize_children) def add(self, key, value=1): self.stats[key] += value def max(self, key, value): self.stats[key] = max(self.stats[key], value) def which_bucket(self, size): # return (min,max) such that min <= size <= max # values are from the set (0,0), (1,3), (4,10), (11,31), (32,100), # (101,316), (317, 1000), etc: two per decade assert size >= 0 i = 0 while True: if i >= len(self.buckets): # extend the list new_lower = self.buckets[i-1][1]+1 new_upper = int(mathutil.next_power_of_k(new_lower, self.root)) self.buckets.append((new_lower, new_upper)) maybe = self.buckets[i] if maybe[0] <= size <= maybe[1]: return maybe i += 1 def histogram(self, key, size): bucket = self.which_bucket(size) h = self.histograms[key] if bucket not in h: h[bucket] = 0 h[bucket] += 1 def get_results(self): """Returns deep-stats results.""" stats = self.stats.copy() for key in self.histograms: h = self.histograms[key] out = [ (bucket[0], bucket[1], h[bucket]) for bucket in h ] out.sort() stats[key] = out return stats def finish(self): """Finishes gathering stats.""" return self.get_results() tahoe_lafs-1.20.0/src/allmydata/dirnode.py0000644000000000000000000011122113615410400015352 0ustar00"""Directory Node implementation. Ported to Python 3. 
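# Illustrative sketch (refers to allmydata/deep_stats.py above): feeding a
# few sizes into the size histogram.  Buckets grow roughly two per decade, as
# described in which_bucket().
from allmydata.deep_stats import DeepStats

example_stats = DeepStats(origin=None)   # origin is only needed once set_monitor() is used
for example_size in (0, 2, 7, 80):
    example_stats.histogram("size-files-histogram", example_size)
# get_results() renders the histogram as sorted (min, max, count) triples:
# [(0, 0, 1), (1, 3, 1), (4, 10, 1), (32, 100, 1)]
example_histogram = example_stats.get_results()["size-files-histogram"]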
""" import time from zope.interface import implementer from twisted.internet import defer from foolscap.api import fireEventually from allmydata.crypto import aes from allmydata.deep_stats import DeepStats from allmydata.mutable.common import NotWriteableError from allmydata.mutable.filenode import MutableFileNode from allmydata.unknown import UnknownNode, strip_prefix_for_ro from allmydata.interfaces import IFilesystemNode, IDirectoryNode, IFileNode, \ ExistingChildError, NoSuchChildError, ICheckable, IDeepCheckable, \ MustBeDeepImmutableError, CapConstraintError, ChildOfWrongTypeError from allmydata.check_results import DeepCheckResults, \ DeepCheckAndRepairResults from allmydata.monitor import Monitor from allmydata.util import hashutil, base32, log, jsonbytes as json from allmydata.util.encodingutil import quote_output, normalize from allmydata.util.assertutil import precondition from allmydata.util.netstring import netstring, split_netstring from allmydata.util.consumer import download_to_data from allmydata.uri import wrap_dirnode_cap from allmydata.util.dictutil import AuxValueDict from eliot import ( ActionType, Field, ) from eliot.twisted import ( DeferredContext, ) NAME = Field.for_types( "name", [str], "The name linking the parent to this node.", ) METADATA = Field.for_types( "metadata", [dict], "Data about a node.", ) OVERWRITE = Field.for_types( "overwrite", [bool], "True to replace an existing file of the same name, " "false to fail with a collision error.", ) ADD_FILE = ActionType( "dirnode:add-file", [NAME, METADATA, OVERWRITE], [], "Add a new file as a child of a directory.", ) class _OnlyFiles(object): """Marker for replacement option of only replacing files.""" ONLY_FILES = _OnlyFiles() def update_metadata(metadata, new_metadata, now): """Updates 'metadata' in-place with the information in 'new_metadata'. Timestamps are set according to the time 'now'. """ if metadata is None: metadata = {} old_ctime = None if 'ctime' in metadata: old_ctime = metadata['ctime'] if new_metadata is not None: # Overwrite all metadata. newmd = new_metadata.copy() # Except 'tahoe'. if 'tahoe' in newmd: del newmd['tahoe'] if 'tahoe' in metadata: newmd['tahoe'] = metadata['tahoe'] metadata = newmd # update timestamps sysmd = metadata.get('tahoe', {}) if 'linkcrtime' not in sysmd: # In Tahoe < 1.4.0 we used the word 'ctime' to mean what Tahoe >= 1.4.0 # calls 'linkcrtime'. This field is only used if it was in the old metadata, # and 'tahoe:linkcrtime' was not. if old_ctime is not None: sysmd['linkcrtime'] = old_ctime else: sysmd['linkcrtime'] = now sysmd['linkmotime'] = now metadata['tahoe'] = sysmd return metadata # TODO: {Deleter,MetadataSetter,Adder}.modify all start by unpacking the # contents and end by repacking them. It might be better to apply them to # the unpacked contents. class Deleter(object): def __init__(self, node, namex, must_exist=True, must_be_directory=False, must_be_file=False): self.node = node self.name = normalize(namex) self.must_exist = must_exist self.must_be_directory = must_be_directory self.must_be_file = must_be_file def modify(self, old_contents, servermap, first_time): children = self.node._unpack_contents(old_contents) if self.name not in children: if first_time and self.must_exist: raise NoSuchChildError(self.name) self.old_child = None return None self.old_child, metadata = children[self.name] # Unknown children can be removed regardless of must_be_directory or must_be_file. 
if self.must_be_directory and IFileNode.providedBy(self.old_child): raise ChildOfWrongTypeError("delete required a directory, not a file") if self.must_be_file and IDirectoryNode.providedBy(self.old_child): raise ChildOfWrongTypeError("delete required a file, not a directory") del children[self.name] new_contents = self.node._pack_contents(children) return new_contents class MetadataSetter(object): def __init__(self, node, namex, metadata, create_readonly_node=None): self.node = node self.name = normalize(namex) self.metadata = metadata self.create_readonly_node = create_readonly_node def modify(self, old_contents, servermap, first_time): children = self.node._unpack_contents(old_contents) name = self.name if name not in children: raise NoSuchChildError(name) now = time.time() child = children[name][0] metadata = update_metadata(children[name][1].copy(), self.metadata, now) if self.create_readonly_node and metadata.get('no-write', False): child = self.create_readonly_node(child, name) children[name] = (child, metadata) new_contents = self.node._pack_contents(children) return new_contents class Adder(object): def __init__(self, node, entries=None, overwrite=True, create_readonly_node=None): """ :param overwrite: Either True (allow overwriting anything existing), False (don't allow overwriting), or ONLY_FILES (only files can be overwritten). """ self.node = node if entries is None: entries = {} precondition(isinstance(entries, dict), entries) precondition(overwrite in (True, False, ONLY_FILES), overwrite) # keys of 'entries' may not be normalized. self.entries = entries self.overwrite = overwrite self.create_readonly_node = create_readonly_node def set_node(self, namex, node, metadata): precondition(IFilesystemNode.providedBy(node), node) self.entries[namex] = (node, metadata) def modify(self, old_contents, servermap, first_time): children = self.node._unpack_contents(old_contents) now = time.time() for (namex, (child, new_metadata)) in list(self.entries.items()): name = normalize(namex) precondition(IFilesystemNode.providedBy(child), child) # Strictly speaking this is redundant because we would raise the # error again in _pack_normalized_children. child.raise_error() metadata = None if name in children: if not self.overwrite: raise ExistingChildError("child %s already exists" % quote_output(name, encoding='utf-8')) if self.overwrite == ONLY_FILES and IDirectoryNode.providedBy(children[name][0]): raise ExistingChildError("child %s already exists as a directory" % quote_output(name, encoding='utf-8')) metadata = children[name][1].copy() metadata = update_metadata(metadata, new_metadata, now) if self.create_readonly_node and metadata.get('no-write', False): child = self.create_readonly_node(child, name) children[name] = (child, metadata) new_contents = self.node._pack_contents(children) return new_contents def _encrypt_rw_uri(writekey, rw_uri): precondition(isinstance(rw_uri, bytes), rw_uri) precondition(isinstance(writekey, bytes), writekey) salt = hashutil.mutable_rwcap_salt_hash(rw_uri) key = hashutil.mutable_rwcap_key_hash(salt, writekey) encryptor = aes.create_encryptor(key) crypttext = aes.encrypt_data(encryptor, rw_uri) mac = hashutil.hmac(key, salt + crypttext) assert len(mac) == 32 return salt + crypttext + mac # The MAC is not checked by readers in Tahoe >= 1.3.0, but we still # produce it for the sake of older readers. def pack_children(childrenx, writekey, deep_immutable=False): # initial_children must have metadata (i.e. 
{} instead of None) children = {} for (namex, (node, metadata)) in list(childrenx.items()): precondition(isinstance(metadata, dict), "directory creation requires metadata to be a dict, not None", metadata) children[normalize(namex)] = (node, metadata) return _pack_normalized_children(children, writekey=writekey, deep_immutable=deep_immutable) ZERO_LEN_NETSTR=netstring(b'') def _pack_normalized_children(children, writekey, deep_immutable=False): """Take a dict that maps: children[unicode_nfc_name] = (IFileSystemNode, metadata_dict) and pack it into a single string, for use as the contents of the backing file. This is the same format as is returned by _unpack_contents. I also accept an AuxValueDict, in which case I'll use the auxilliary cached data as the pre-packed entry, which is faster than re-packing everything each time. If writekey is provided then I will superencrypt the child's writecap with writekey. If deep_immutable is True, I will require that all my children are deeply immutable, and will raise a MustBeDeepImmutableError if not. """ precondition((writekey is None) or isinstance(writekey, bytes), writekey) has_aux = isinstance(children, AuxValueDict) entries = [] for name in sorted(children.keys()): assert isinstance(name, str) entry = None (child, metadata) = children[name] child.raise_error() if deep_immutable and not child.is_allowed_in_immutable_directory(): raise MustBeDeepImmutableError( "child %r is not allowed in an immutable directory" % (name,), name) if has_aux: entry = children.get_aux(name) if not entry: assert IFilesystemNode.providedBy(child), (name,child) assert isinstance(metadata, dict) rw_uri = child.get_write_uri() if rw_uri is None: rw_uri = b"" assert isinstance(rw_uri, bytes), rw_uri # should be prevented by MustBeDeepImmutableError check above assert not (rw_uri and deep_immutable) ro_uri = child.get_readonly_uri() if ro_uri is None: ro_uri = b"" assert isinstance(ro_uri, bytes), ro_uri if writekey is not None: writecap = netstring(_encrypt_rw_uri(writekey, rw_uri)) else: writecap = ZERO_LEN_NETSTR entry = b"".join([netstring(name.encode("utf-8")), netstring(strip_prefix_for_ro(ro_uri, deep_immutable)), writecap, netstring(json.dumps(metadata).encode("utf-8"))]) entries.append(netstring(entry)) return b"".join(entries) @implementer(IDirectoryNode, ICheckable, IDeepCheckable) class DirectoryNode(object): filenode_class = MutableFileNode def __init__(self, filenode, nodemaker, uploader): assert IFileNode.providedBy(filenode), filenode assert not IDirectoryNode.providedBy(filenode), filenode self._node = filenode filenode_cap = filenode.get_cap() self._uri = wrap_dirnode_cap(filenode_cap) self._nodemaker = nodemaker self._uploader = uploader def __repr__(self): return "<%s %s-%s %s>" % (self.__class__.__name__, self.is_readonly() and "RO" or "RW", self.is_mutable() and "MUT" or "IMM", hasattr(self, '_uri') and str(self._uri.abbrev(), "utf-8")) def get_size(self): """Return the size of our backing mutable file, in bytes, if we've fetched it. Otherwise return None. This returns synchronously.""" return self._node.get_size() def get_current_size(self): """Calculate the size of our backing mutable file, in bytes. Returns a Deferred that fires with the result.""" return self._node.get_current_size() def _read(self): if self._node.is_mutable(): # use the IMutableFileNode API. 
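# Illustrative sketch: the child-entry layout that _pack_normalized_children()
# above produces, built by hand with the same netstring helper.  The LIT cap
# is hypothetical, and the write-cap field is left empty here instead of being
# encrypted with the directory's writekey.
from allmydata.util.netstring import netstring, split_netstring
from allmydata.util import jsonbytes as json

example_entry = b"".join([
    netstring(u"example.txt".encode("utf-8")),                       # NFC-normalized name
    netstring(b"URI:LIT:krugkidfnzsc4"),                             # read-only cap (hypothetical)
    netstring(b""),                                                  # empty rwcapdata: no write cap
    netstring(json.dumps({"tahoe": {"linkmotime": 0}}).encode("utf-8")),
])
example_packed_dir = netstring(example_entry)   # one netstring per child, concatenated
example_fields, _ = split_netstring(example_entry, 4)
assert example_fields[0] == b"example.txt"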
d = self._node.download_best_version() else: d = download_to_data(self._node) d.addCallback(self._unpack_contents) return d def _decrypt_rwcapdata(self, encwrcap): salt = encwrcap[:16] crypttext = encwrcap[16:-32] key = hashutil.mutable_rwcap_key_hash(salt, self._node.get_writekey()) encryptor = aes.create_decryptor(key) plaintext = aes.decrypt_data(encryptor, crypttext) return plaintext def _create_and_validate_node(self, rw_uri, ro_uri, name): # name is just for error reporting node = self._nodemaker.create_from_cap(rw_uri, ro_uri, deep_immutable=not self.is_mutable(), name=name) node.raise_error() return node def _create_readonly_node(self, node, name): # name is just for error reporting if not node.is_unknown() and node.is_readonly(): return node return self._create_and_validate_node(None, node.get_readonly_uri(), name=name) def _unpack_contents(self, data): # the directory is serialized as a list of netstrings, one per child. # Each child is serialized as a list of four netstrings: (name, ro_uri, # rwcapdata, metadata), in which the name, ro_uri, metadata are in # cleartext. The 'name' is UTF-8 encoded, and should be normalized to NFC. # The rwcapdata is formatted as: # pack("16ss32s", iv, AES(H(writekey+iv), plaintext_rw_uri), mac) assert isinstance(data, bytes), (repr(data), type(data)) # an empty directory is serialized as an empty string if data == b"": return AuxValueDict() writeable = not self.is_readonly() mutable = self.is_mutable() children = AuxValueDict() position = 0 while position < len(data): entries, position = split_netstring(data, 1, position) entry = entries[0] (namex_utf8, ro_uri, rwcapdata, metadata_s), subpos = split_netstring(entry, 4) if not mutable and len(rwcapdata) > 0: raise ValueError("the rwcapdata field of a dirnode in an immutable directory was not empty") # A name containing characters that are unassigned in one version of Unicode might # not be normalized wrt a later version. See the note in section 'Normalization Stability' # at . # Therefore we normalize names going both in and out of directories. name = normalize(namex_utf8.decode("utf-8")) rw_uri = b"" if writeable: rw_uri = self._decrypt_rwcapdata(rwcapdata) # Since the encryption uses CTR mode, it currently leaks the length of the # plaintext rw_uri -- and therefore whether it is present, i.e. whether the # dirnode is writeable (ticket #925). By stripping trailing spaces in # Tahoe >= 1.6.0, we may make it easier for future versions to plug this leak. # ro_uri is treated in the same way for consistency. # rw_uri and ro_uri will be either None or a non-empty string. 
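# Illustrative sketch: the rwcapdata layout handled just above --
# salt (16 bytes) + AES-CTR ciphertext + HMAC (32 bytes) -- round-tripped
# with the same helpers that _encrypt_rw_uri() and _decrypt_rwcapdata() use.
# The writekey and write cap are hypothetical.
import os
from allmydata.crypto import aes
from allmydata.util import hashutil

example_writekey = os.urandom(16)
example_rw_uri = b"URI:DIR2:aaaaaaaaaaaaaaaa:bbbbbbbbbbbbbbbb"
example_blob = _encrypt_rw_uri(example_writekey, example_rw_uri)
example_salt, example_crypttext = example_blob[:16], example_blob[16:-32]
example_key = hashutil.mutable_rwcap_key_hash(example_salt, example_writekey)
assert aes.decrypt_data(aes.create_decryptor(example_key), example_crypttext) == example_rw_uri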
rw_uri = rw_uri.rstrip(b' ') or None ro_uri = ro_uri.rstrip(b' ') or None try: child = self._create_and_validate_node(rw_uri, ro_uri, name) if mutable or child.is_allowed_in_immutable_directory(): metadata = json.loads(metadata_s) assert isinstance(metadata, dict) children[name] = (child, metadata) children.set_with_aux(name, (child, metadata), auxilliary=entry) else: log.msg(format="mutable cap for child %(name)s unpacked from an immutable directory", name=quote_output(name, encoding='utf-8'), facility="tahoe.webish", level=log.UNUSUAL) except CapConstraintError as e: log.msg(format="unmet constraint on cap for child %(name)s unpacked from a directory:\n" "%(message)s", message=e.args[0], name=quote_output(name, encoding='utf-8'), facility="tahoe.webish", level=log.UNUSUAL) return children def _pack_contents(self, children): # expects children in the same format as _unpack_contents returns return _pack_normalized_children(children, self._node.get_writekey()) def is_readonly(self): return self._node.is_readonly() def is_mutable(self): return self._node.is_mutable() def is_unknown(self): return False def is_allowed_in_immutable_directory(self): return not self._node.is_mutable() def raise_error(self): pass def get_uri(self): return self._uri.to_string() def get_write_uri(self): if self.is_readonly(): return None return self._uri.to_string() def get_readonly_uri(self): return self._uri.get_readonly().to_string() def get_cap(self): return self._uri def get_readcap(self): return self._uri.get_readonly() def get_verify_cap(self): return self._uri.get_verify_cap() def get_repair_cap(self): if self._node.is_readonly(): return None # readonly (mutable) dirnodes are not yet repairable return self._uri def get_storage_index(self): return self._uri.get_storage_index() def check(self, monitor, verify=False, add_lease=False): """Perform a file check. See IChecker.check for details.""" return self._node.check(monitor, verify, add_lease) def check_and_repair(self, monitor, verify=False, add_lease=False): return self._node.check_and_repair(monitor, verify, add_lease) def list(self): """I return a Deferred that fires with a dictionary mapping child name to a tuple of (IFilesystemNode, metadata).""" return self._read() def has_child(self, namex): """I return a Deferred that fires with a boolean, True if there exists a child of the given name, False if not.""" name = normalize(namex) d = self._read() d.addCallback(lambda children: name in children) return d def _get(self, children, name): child = children.get(name) if child is None: raise NoSuchChildError(name) return child[0] def _get_with_metadata(self, children, name): child = children.get(name) if child is None: raise NoSuchChildError(name) return child def get(self, namex): """I return a Deferred that fires with the named child node, which is an IFilesystemNode.""" name = normalize(namex) d = self._read() d.addCallback(self._get, name) return d def get_child_and_metadata(self, namex): """I return a Deferred that fires with the (node, metadata) pair for the named child. 
The node is an IFilesystemNode, and the metadata is a dictionary.""" name = normalize(namex) d = self._read() d.addCallback(self._get_with_metadata, name) return d def get_metadata_for(self, namex): name = normalize(namex) d = self._read() d.addCallback(lambda children: children[name][1]) return d def set_metadata_for(self, namex, metadata): name = normalize(namex) if self.is_readonly(): return defer.fail(NotWriteableError()) assert isinstance(metadata, dict) s = MetadataSetter(self, name, metadata, create_readonly_node=self._create_readonly_node) d = self._node.modify(s.modify) d.addCallback(lambda res: self) return d def get_child_at_path(self, pathx): """Transform a child path into an IFilesystemNode. I perform a recursive series of 'get' operations to find the named descendant node. I return a Deferred that fires with the node, or errbacks with IndexError if the node could not be found. The path can be either a single string (slash-separated) or a list of path-name elements. """ d = self.get_child_and_metadata_at_path(pathx) d.addCallback(lambda node_and_metadata: node_and_metadata[0]) return d def get_child_and_metadata_at_path(self, pathx): """Transform a child path into an IFilesystemNode and a metadata dictionary from the last edge that was traversed. """ if not pathx: return defer.succeed((self, {})) if isinstance(pathx, (list, tuple)): pass else: pathx = pathx.split("/") for p in pathx: assert isinstance(p, str), p childnamex = pathx[0] remaining_pathx = pathx[1:] if remaining_pathx: d = self.get(childnamex) d.addCallback(lambda node: node.get_child_and_metadata_at_path(remaining_pathx)) return d d = self.get_child_and_metadata(childnamex) return d def set_uri(self, namex, writecap, readcap=None, metadata=None, overwrite=True): precondition(isinstance(writecap, (bytes, type(None))), writecap) precondition(isinstance(readcap, (bytes, type(None))), readcap) # We now allow packing unknown nodes, provided they are valid # for this type of directory. child_node = self._create_and_validate_node(writecap, readcap, namex) d = self.set_node(namex, child_node, metadata, overwrite) d.addCallback(lambda res: child_node) return d def set_children(self, entries, overwrite=True): # this takes URIs a = Adder(self, overwrite=overwrite, create_readonly_node=self._create_readonly_node) for (namex, e) in entries.items(): assert isinstance(namex, str), namex if len(e) == 2: writecap, readcap = e metadata = None else: assert len(e) == 3 writecap, readcap, metadata = e precondition(isinstance(writecap, (bytes,type(None))), writecap) precondition(isinstance(readcap, (bytes,type(None))), readcap) # We now allow packing unknown nodes, provided they are valid # for this type of directory. child_node = self._create_and_validate_node(writecap, readcap, namex) a.set_node(namex, child_node, metadata) d = self._node.modify(a.modify) d.addCallback(lambda ign: self) return d def set_node(self, namex, child, metadata=None, overwrite=True): """I add a child at the specific name. I return a Deferred that fires when the operation finishes. This Deferred will fire with the child node that was just added. I will replace any existing child of the same name. 
If this directory node is read-only, the Deferred will errback with a NotWriteableError.""" precondition(IFilesystemNode.providedBy(child), child) if self.is_readonly(): return defer.fail(NotWriteableError()) assert IFilesystemNode.providedBy(child), child a = Adder(self, overwrite=overwrite, create_readonly_node=self._create_readonly_node) a.set_node(namex, child, metadata) d = self._node.modify(a.modify) d.addCallback(lambda res: child) return d def set_nodes(self, entries, overwrite=True): precondition(isinstance(entries, dict), entries) if self.is_readonly(): return defer.fail(NotWriteableError()) a = Adder(self, entries, overwrite=overwrite, create_readonly_node=self._create_readonly_node) d = self._node.modify(a.modify) d.addCallback(lambda res: self) return d def add_file(self, namex, uploadable, metadata=None, overwrite=True): """I upload a file (using the given IUploadable), then attach the resulting FileNode to the directory at the given name. I return a Deferred that fires (with the IFileNode of the uploaded file) when the operation completes.""" with ADD_FILE(name=namex, metadata=metadata, overwrite=overwrite).context(): name = normalize(namex) if self.is_readonly(): d = DeferredContext(defer.fail(NotWriteableError())) else: # XXX should pass reactor arg d = DeferredContext(self._uploader.upload(uploadable)) d.addCallback(lambda results: self._create_and_validate_node(results.get_uri(), None, name)) d.addCallback(lambda node: self.set_node(name, node, metadata, overwrite)) return d.addActionFinish() def delete(self, namex, must_exist=True, must_be_directory=False, must_be_file=False): """I remove the child at the specific name. I return a Deferred that fires (with the node just removed) when the operation finishes.""" if self.is_readonly(): return defer.fail(NotWriteableError()) deleter = Deleter(self, namex, must_exist=must_exist, must_be_directory=must_be_directory, must_be_file=must_be_file) d = self._node.modify(deleter.modify) d.addCallback(lambda res: deleter.old_child) return d # XXX: Too many arguments? Worthwhile to break into mutable/immutable? def create_subdirectory(self, namex, initial_children=None, overwrite=True, mutable=True, mutable_version=None, metadata=None): if initial_children is None: initial_children = {} name = normalize(namex) if self.is_readonly(): return defer.fail(NotWriteableError()) if mutable: if mutable_version: d = self._nodemaker.create_new_mutable_directory(initial_children, version=mutable_version) else: d = self._nodemaker.create_new_mutable_directory(initial_children) else: # mutable version doesn't make sense for immmutable directories. assert mutable_version is None d = self._nodemaker.create_immutable_directory(initial_children) def _created(child): entries = {name: (child, metadata)} a = Adder(self, entries, overwrite=overwrite, create_readonly_node=self._create_readonly_node) d = self._node.modify(a.modify) d.addCallback(lambda res: child) return d d.addCallback(_created) return d def move_child_to(self, current_child_namex, new_parent, new_child_namex=None, overwrite=True): """ I take one of my child links and move it to a new parent. The child link is referenced by name. In the new parent, the child link will live at 'new_child_namex', which defaults to 'current_child_namex'. I return a Deferred that fires when the operation finishes. 'new_child_namex' and 'current_child_namex' need not be normalized. 
The overwrite parameter may be True (overwrite any existing child), False (error if the new child link already exists), or ONLY_FILES (error if the new child link exists and points to a directory). """ if self.is_readonly() or new_parent.is_readonly(): return defer.fail(NotWriteableError()) current_child_name = normalize(current_child_namex) if new_child_namex is None: new_child_name = current_child_name else: new_child_name = normalize(new_child_namex) from_uri = self.get_write_uri() if new_parent.get_write_uri() == from_uri and new_child_name == current_child_name: # needed for correctness, otherwise we would delete the child return defer.succeed("redundant rename/relink") d = self.get_child_and_metadata(current_child_name) def _got_child(child_and_metadata): (child, metadata) = child_and_metadata return new_parent.set_node(new_child_name, child, metadata, overwrite=overwrite) d.addCallback(_got_child) d.addCallback(lambda child: self.delete(current_child_name)) return d def deep_traverse(self, walker): """Perform a recursive walk, using this dirnode as a root, notifying the 'walker' instance of everything I encounter. I call walker.enter_directory(parent, children) once for each dirnode I visit, immediately after retrieving the list of children. I pass in the parent dirnode and the dict of childname->(childnode,metadata). This function should *not* traverse the children: I will do that. enter_directory() is most useful for the deep-stats number that counts how large a directory is. I call walker.add_node(node, path) for each node (both files and directories) I can reach. Most work should be done here. I avoid loops by keeping track of verifier-caps and refusing to call walker.add_node() or traverse a node that I've seen before. This means that any file or directory will only be given to the walker once. If files or directories are referenced multiple times by a directory structure, this may appear to under-count or miss some of them. I return a Monitor which can be used to wait for the operation to finish, learn about its progress, or cancel the operation. """ # this is just a tree-walker, except that following each edge # requires a Deferred. We used to use a ConcurrencyLimiter to limit # fanout to 10 simultaneous operations, but the memory load of the # queued operations was excessive (in one case, with 330k dirnodes, # it caused the process to run into the 3.0GB-ish per-process 32bit # linux memory limit, and crashed). So we use a single big Deferred # chain, and do a strict depth-first traversal, one node at a time. # This can be slower, because we aren't pipelining directory reads, # but it brought the memory footprint down by roughly 50%. 
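# As a concrete illustration (a sketch only, not part of this method;
# 'PrintingWalker' and 'dirnode' are hypothetical names), a minimal walker
# that just prints every reachable path could look like:
#
#   class PrintingWalker(object):
#       def set_monitor(self, monitor): self.monitor = monitor
#       def add_node(self, node, path): print("/".join(path))
#       def enter_directory(self, parent, children): pass
#       def finish(self): return None
#
#   monitor = dirnode.deep_traverse(PrintingWalker())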
monitor = Monitor() walker.set_monitor(monitor) found = set([self.get_verify_cap()]) d = self._deep_traverse_dirnode(self, [], walker, monitor, found) d.addCallback(lambda ignored: walker.finish()) d.addBoth(monitor.finish) d.addErrback(lambda f: None) return monitor def _deep_traverse_dirnode(self, node, path, walker, monitor, found): # process this directory, then walk its children monitor.raise_if_cancelled() d = defer.maybeDeferred(walker.add_node, node, path) d.addCallback(lambda ignored: node.list()) d.addCallback(self._deep_traverse_dirnode_children, node, path, walker, monitor, found) return d def _deep_traverse_dirnode_children(self, children, parent, path, walker, monitor, found): monitor.raise_if_cancelled() d = defer.maybeDeferred(walker.enter_directory, parent, children) # we process file-like children first, so we can drop their FileNode # objects as quickly as possible. Tests suggest that a FileNode (held # in the client's nodecache) consumes about 2440 bytes. dirnodes (not # in the nodecache) seem to consume about 2000 bytes. dirkids = [] filekids = [] for name, (child, metadata) in sorted(children.items()): childpath = path + [name] if isinstance(child, UnknownNode): walker.add_node(child, childpath) continue verifier = child.get_verify_cap() # allow LIT files (for which verifier==None) to be processed if (verifier is not None) and (verifier in found): continue found.add(verifier) if IDirectoryNode.providedBy(child): dirkids.append( (child, childpath) ) else: filekids.append( (child, childpath) ) for i, (child, childpath) in enumerate(filekids): d.addCallback(lambda ignored, child=child, childpath=childpath: walker.add_node(child, childpath)) # to work around the Deferred tail-recursion problem # (specifically the defer.succeed flavor) requires us to avoid # doing more than 158 LIT files in a row. We insert a turn break # once every 100 files (LIT or CHK) to preserve some stack space # for other code. This is a different expression of the same # Twisted problem as in #237. 
if i % 100 == 99: d.addCallback(lambda ignored: fireEventually()) for (child, childpath) in dirkids: d.addCallback(lambda ignored, child=child, childpath=childpath: self._deep_traverse_dirnode(child, childpath, walker, monitor, found)) return d def build_manifest(self): """Return a Monitor, with a ['status'] that will be a list of (path, cap) tuples, for all nodes (directories and files) reachable from this one.""" walker = ManifestWalker(self) return self.deep_traverse(walker) def start_deep_stats(self): # Since deep_traverse tracks verifier caps, we avoid double-counting # children for which we've got both a write-cap and a read-cap return self.deep_traverse(DeepStats(self)) def start_deep_check(self, verify=False, add_lease=False): return self.deep_traverse(DeepChecker(self, verify, repair=False, add_lease=add_lease)) def start_deep_check_and_repair(self, verify=False, add_lease=False): return self.deep_traverse(DeepChecker(self, verify, repair=True, add_lease=add_lease)) class ManifestWalker(DeepStats): def __init__(self, origin): DeepStats.__init__(self, origin) self.manifest = [] self.storage_index_strings = set() self.verifycaps = set() def add_node(self, node, path): self.manifest.append( (tuple(path), node.get_uri()) ) si = node.get_storage_index() if si: self.storage_index_strings.add(base32.b2a(si)) v = node.get_verify_cap() if v: self.verifycaps.add(v.to_string()) return DeepStats.add_node(self, node, path) def get_results(self): stats = DeepStats.get_results(self) return {"manifest": self.manifest, "verifycaps": self.verifycaps, "storage-index": self.storage_index_strings, "stats": stats, } class DeepChecker(object): def __init__(self, root, verify, repair, add_lease): root_si = root.get_storage_index() if root_si: root_si_base32 = base32.b2a(root_si) else: root_si_base32 = "" self._lp = log.msg(format="deep-check starting (%(si)s)," " verify=%(verify)s, repair=%(repair)s", si=root_si_base32, verify=verify, repair=repair) self._verify = verify self._repair = repair self._add_lease = add_lease if repair: self._results = DeepCheckAndRepairResults(root_si) else: self._results = DeepCheckResults(root_si) self._stats = DeepStats(root) def set_monitor(self, monitor): self.monitor = monitor monitor.set_status(self._results) def add_node(self, node, childpath): if self._repair: d = node.check_and_repair(self.monitor, self._verify, self._add_lease) d.addCallback(self._results.add_check_and_repair, childpath) else: d = node.check(self.monitor, self._verify, self._add_lease) d.addCallback(self._results.add_check, childpath) d.addCallback(lambda ignored: self._stats.add_node(node, childpath)) return d def enter_directory(self, parent, children): return self._stats.enter_directory(parent, children) def finish(self): log.msg("deep-check done", parent=self._lp) self._results.update_stats(self._stats.get_results()) return self._results # use client.create_dirnode() to make one of these tahoe_lafs-1.20.0/src/allmydata/grid_manager.py0000644000000000000000000003614213615410400016355 0ustar00""" Functions and classes relating to the Grid Manager internal state """ import sys from datetime import ( datetime, timezone, ) from typing import ( Optional, Union, List, IO ) from twisted.python.filepath import FilePath from allmydata.crypto import ( ed25519, ) from allmydata.util import ( base32, jsonbytes as json, dictutil, ) from attrs import ( frozen, Factory, ) @frozen class SignedCertificate(object): """ A signed certificate. """ # A JSON-encoded, UTF-8-encoded certificate. 
certificate : bytes # The signature (although the signature is in base32 in "public", # this contains the decoded raw bytes -- not base32) signature : bytes @classmethod def load(cls, file_like): data = json.load(file_like) return cls( certificate=data["certificate"].encode("utf-8"), signature=base32.a2b(data["signature"].encode("ascii")), ) def marshal(self): """ :return dict: a json-able dict """ return dict( certificate=self.certificate, signature=base32.b2a(self.signature), ) @frozen class _GridManagerStorageServer(object): """ A Grid Manager's notion of a storage server """ name : str public_key : ed25519.Ed25519PublicKey certificates : list = Factory(list) # SignedCertificates def add_certificate(self, certificate): """ Add ``certificate`` """ self.certificates.append(certificate) def public_key_string(self) -> bytes: """ :returns: the public key as bytes. """ return ed25519.string_from_verifying_key(self.public_key) def marshal(self): """ :returns: a dict suitable for JSON representing this object """ return { u"public_key": self.public_key_string(), } @frozen class _GridManagerCertificate(object): """ Represents a single certificate for a single storage-server """ filename : str index : int expires : datetime public_key : ed25519.Ed25519PublicKey def create_grid_manager(): """ Create a new Grid Manager with a fresh keypair """ private_key, public_key = ed25519.create_signing_keypair() return _GridManager( ed25519.string_from_signing_key(private_key), {}, ) def current_datetime_with_zone(): """ :returns: a timezone-aware datetime object representing the current timestamp in UTC """ return datetime.now(timezone.utc) def _load_certificates_for(config_path: FilePath, name: str, gm_key=Optional[ed25519.Ed25519PublicKey]) -> List[_GridManagerCertificate]: """ Load any existing certificates for the given storage-server. :param FilePath config_path: the configuration location (or None for stdin) :param str name: the name of an existing storage-server :param ed25519.Ed25519PublicKey gm_key: an optional Grid Manager public key. If provided, certificates will be verified against it. :returns: list containing any known certificates (may be empty) :raises: ed25519.BadSignature if any certificate signature fails to verify """ cert_index = 0 cert_path = config_path.child('{}.cert.{}'.format(name, cert_index)) certificates = [] while cert_path.exists(): container = SignedCertificate.load(cert_path.open('r')) if gm_key is not None: validate_grid_manager_certificate(gm_key, container) cert_data = json.loads(container.certificate) if cert_data['version'] != 1: raise ValueError( "Unknown certificate version '{}' in '{}'".format( cert_data['version'], cert_path.path, ) ) certificates.append( _GridManagerCertificate( filename=cert_path.path, index=cert_index, expires=datetime.fromisoformat(cert_data['expires']), public_key=ed25519.verifying_key_from_string(cert_data['public_key'].encode('ascii')), ) ) cert_index += 1 cert_path = config_path.child('{}.cert.{}'.format(name, cert_index)) return certificates def load_grid_manager(config_path: Optional[FilePath]): """ Load a Grid Manager from existing configuration. :param FilePath config_path: the configuration location (or None for stdin) :returns: a GridManager instance :raises: ValueError if the confguration is invalid or IOError if expected files can't be opened. 
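For example (an illustrative sketch; the path is hypothetical)::

    gm = load_grid_manager(FilePath("/var/db/grid-manager"))
    print(gm.public_identity())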
""" config_file: Union[IO[bytes], IO[str]] if config_path is None: config_file = sys.stdin else: # this might raise IOError or similar but caller must handle it config_file = config_path.child("config.json").open("r") with config_file: config = json.load(config_file) gm_version = config.get(u'grid_manager_config_version', None) if gm_version != 0: raise ValueError( "Missing or unknown version '{}' of Grid Manager config".format( gm_version ) ) if 'private_key' not in config: raise ValueError( "'private_key' required in config" ) private_key_bytes = config['private_key'].encode('ascii') try: private_key, public_key = ed25519.signing_keypair_from_string(private_key_bytes) except Exception as e: raise ValueError( "Invalid Grid Manager private_key: {}".format(e) ) storage_servers = dict() for name, srv_config in list(config.get(u'storage_servers', {}).items()): if 'public_key' not in srv_config: raise ValueError( "No 'public_key' for storage server '{}'".format(name) ) storage_servers[name] = _GridManagerStorageServer( name, ed25519.verifying_key_from_string(srv_config['public_key'].encode('ascii')), [] if config_path is None else _load_certificates_for(config_path, name, public_key), ) return _GridManager(private_key_bytes, storage_servers) class _GridManager(object): """ A Grid Manager's configuration. """ def __init__(self, private_key_bytes, storage_servers): self._storage_servers = dictutil.UnicodeKeyDict( {} if storage_servers is None else storage_servers ) assert isinstance(private_key_bytes, bytes) self._private_key_bytes = private_key_bytes self._private_key, self._public_key = ed25519.signing_keypair_from_string(self._private_key_bytes) self._version = 0 @property def storage_servers(self): return self._storage_servers def public_identity(self): """ :returns: public key as a string """ return ed25519.string_from_verifying_key(self._public_key) def sign(self, name, expiry): """ Create a new signed certificate for a particular server :param str name: the server to create a certificate for :param timedelta expiry: how far in the future the certificate should expire. :returns SignedCertificate: the signed certificate. """ assert isinstance(name, str) # must be unicode try: srv = self._storage_servers[name] except KeyError: raise KeyError( "No storage server named '{}'".format(name) ) expiration = current_datetime_with_zone() + expiry cert_info = { "expires": expiration.isoformat(), "public_key": srv.public_key_string(), "version": 1, } cert_data = json.dumps_bytes(cert_info, separators=(',',':'), sort_keys=True) sig = ed25519.sign_data(self._private_key, cert_data) certificate = SignedCertificate( certificate=cert_data, signature=sig, ) vk = ed25519.verifying_key_from_signing_key(self._private_key) ed25519.verify_signature(vk, sig, cert_data) srv.add_certificate(certificate) return certificate def add_storage_server(self, name, public_key): """ :param name: a user-meaningful name for the server :param public_key: ed25519.VerifyingKey the public-key of the storage provider (e.g. 
from the contents of node.pubkey for the client) """ assert isinstance(name, str) # must be unicode if name in self._storage_servers: raise KeyError( "Already have a storage server called '{}'".format(name) ) ss = _GridManagerStorageServer(name, public_key, []) self._storage_servers[name] = ss return ss def remove_storage_server(self, name): """ :param name: a user-meaningful name for the server """ assert isinstance(name, str) # must be unicode try: del self._storage_servers[name] except KeyError: raise KeyError( "No storage server called '{}'".format(name) ) def marshal(self): """ :returns: a dict suitable for JSON representing this object """ data = { u"grid_manager_config_version": self._version, u"private_key": self._private_key_bytes.decode('ascii'), } if self._storage_servers: data[u"storage_servers"] = { name: srv.marshal() for name, srv in self._storage_servers.items() } return data def save_grid_manager(file_path, grid_manager, create=True): """ Writes a Grid Manager configuration. :param file_path: a FilePath specifying where to write the config (if None, stdout is used) :param grid_manager: a _GridManager instance :param bool create: if True (the default) we are creating a new grid-manager and will fail if the directory already exists. """ data = json.dumps( grid_manager.marshal(), indent=4, ) if file_path is None: print("{}\n".format(data)) else: try: file_path.makedirs() file_path.chmod(0o700) except OSError: if create: raise with file_path.child("config.json").open("w") as f: f.write(data.encode("utf-8")) f.write(b"\n") def parse_grid_manager_certificate(gm_data: Union[str, bytes]): """ :param gm_data: some data that might be JSON that might be a valid Grid Manager Certificate :returns: json data of a valid Grid Manager certificate, or an exception if the data is not valid. """ required_keys = { 'certificate', 'signature', } js = json.loads(gm_data) if not isinstance(js, dict): raise ValueError( "Grid Manager certificate must be a dict" ) if set(js.keys()) != required_keys: raise ValueError( "Grid Manager certificate must contain: {}".format( ", ".join("'{}'".format(k) for k in required_keys), ) ) return js def validate_grid_manager_certificate(gm_key, alleged_cert): """ :param gm_key: a VerifyingKey instance, a Grid Manager's public key. :param alleged_cert SignedCertificate: A signed certificate. :return: a dict consisting of the deserialized certificate data or None if the signature is invalid. Note we do NOT check the expiry time in this function. """ try: ed25519.verify_signature( gm_key, alleged_cert.signature, alleged_cert.certificate, ) except ed25519.BadSignature: return None # signature is valid; now we can load the actual data cert = json.loads(alleged_cert.certificate) return cert def create_grid_manager_verifier(keys, certs, public_key, now_fn=None, bad_cert=None): """ Creates a predicate for confirming some Grid Manager-issued certificates against Grid Manager keys. A predicate is used (instead of just returning True/False here) so that the expiry-time can be tested on each call. :param list keys: 0 or more ``VerifyingKey`` instances :param list certs: 1 or more Grid Manager certificates each of which is a ``SignedCertificate``. :param str public_key: the identifier of the server we expect certificates for. :param callable now_fn: a callable which returns the current UTC timestamp (or current_datetime_with_zone() if None). :param callable bad_cert: a two-argument callable which is invoked when a certificate verification fails. 
The first argument is the verifying key and the second is the certificate. If None (the default) errors are print()-ed. Note that we may have several certificates and only one must be valid, so this may be called (multiple times) even if the function ultimately returns successfully. :returns: a callable which will return True only-if there is at least one valid certificate (that has not at this moment expired) in `certs` signed by one of the keys in `keys`. """ now_fn = current_datetime_with_zone if now_fn is None else now_fn valid_certs = [] # if we have zero grid-manager keys then everything is valid if not keys: return lambda: True if bad_cert is None: def bad_cert(key, alleged_cert): """ We might want to let the user know about this failed-to-verify certificate .. but also if you have multiple grid-managers then a bunch of these messages would appear. Better would be to bubble this up to some sort of status API (or maybe on the Welcome page?) The only thing that might actually be interesting, though, is whether this whole function returns false or not.. """ print( "Grid Manager certificate signature failed. Certificate: " "\"{cert}\" for key \"{key}\".".format( cert=alleged_cert, key=ed25519.string_from_verifying_key(key), ) ) # validate the signatures on any certificates we have (not yet the expiry dates) for alleged_cert in certs: for key in keys: cert = validate_grid_manager_certificate(key, alleged_cert) if cert is not None: valid_certs.append(cert) else: bad_cert(key, alleged_cert) def validate(): """ :returns: True if *any* certificate is still valid for a server """ now = now_fn() for cert in valid_certs: expires = datetime.fromisoformat(cert["expires"]) pc = cert['public_key'].encode('ascii') assert type(pc) == type(public_key), "{} isn't {}".format(type(pc), type(public_key)) if pc == public_key: if expires > now: # not-expired return True return False return validate tahoe_lafs-1.20.0/src/allmydata/hashtree.py0000644000000000000000000004612013615410400015536 0ustar00# -*- test-case-name: allmydata.test.test_hashtree -*- """ Read and write chunks from files. Version 1.0.0. A file is divided into blocks, each of which has size L{BLOCK_SIZE} (except for the last block, which may be smaller). Blocks are encoded into chunks. One publishes the hash of the entire file. Clients who want to download the file first obtain the hash, then the clients can receive chunks in any order. Cryptographic hashing is used to verify each received chunk before writing to disk. Thus it is impossible to download corrupt data if one has the correct file hash. One obtains the hash of a complete file via L{CompleteChunkFile.file_hash}. One can read chunks from a complete file by the sequence operations of C{len()} and subscripting on a L{CompleteChunkFile} object. One can open an empty or partially downloaded file with L{PartialChunkFile}, and read and write chunks to this file. A chunk will fail to write if its contents and index are not consistent with the overall file hash passed to L{PartialChunkFile} when the partial chunk file was first created. The chunks have an overhead of less than 4% for files of size less than C{10**20} bytes. Benchmarks: - On a 3 GHz Pentium 3, it took 3.4 minutes to first make a L{CompleteChunkFile} object for a 4 GB file. Up to 10 MB of memory was used as the constructor ran. A metafile filename was passed to the constructor, and so the hash information was written to the metafile. The object used a negligible amount of memory after the constructor was finished. 
- Creation of L{CompleteChunkFile} objects in future runs of the program took negligible time, since the hash information was already stored in the metafile. @var BLOCK_SIZE: Size of a block. See L{BlockFile}. @var MAX_CHUNK_SIZE: Upper bound on the size of a chunk. See L{CompleteChunkFile}. free (adj.): unencumbered; not under the control of others Written by Connelly Barnes in 2005 and released into the public domain with no warranty of any kind, either expressed or implied. It probably won't make your computer catch on fire, or eat your children, but it might. Use at your own risk. Ported to Python 3. """ from allmydata.util import mathutil # from the pyutil library from allmydata.util import base32 from allmydata.util.hashutil import tagged_hash, tagged_pair_hash __version__ = '1.0.0-allmydata' BLOCK_SIZE = 65536 MAX_CHUNK_SIZE = BLOCK_SIZE + 4096 def roundup_pow2(x): """ Round integer C{x} up to the nearest power of 2. """ ans = 1 while ans < x: ans *= 2 return ans class CompleteBinaryTreeMixin(object): """ Adds convenience methods to a complete binary tree. Assumes the total number of elements in the binary tree may be accessed via C{__len__}, and that each element can be retrieved using list subscripting. Tree is indexed like so:: 0 / \ 1 2 / \ / \ 3 4 5 6 / \ / \ / \ / \ 7 8 9 10 11 12 13 14 """ def parent(self, i): """ Index of the parent of C{i}. """ if i < 1 or (hasattr(self, '__len__') and i >= len(self)): raise IndexError('index out of range: ' + repr(i)) return (i - 1) // 2 def lchild(self, i): """ Index of the left child of C{i}. """ ans = 2 * i + 1 if i < 0 or (hasattr(self, '__len__') and ans >= len(self)): raise IndexError('index out of range: ' + repr(i)) return ans def rchild(self, i): """ Index of right child of C{i}. """ ans = 2 * i + 2 if i < 0 or (hasattr(self, '__len__') and ans >= len(self)): raise IndexError('index out of range: ' + repr(i)) return ans def sibling(self, i): """ Index of sibling of C{i}. """ parent = self.parent(i) if self.lchild(parent) == i: return self.rchild(parent) else: return self.lchild(parent) def needed_for(self, i): """ Return a list of node indices that are necessary for the hash chain. """ if i < 0 or i >= len(self): raise IndexError('index out of range: 0 >= %s < %s' % (i, len(self))) needed = [] here = i while here != 0: needed.append(self.sibling(here)) here = self.parent(here) return needed def depth_first(self, i=0): yield i, 0 try: for child,childdepth in self.depth_first(self.lchild(i)): yield child, childdepth+1 except IndexError: pass try: for child,childdepth in self.depth_first(self.rchild(i)): yield child, childdepth+1 except IndexError: pass def dump(self): lines = [] for i,depth in self.depth_first(): value = base32.b2a_or_none(self[i]) if value is not None: value = str(value, "utf-8") lines.append("%s%3d: %s" % (" "*depth, i, value)) return "\n".join(lines) + "\n" def get_leaf_index(self, leafnum): return self.first_leaf_num + leafnum def get_leaf(self, leafnum): return self[self.first_leaf_num + leafnum] def depth_of(i): """Return the depth or level of the given node. Level 0 contains node 0 Level 1 contains nodes 1 and 2. Level 2 contains nodes 3,4,5,6.""" return mathutil.log_floor(i+1, 2) def empty_leaf_hash(i): return tagged_hash(b'Merkle tree empty leaf', b"%d" % i) def pair_hash(a, b): return tagged_pair_hash(b'Merkle tree internal node', a, b) class HashTree(CompleteBinaryTreeMixin, list): """ Compute Merkle hashes at any node in a complete binary tree. 
Tree is indexed like so:: 0 / \ 1 2 / \ / \ 3 4 5 6 / \ / \ / \ / \ 7 8 9 10 11 12 13 14 <- List passed to constructor. """ def __init__(self, L): """ Create complete binary tree from list of hash strings. The list is augmented by hashes so its length is a power of 2, and then this is used as the bottom row of the hash tree. The augmenting is done so that if the augmented element is at index C{i}, then its value is C{hash(tagged_hash('Merkle tree empty leaf', '%d'%i))}. """ # Augment the list. start = len(L) end = roundup_pow2(len(L)) self.first_leaf_num = end - 1 L = L + [None] * (end - start) for i in range(start, end): L[i] = empty_leaf_hash(i) # Form each row of the tree. rows = [L] while len(rows[-1]) != 1: last = rows[-1] rows += [[pair_hash(last[2*i], last[2*i+1]) for i in range(len(last)//2)]] # Flatten the list of rows into a single list. rows.reverse() self[:] = sum(rows, []) def needed_hashes(self, leafnum, include_leaf=False): """Which hashes will someone need to validate a given data block? I am used to answer a question: supposing you have the data block that is used to form leaf hash N, and you want to validate that it, which hashes would you need? I accept a leaf number and return a set of 'hash index' values, which are integers from 0 to len(self). In the 'hash index' number space, hash[0] is the root hash, while hash[len(self)-1] is the last leaf hash. This method can be used to find out which hashes you should request from some untrusted source (usually the same source that provides the data block), so you can minimize storage or transmission overhead. It can also be used to determine which hashes you should send to a remote data store so that it will be able to provide validatable data in the future. I will not include '0' (the root hash) in the result, since the root is generally stored somewhere that is more trusted than the source of the remaining hashes. I will include the leaf hash itself only if you ask me to, by passing include_leaf=True. """ needed = set(self.needed_for(self.first_leaf_num + leafnum)) if include_leaf: needed.add(self.first_leaf_num + leafnum) return needed class NotEnoughHashesError(Exception): pass class BadHashError(Exception): pass class IncompleteHashTree(CompleteBinaryTreeMixin, list): """I am a hash tree which may or may not be complete. I can be used to validate inbound data from some untrustworthy provider who has a subset of leaves and a sufficient subset of internal nodes. Initially I am completely unpopulated. Over time, I will become filled with hashes, just enough to validate particular leaf nodes. If you desire to validate leaf number N, first find out which hashes I need by calling needed_hashes(N). This will return a list of node numbers (which will nominally be the sibling chain between the given leaf and the root, but if I already have some of those nodes, needed_hashes(N) will only return a subset). Obtain these hashes from the data provider, then tell me about them with set_hash(i, HASH). Once I have enough hashes, you can tell me the hash of the leaf with set_leaf_hash(N, HASH), and I will either return None or raise BadHashError. The first hash to be set will probably be 0 (the root hash), since this is the one that will come from someone more trustworthy than the data provider. 
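Condensed, that exchange looks roughly like this (an illustrative sketch; 'trusted_channel', 'untrusted_channel' and 'leaf0_hash' are hypothetical, and a fuller worked example appears in set_hashes() below)::

    iht = IncompleteHashTree(num_leaves)
    iht.set_hashes(hashes={0: trusted_channel.get_roothash()})
    needed = iht.needed_hashes(0)      # hash indices to request from the provider
    iht.set_hashes(untrusted_channel.get_hashes(needed), leaves={0: leaf0_hash})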
""" def __init__(self, num_leaves): L = [None] * num_leaves start = len(L) end = roundup_pow2(len(L)) self.first_leaf_num = end - 1 L = L + [None] * (end - start) rows = [L] while len(rows[-1]) != 1: last = rows[-1] rows += [[None for i in range(len(last)//2)]] # Flatten the list of rows into a single list. rows.reverse() self[:] = sum(rows, []) def needed_hashes(self, leafnum, include_leaf=False): """Which new hashes do I need to validate a given data block? I am much like HashTree.needed_hashes(), except that I don't include hashes that I already know about. When needed_hashes() is called on an empty IncompleteHashTree, it will return the same set as a HashTree of the same size. But later, once hashes have been added with set_hashes(), I will ask for fewer hashes, since some of the necessary ones have already been set. """ maybe_needed = set(self.needed_for(self.first_leaf_num + leafnum)) if include_leaf: maybe_needed.add(self.first_leaf_num + leafnum) return set([i for i in maybe_needed if self[i] is None]) def _name_hash(self, i): name = "[%d of %d]" % (i, len(self)) if i >= self.first_leaf_num: leafnum = i - self.first_leaf_num numleaves = len(self) - self.first_leaf_num name += " (leaf [%d] of %d)" % (leafnum, numleaves) return name def set_hashes(self, hashes=None, leaves=None): """Add a bunch of hashes to the tree. I will validate these to the best of my ability. If I already have a copy of any of the new hashes, the new values must equal the existing ones, or I will raise BadHashError. If adding a hash allows me to compute a parent hash, those parent hashes must match or I will raise BadHashError. If I raise BadHashError, I will forget about all the hashes that you tried to add, leaving my state exactly the same as before I was called. If I return successfully, I will remember all those hashes. I insist upon being able to validate all of the hashes that were given to me. If I cannot do this because I'm missing some hashes, I will raise NotEnoughHashesError (and forget about all the hashes that you tried to add). Note that this means that the root hash must either be included in 'hashes', or it must have been provided at some point in the past. 'leaves' is a dictionary uses 'leaf index' values, which range from 0 (the left-most leaf) to num_leaves-1 (the right-most leaf), and form the base of the tree. 'hashes' uses 'hash_index' values, which range from 0 (the root of the tree) to 2*num_leaves-2 (the right-most leaf). leaf[i] is the same as hash[num_leaves-1+i]. The best way to use me is to start by obtaining the root hash from some 'good' channel and populate me with it: iht = IncompleteHashTree(numleaves) roothash = trusted_channel.get_roothash() iht.set_hashes(hashes={0: roothash}) Then use the 'bad' channel to obtain data block 0 and the corresponding hash chain (a dict with the same hashes that needed_hashes(0) tells you, e.g. {0:h0, 2:h2, 4:h4, 8:h8} when len(L)=8). Hash the data block to create leaf0, then feed everything into set_hashes() and see if it raises an exception or not:: otherhashes = untrusted_channel.get_hashes() # otherhashes.keys() should == iht.needed_hashes(leaves=[0]) datablock0 = untrusted_channel.get_data(0) leaf0 = HASH(datablock0) # HASH() is probably hashutil.tagged_hash(tag, datablock0) iht.set_hashes(otherhashes, leaves={0: leaf0}) If the set_hashes() call doesn't raise an exception, the data block was valid. If it raises BadHashError, then either the data block was corrupted or one of the received hashes was corrupted. 
If it raises NotEnoughHashesError, then the otherhashes dictionary was incomplete. """ if hashes is None: hashes = {} if leaves is None: leaves = {} assert isinstance(hashes, dict) for h in hashes.values(): assert isinstance(h, bytes) assert isinstance(leaves, dict) for h in leaves.values(): assert isinstance(h, bytes) new_hashes = hashes.copy() for leafnum,leafhash in leaves.items(): hashnum = self.first_leaf_num + leafnum if hashnum in new_hashes: if new_hashes[hashnum] != leafhash: raise BadHashError("got conflicting hashes in my " "arguments: leaves[%d] != hashes[%d]" % (leafnum, hashnum)) new_hashes[hashnum] = leafhash remove_upon_failure = set() # we'll remove these if the check fails # visualize this method in the following way: # A: start with the empty or partially-populated tree as shown in # the HashTree docstring # B: add all of our input hashes to the tree, filling in some of the # holes. Don't overwrite anything, but new values must equal the # existing ones. Mark everything that was added with a red dot # (meaning "not yet validated") # C: start with the lowest/deepest level. Pick any red-dotted node, # hash it with its sibling to compute the parent hash. Add the # parent to the tree just like in step B (if the parent already # exists, the values must be equal; if not, add our computed # value with a red dot). If we have no sibling, throw # NotEnoughHashesError, since we won't be able to validate this # node. Remove the red dot. If there was a red dot on our # sibling, remove it too. # D: finish all red-dotted nodes in one level before moving up to # the next. # E: if we hit NotEnoughHashesError or BadHashError before getting # to the root, discard every hash we've added. try: num_levels = depth_of(len(self)-1) # hashes_to_check[level] is set(index). This holds the "red dots" # described above hashes_to_check = [set() for level in range(num_levels+1)] # first we provisionally add all hashes to the tree, comparing # any duplicates for i,h in new_hashes.items(): if self[i]: if self[i] != h: raise BadHashError("new hash %r does not match " "existing hash %r at %r" % (base32.b2a(h), base32.b2a(self[i]), self._name_hash(i))) else: level = depth_of(i) hashes_to_check[level].add(i) self[i] = h remove_upon_failure.add(i) for level in reversed(range(len(hashes_to_check))): this_level = hashes_to_check[level] while this_level: i = this_level.pop() if i == 0: # The root has no sibling. How lonely. You can't # really *check* the root; you either accept it # because the caller told you what it is by including # it in hashes, or you accept it because you # calculated it from its two children. You probably # want to set the root (from a trusted source) before # adding any children from an untrusted source. continue siblingnum = self.sibling(i) if self[siblingnum] is None: # without a sibling, we can't compute a parent, and # we can't verify this node raise NotEnoughHashesError("unable to validate [%d]"%i) parentnum = self.parent(i) # make sure we know right from left leftnum, rightnum = sorted([i, siblingnum]) new_parent_hash = pair_hash(self[leftnum], self[rightnum]) if self[parentnum]: if self[parentnum] != new_parent_hash: raise BadHashError("h([%d]+[%d]) != h[%d]" % (leftnum, rightnum, parentnum)) else: self[parentnum] = new_parent_hash remove_upon_failure.add(parentnum) parent_level = depth_of(parentnum) assert parent_level == level-1 hashes_to_check[parent_level].add(parentnum) # our sibling is now as valid as this node this_level.discard(siblingnum) # we're done! 
except (BadHashError, NotEnoughHashesError): for i in remove_upon_failure: self[i] = None raise tahoe_lafs-1.20.0/src/allmydata/history.py0000644000000000000000000000747513615410400015446 0ustar00"""Ported to Python 3. """ import weakref class History(object): """Keep track of recent operations, for a status display.""" name = "history" MAX_DOWNLOAD_STATUSES = 10 MAX_UPLOAD_STATUSES = 10 MAX_MAPUPDATE_STATUSES = 20 MAX_PUBLISH_STATUSES = 20 MAX_RETRIEVE_STATUSES = 40 def __init__(self, stats_provider=None): self.stats_provider = stats_provider self.all_downloads_statuses = weakref.WeakKeyDictionary() self.recent_download_statuses = [] self.all_upload_statuses = weakref.WeakKeyDictionary() self.recent_upload_statuses = [] self.all_mapupdate_status = weakref.WeakKeyDictionary() self.recent_mapupdate_status = [] self.all_publish_status = weakref.WeakKeyDictionary() self.recent_publish_status = [] self.all_retrieve_status = weakref.WeakKeyDictionary() self.recent_retrieve_status = [] self.all_helper_upload_statuses = weakref.WeakKeyDictionary() self.recent_helper_upload_statuses = [] def add_download(self, download_status): self.all_downloads_statuses[download_status] = None self.recent_download_statuses.append(download_status) while len(self.recent_download_statuses) > self.MAX_DOWNLOAD_STATUSES: self.recent_download_statuses.pop(0) def list_all_download_statuses(self): for ds in self.all_downloads_statuses: yield ds def add_upload(self, upload_status): self.all_upload_statuses[upload_status] = None self.recent_upload_statuses.append(upload_status) while len(self.recent_upload_statuses) > self.MAX_UPLOAD_STATUSES: self.recent_upload_statuses.pop(0) def list_all_upload_statuses(self): for us in self.all_upload_statuses: yield us def notify_mapupdate(self, p): self.all_mapupdate_status[p] = None self.recent_mapupdate_status.append(p) while len(self.recent_mapupdate_status) > self.MAX_MAPUPDATE_STATUSES: self.recent_mapupdate_status.pop(0) def notify_publish(self, p, size): self.all_publish_status[p] = None self.recent_publish_status.append(p) if self.stats_provider: self.stats_provider.count('mutable.files_published', 1) # We must be told bytes_published as an argument, since the # publish_status does not yet know how much data it will be asked # to send. When we move to MDMF we'll need to find a better way # to handle this. 
self.stats_provider.count('mutable.bytes_published', size) while len(self.recent_publish_status) > self.MAX_PUBLISH_STATUSES: self.recent_publish_status.pop(0) def notify_retrieve(self, r): self.all_retrieve_status[r] = None self.recent_retrieve_status.append(r) if self.stats_provider: self.stats_provider.count('mutable.files_retrieved', 1) self.stats_provider.count('mutable.bytes_retrieved', r.get_size()) while len(self.recent_retrieve_status) > self.MAX_RETRIEVE_STATUSES: self.recent_retrieve_status.pop(0) def list_all_mapupdate_statuses(self): for s in self.all_mapupdate_status: yield s def list_all_publish_statuses(self): for s in self.all_publish_status: yield s def list_all_retrieve_statuses(self): for s in self.all_retrieve_status: yield s def notify_helper_upload(self, s): self.all_helper_upload_statuses[s] = None self.recent_helper_upload_statuses.append(s) while len(self.recent_helper_upload_statuses) > self.MAX_UPLOAD_STATUSES: self.recent_helper_upload_statuses.pop(0) def list_all_helper_statuses(self): for s in self.all_helper_upload_statuses: yield s tahoe_lafs-1.20.0/src/allmydata/interfaces.py0000644000000000000000000040424713615410400016066 0ustar00""" Interfaces for Tahoe-LAFS. Ported to Python 3. Note that for RemoteInterfaces, the __remote_name__ needs to be a native string because of https://github.com/warner/foolscap/blob/43f4485a42c9c28e2c79d655b3a9e24d4e6360ca/src/foolscap/remoteinterface.py#L67 """ from typing import Dict from zope.interface import Interface, Attribute from twisted.plugin import ( IPlugin, ) from twisted.internet.defer import Deferred from foolscap.api import StringConstraint, ListOf, TupleOf, SetOf, DictOf, \ ChoiceOf, IntegerConstraint, Any, RemoteInterface, Referenceable HASH_SIZE=32 SALT_SIZE=16 SDMF_VERSION=0 MDMF_VERSION=1 Hash = StringConstraint(maxLength=HASH_SIZE, minLength=HASH_SIZE)# binary format 32-byte SHA256 hash Nodeid = StringConstraint(maxLength=20, minLength=20) # binary format 20-byte SHA1 hash FURL = StringConstraint(1000) StorageIndex = StringConstraint(16) URI = StringConstraint(300) # kind of arbitrary MAX_BUCKETS = 256 # per peer -- zfec offers at most 256 shares per file # The default size for segments of new CHK ("immutable") uploads. DEFAULT_IMMUTABLE_MAX_SEGMENT_SIZE = 1024*1024 ShareData = StringConstraint(None) URIExtensionData = StringConstraint(1000) Number = IntegerConstraint(8) # 2**(8*8) == 16EiB ~= 18e18 ~= 18 exabytes Offset = Number ReadSize = int # the 'int' constraint is 2**31 == 2Gib -- large files are processed in not-so-large increments WriteEnablerSecret = Hash # used to protect mutable share modifications LeaseRenewSecret = Hash # used to protect lease renewal requests LeaseCancelSecret = Hash # was used to protect lease cancellation requests class NoSpace(Exception): """Storage space was not available for a space-allocating operation.""" class DataTooLargeError(Exception): """The write went past the expected size of the bucket.""" class ConflictingWriteError(Exception): """Two writes happened to same immutable with different data.""" class RIBucketWriter(RemoteInterface): """ Objects of this kind live on the server side. """ def write(offset=Offset, data=ShareData): return None def close(): """ If the data that has been written is incomplete or inconsistent then the server will throw the data away, else it will store it for future retrieval. """ return None def abort(): """Abandon all the data that has been written. 
""" return None class RIBucketReader(RemoteInterface): def read(offset=Offset, length=ReadSize): return ShareData def advise_corrupt_share(reason=bytes): """Clients who discover hash failures in shares that they have downloaded from me will use this method to inform me about the failures. I will record their concern so that my operator can manually inspect the shares in question. I return None. This is a wrapper around RIStorageServer.advise_corrupt_share() that is tied to a specific share, and therefore does not need the extra share-identifying arguments. Please see that method for full documentation. """ TestVector = ListOf(TupleOf(Offset, ReadSize, bytes, bytes)) # elements are (offset, length, operator, specimen) # operator must be b"eq", typically length==len(specimen), but one can ensure # writes don't happen to empty shares by setting length to 1 and specimen to # b"". The operator is still used for wire compatibility with old versions. DataVector = ListOf(TupleOf(Offset, ShareData)) # (offset, data). This limits us to 30 writes of 1MiB each per call TestAndWriteVectorsForShares = DictOf(int, TupleOf(TestVector, DataVector, ChoiceOf(None, Offset), # new_length )) ReadVector = ListOf(TupleOf(Offset, ReadSize)) ReadData = ListOf(ShareData) # returns data[offset:offset+length] for each element of TestVector class RIStorageServer(RemoteInterface): __remote_name__ = "RIStorageServer.tahoe.allmydata.com" def get_version(): """ Return a dictionary of version information. """ return DictOf(bytes, Any()) def allocate_buckets(storage_index=StorageIndex, renew_secret=LeaseRenewSecret, cancel_secret=LeaseCancelSecret, sharenums=SetOf(int, maxLength=MAX_BUCKETS), allocated_size=Offset, canary=Referenceable): """ @param storage_index: the index of the bucket to be created or increfed. @param sharenums: these are the share numbers (probably between 0 and 99) that the sender is proposing to store on this server. @param renew_secret: This is the secret used to protect bucket refresh This secret is generated by the client and stored for later comparison by the server. Each server is given a different secret. @param cancel_secret: This no longer allows lease cancellation, but must still be a unique value identifying the lease. XXX stop relying on it to be unique. @param canary: If the canary is lost before close(), the bucket is deleted. @return: tuple of (alreadygot, allocated), where alreadygot is what we already have and allocated is what we hereby agree to accept. New leases are added for shares in both lists. """ return TupleOf(SetOf(int, maxLength=MAX_BUCKETS), DictOf(int, RIBucketWriter, maxKeys=MAX_BUCKETS)) def add_lease(storage_index=StorageIndex, renew_secret=LeaseRenewSecret, cancel_secret=LeaseCancelSecret): """ Add a new lease on the given bucket. If the renew_secret matches an existing lease, that lease will be renewed instead. If there is no bucket for the given storage_index, return silently. (note that in tahoe-1.3.0 and earlier, IndexError was raised if there was no bucket) """ return Any() # returns None now, but future versions might change def get_buckets(storage_index=StorageIndex): return DictOf(int, RIBucketReader, maxKeys=MAX_BUCKETS) def slot_readv(storage_index=StorageIndex, shares=ListOf(int), readv=ReadVector): """Read a vector from the numbered shares associated with the given storage index. An empty shares list means to return data from all known shares. 
Returns a dictionary with one key per share.""" return DictOf(int, ReadData) # shnum -> results def slot_testv_and_readv_and_writev(storage_index=StorageIndex, secrets=TupleOf(WriteEnablerSecret, LeaseRenewSecret, LeaseCancelSecret), tw_vectors=TestAndWriteVectorsForShares, r_vector=ReadVector, ): """ General-purpose test-read-and-set operation for mutable slots: (1) For submitted shnums, compare the test vectors against extant shares, or against an empty share for shnums that do not exist. (2) Use the read vectors to extract "old data" from extant shares. (3) If all tests in (1) passed, then apply the write vectors (possibly creating new shares). (4) Return whether the tests passed, and the "old data", which does not include any modifications made by the writes. The operation does not interleave with other operations on the same shareset. This method is, um, large. The goal is to allow clients to update all the shares associated with a mutable file in a single round trip. @param storage_index: the index of the bucket to be created or increfed. @param write_enabler: a secret that is stored along with the slot. Writes are accepted from any caller who can present the matching secret. A different secret should be used for each slot*server pair. @param renew_secret: This is the secret used to protect bucket refresh This secret is generated by the client and stored for later comparison by the server. Each server is given a different secret. @param cancel_secret: This no longer allows lease cancellation, but must still be a unique value identifying the lease. XXX stop relying on it to be unique. The 'secrets' argument is a tuple of (write_enabler, renew_secret, cancel_secret). The first is required to perform any write. The latter two are used when allocating new shares. To simply acquire a new lease on existing shares, use an empty testv and an empty writev. Each share can have a separate test vector (i.e. a list of comparisons to perform). If all vectors for all shares pass, then all writes for all shares are recorded. Each comparison is a 4-tuple of (offset, length, operator, specimen), which effectively does a bool( (read(offset, length)) OPERATOR specimen ) and only performs the write if all these evaluate to True. Basic test-and-set uses 'eq'. Write-if-newer uses a seqnum and (offset, length, 'lt', specimen). Write-if-same-or-newer uses 'le'. Reads from the end of the container are truncated, and missing shares behave like empty ones, so to assert that a share doesn't exist (for use when creating a new share), use (0, 1, 'eq', ''). The write vector will be applied to the given share, expanding it if necessary. A write vector applied to a share number that did not exist previously will cause that share to be created. Write vectors must not overlap (if they do, this will either cause an error or apply them in an unspecified order). Duplicate write vectors, with the same offset and data, are currently tolerated but are not desirable. In Tahoe-LAFS v1.8.3 or later (except 1.9.0a1), if you send a write vector whose offset is beyond the end of the current data, the space between the end of the current data and the beginning of the write vector will be filled with zero bytes. In earlier versions the contents of this space was unspecified (and might end up containing secrets). Storage servers with the new zero-filling behavior will advertise a true value for the 'fills-holes-with-zero-bytes' key (under 'http://allmydata.org/tahoe/protocols/storage/v1') in their version information. 
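For example, a client creating a brand-new share 0 might send something like the following (illustrative values only; 'share0_data' is hypothetical)::

    testv = [(0, 1, b'eq', b'')]        # assert that share 0 does not exist yet
    writev = [(0, share0_data)]         # write the new share starting at offset 0
    tw_vectors = {0: (testv, writev, None)}   # None: no new_length, no truncation
    r_vector = [(0, 100)]               # also read the first 100 bytes of each extant share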
Each write vector is accompanied by a 'new_length' argument, which can be used to truncate the data. If new_length is not None and it is less than the current size of the data (after applying all write vectors), then the data will be truncated to new_length. If new_length==0, the share will be deleted. In Tahoe-LAFS v1.8.2 and earlier, new_length could also be used to enlarge the file by sending a number larger than the size of the data after applying all write vectors. That behavior was not used, and as of Tahoe-LAFS v1.8.3 it no longer works and the new_length is ignored in that case. If a storage client knows that the server supports zero-filling, for example from the 'fills-holes-with-zero-bytes' key in its version information, it can extend the file efficiently by writing a single zero byte just before the new end-of-file. Otherwise it must explicitly write zeroes to all bytes between the old and new end-of-file. In any case it should avoid sending new_length larger than the size of the data after applying all write vectors. The read vector is used to extract data from all known shares, *before* any writes have been applied. The same read vector is used for all shares. This captures the state that was tested by the test vector, for extant shares. This method returns two values: a boolean and a dict. The boolean is True if the write vectors were applied, False if not. The dict is keyed by share number, and each value contains a list of strings, one for each element of the read vector. If the write_enabler is wrong, this will raise BadWriteEnablerError. To enable share migration (using update_write_enabler), the exception will have the nodeid used for the old write enabler embedded in it, in the following string:: The write enabler was recorded by nodeid '%s'. Note that the nodeid here is encoded using the same base32 encoding used by Foolscap and allmydata.util.idlib.nodeid_b2a(). """ return TupleOf(bool, DictOf(int, ReadData)) def advise_corrupt_share(share_type=bytes, storage_index=StorageIndex, shnum=int, reason=bytes): """Clients who discover hash failures in shares that they have downloaded from me will use this method to inform me about the failures. I will record their concern so that my operator can manually inspect the shares in question. I return None. 'share_type' is either 'mutable' or 'immutable'. 'storage_index' is a (binary) storage index string, and 'shnum' is the integer share number. 'reason' is a human-readable explanation of the problem, probably including some expected hash values and the computed ones that did not match. Corruption advisories for mutable shares should include a hash of the public key (the same value that appears in the mutable-file verify-cap), since the current share format does not store that on disk. """ # The result of IStorageServer.get_version(): VersionMessage = Dict[bytes, object] class IStorageServer(Interface): """ An object capable of storing shares for a storage client. 
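For example, a caller holding some provider of this interface (a hypothetical 'storage_server' here) might do::

    d = storage_server.get_version()
    d.addCallback(lambda version: print(version))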
""" def get_version() -> Deferred[VersionMessage]: """ :see: ``RIStorageServer.get_version`` """ def allocate_buckets( storage_index, renew_secret, cancel_secret, sharenums, allocated_size, canary, ): """ :see: ``RIStorageServer.allocate_buckets`` """ def add_lease( storage_index, renew_secret, cancel_secret, ): """ :see: ``RIStorageServer.add_lease`` """ def get_buckets( storage_index, ): """ :see: ``RIStorageServer.get_buckets`` """ def slot_readv( storage_index, shares, readv, ): """ :see: ``RIStorageServer.slot_readv`` """ def slot_testv_and_readv_and_writev( storage_index, secrets, tw_vectors, r_vector, ): """ :see: ``RIStorageServer.slot_testv_readv_and_writev`` While the interface mostly matches, test vectors are simplified. Instead of a tuple ``(offset, read_size, operator, expected_data)`` in the original, for this method you need only pass in ``(offset, read_size, expected_data)``, with the operator implicitly being ``b"eq"``. """ def advise_corrupt_share( share_type, storage_index, shnum, reason, ): """ :see: ``RIStorageServer.advise_corrupt_share`` """ class IStorageBucketWriter(Interface): """ Objects of this kind live on the client side. """ def put_block(segmentnum, data): """ @param segmentnum=int @param data=ShareData: For most segments, this data will be 'blocksize' bytes in length. The last segment might be shorter. @return: a Deferred that fires (with None) when the operation completes """ def put_crypttext_hashes(hashes): """ @param hashes=ListOf(Hash) @return: a Deferred that fires (with None) when the operation completes """ def put_block_hashes(blockhashes): """ @param blockhashes=ListOf(Hash) @return: a Deferred that fires (with None) when the operation completes """ def put_share_hashes(sharehashes): """ @param sharehashes=ListOf(TupleOf(int, Hash)) @return: a Deferred that fires (with None) when the operation completes """ def put_uri_extension(data): """This block of data contains integrity-checking information (hashes of plaintext, crypttext, and shares), as well as encoding parameters that are necessary to recover the data. This is a serialized dict mapping strings to other strings. The hash of this data is kept in the URI and verified before any of the data is used. All buckets for a given file contain identical copies of this data. The serialization format is specified with the following pseudocode: for k in sorted(dict.keys()): assert re.match(r'^[a-zA-Z_\-]+$', k) write(k + ':' + netstring(dict[k])) @param data=URIExtensionData @return: a Deferred that fires (with None) when the operation completes """ def close(): """Finish writing and close the bucket. The share is not finalized until this method is called: if the uploading client disconnects before calling close(), the partially-written share will be discarded. @return: a Deferred that fires (with None) when the operation completes """ class IStorageBucketReader(Interface): def get_block_data(blocknum, blocksize, size): """Most blocks will be the same size. The last block might be shorter than the others. 
@param blocknum=int @param blocksize=int @param size=int @return: ShareData """ def get_crypttext_hashes(): """ @return: ListOf(Hash) """ def get_block_hashes(at_least_these=()): """ @param at_least_these=SetOf(int) @return: ListOf(Hash) """ def get_share_hashes(): """ @return: ListOf(TupleOf(int, Hash)) """ def get_uri_extension(): """ @return: URIExtensionData """ class IStorageBroker(Interface): def get_servers_for_psi(peer_selection_index): """ @return: list of IServer instances """ def get_connected_servers(): """ @return: frozenset of connected IServer instances """ def get_known_servers(): """ @return: frozenset of IServer instances """ def get_all_serverids(): """ @return: frozenset of serverid strings """ def get_nickname_for_serverid(serverid): """ @return: unicode nickname, or None """ class IDisplayableServer(Interface): def get_nickname(): pass def get_name(): pass def get_longname(): pass class IServer(IDisplayableServer): """I live in the client, and represent a single server.""" def start_connecting(trigger_cb): pass def upload_permitted(): """ :return: True if we should use this server for uploads, False otherwise. """ def get_storage_server(): """ Once a server is connected, I return an ``IStorageServer``. Before a server is connected for the first time, I return None. Note that the ``IStorageServer`` I return will start producing DeadReferenceErrors once the connection is lost. """ class IMutableSlotWriter(Interface): """ The interface for a writer around a mutable slot on a remote server. """ def set_checkstring(seqnum_or_checkstring, root_hash=None, salt=None): """ Set the checkstring that I will pass to the remote server when writing. @param checkstring A packed checkstring to use. Note that implementations can differ in which semantics they wish to support for set_checkstring -- they can, for example, build the checkstring themselves from its constituents, or some other thing. """ def get_checkstring(): """ Get the checkstring that I think currently exists on the remote server. """ def put_block(data, segnum, salt): """ Add a block and salt to the share. """ def put_encprivkey(encprivkey): """ Add the encrypted private key to the share. """ def put_blockhashes(blockhashes): """ @param blockhashes=list Add the block hash tree to the share. """ def put_sharehashes(sharehashes): """ @param sharehashes=dict Add the share hash chain to the share. """ def get_signable(): """ Return the part of the share that needs to be signed. """ def put_signature(signature): """ Add the signature to the share. """ def put_verification_key(verification_key): """ Add the verification key to the share. """ def finish_publishing(): """ Do anything necessary to finish writing the share to a remote server. I require that no further publishing needs to take place after this method has been called. """ class IURI(Interface): def init_from_string(uri): """Accept a string (as created by my to_string() method) and populate this instance with its data. I am not normally called directly, please use the module-level uri.from_string() function to convert arbitrary URI strings into IURI-providing instances.""" def is_readonly(): """Return False if this URI be used to modify the data. 
Return True if this URI cannot be used to modify the data.""" def is_mutable(): """Return True if the data can be modified by *somebody* (perhaps someone who has a more powerful URI than this one).""" # TODO: rename to get_read_cap() def get_readonly(): """Return another IURI instance that represents a read-only form of this one. If is_readonly() is True, this returns self.""" def get_verify_cap(): """Return an instance that provides IVerifierURI, which can be used to check on the availability of the file or directory, without providing enough capabilities to actually read or modify the contents. This may return None if the file does not need checking or verification (e.g. LIT URIs). """ def to_string(): """Return a string of printable ASCII characters, suitable for passing into init_from_string.""" class IVerifierURI(IURI): def init_from_string(uri): """Accept a string (as created by my to_string() method) and populate this instance with its data. I am not normally called directly, please use the module-level uri.from_string() function to convert arbitrary URI strings into IURI-providing instances.""" def to_string(): """Return a string of printable ASCII characters, suitable for passing into init_from_string.""" class IDirnodeURI(Interface): """I am a URI that represents a dirnode.""" class IFileURI(Interface): """I am a URI that represents a filenode.""" def get_size(): """Return the length (in bytes) of the file that I represent.""" class IImmutableFileURI(IFileURI): pass class IMutableFileURI(Interface): pass class IDirectoryURI(Interface): pass class IReadonlyDirectoryURI(Interface): pass class CapConstraintError(Exception): """A constraint on a cap was violated.""" class MustBeDeepImmutableError(CapConstraintError): """Mutable children cannot be added to an immutable directory. Also, caps obtained from an immutable directory can trigger this error if they are later found to refer to a mutable object and then used.""" class MustBeReadonlyError(CapConstraintError): """Known write caps cannot be specified in a ro_uri field. Also, caps obtained from a ro_uri field can trigger this error if they are later found to be write caps and then used.""" class MustNotBeUnknownRWError(CapConstraintError): """Cannot add an unknown child cap specified in a rw_uri field.""" class IReadable(Interface): """I represent a readable object -- either an immutable file, or a specific version of a mutable file. """ def is_readonly(): """Return True if this reference provides mutable access to the given file or directory (i.e. if you can modify it), or False if not. Note that even if this reference is read-only, someone else may hold a read-write reference to it. For an IReadable returned by get_best_readable_version(), this will always return True, but for instances of subinterfaces such as IMutableFileVersion, it may return False.""" def is_mutable(): """Return True if this file or directory is mutable (by *somebody*, not necessarily you), False if it is is immutable. Note that a file might be mutable overall, but your reference to it might be read-only. On the other hand, all references to an immutable file will be read-only; there are no read-write references to an immutable file.""" def get_storage_index(): """Return the storage index of the file.""" def get_size(): """Return the length (in bytes) of this readable object.""" def download_to_data(): """Download all of the file contents. I return a Deferred that fires with the contents as a byte string. 
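For example (an illustrative sketch only; 'node' stands for any object that provides IReadable)::

    from twisted.internet.defer import inlineCallbacks

    @inlineCallbacks
    def print_length(node):
        # download_to_data() returns a Deferred that fires with the
        # complete contents as a byte string.
        data = yield node.download_to_data()
        print("downloaded %d bytes" % len(data))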
""" def read(consumer, offset=0, size=None): """Download a portion (possibly all) of the file's contents, making them available to the given IConsumer. Return a Deferred that fires (with the consumer) when the consumer is unregistered (either because the last byte has been given to it, or because the consumer threw an exception during write(), possibly because it no longer wants to receive data). The portion downloaded will start at 'offset' and contain 'size' bytes (or the remainder of the file if size==None). It is an error to read beyond the end of the file: callers must use get_size() and clip any non-default offset= and size= parameters. It is permissible to read zero bytes. The consumer will be used in non-streaming mode: an IPullProducer will be attached to it. The consumer will not receive data right away: several network trips must occur first. The order of events will be:: consumer.registerProducer(p, streaming) (if streaming == False):: consumer does p.resumeProducing() consumer.write(data) consumer does p.resumeProducing() consumer.write(data).. (repeat until all data is written) consumer.unregisterProducer() deferred.callback(consumer) If a download error occurs, or an exception is raised by consumer.registerProducer() or consumer.write(), I will call consumer.unregisterProducer() and then deliver the exception via deferred.errback(). To cancel the download, the consumer should call p.stopProducing(), which will result in an exception being delivered via deferred.errback(). See src/allmydata/util/consumer.py for an example of a simple download-to-memory consumer. """ class IPeerSelector(Interface): """ I select peers for an upload, maximizing some measure of health. I keep track of the state of a grid relative to a file. This means that I know about all of the peers that parts of that file could be placed on, and about shares that have been placed on those peers. Given this, I assign shares to peers in a way that maximizes the file's health according to whichever definition of health I am programmed with. I tell the uploader whether or not my assignment is healthy. I keep track of failures during the process and update my conclusions appropriately. """ def add_peer_with_share(peerid, shnum): """ Update my internal state to reflect the fact that peer peerid holds share shnum. Called for shares that are detected before peer selection begins. """ def add_peers(peerids=set): """ Update my internal state to include the peers in peerids as potential candidates for storing a file. """ def mark_readonly_peer(peerid): """ Mark the peer peerid as full. This means that any peer-with-share relationships I know about for peerid remain valid, but that peerid will not be assigned any new shares. """ def mark_bad_peer(peerid): """ Mark the peer peerid as bad. This is typically called when an error is encountered when communicating with a peer. I will disregard any existing peer => share relationships associated with peerid, and will not attempt to assign it any more shares. """ def get_share_placements(): """ Return the share-placement map (a dict) which maps shares to server-ids """ class IWriteable(Interface): """ I define methods that callers can use to update SDMF and MDMF mutable files on a Tahoe-LAFS grid. """ # XXX: For the moment, we have only this. It is possible that we # want to move overwrite() and modify() in here too. def update(data, offset): """ I write the data from my data argument to the MDMF file, starting at offset. 
I continue writing data until my data argument is exhausted, appending data to the file as necessary. """ # assert IMutableUploadable.providedBy(data) # to append data: offset=node.get_size_of_best_version() # do we want to support compacting MDMF? # for an MDMF file, this can be done with O(data.get_size()) # memory. For an SDMF file, any modification takes # O(node.get_size_of_best_version()). class IMutableFileVersion(IReadable): """I provide access to a particular version of a mutable file. The access is read/write if I was obtained from a filenode derived from a write cap, or read-only if the filenode was derived from a read cap. """ def get_sequence_number(): """Return the sequence number of this version.""" def get_servermap(): """Return the IMutableFileServerMap instance that was used to create this object. """ def get_writekey(): """Return this filenode's writekey, or None if the node does not have write-capability. This may be used to assist with data structures that need to make certain data available only to writers, such as the read-write child caps in dirnodes. The recommended process is to have reader-visible data be submitted to the filenode in the clear (where it will be encrypted by the filenode using the readkey), but encrypt writer-visible data using this writekey. """ def overwrite(new_contents): """Replace the contents of the mutable file, provided that no other node has published (or is attempting to publish, concurrently) a newer version of the file than this one. I will avoid modifying any share that is different than the version given by get_sequence_number(). However, if another node is writing to the file at the same time as me, I may manage to update some shares while they update others. If I see any evidence of this, I will signal UncoordinatedWriteError, and the file will be left in an inconsistent state (possibly the version you provided, possibly the old version, possibly somebody else's version, and possibly a mix of shares from all of these). The recommended response to UncoordinatedWriteError is to either return it to the caller (since they failed to coordinate their writes), or to attempt some sort of recovery. It may be sufficient to wait a random interval (with exponential backoff) and repeat your operation. If I do not signal UncoordinatedWriteError, then I was able to write the new version without incident. I return a Deferred that fires (with a PublishStatus object) when the update has completed. """ def modify(modifier_cb): """Modify the contents of the file, by downloading this version, applying the modifier function (or bound method), then uploading the new version. This will succeed as long as no other node publishes a version between the download and the upload. I return a Deferred that fires (with a PublishStatus object) when the update is complete. The modifier callable will be given three arguments: a string (with the old contents), a 'first_time' boolean, and a servermap. As with download_to_data(), the old contents will be from this version, but the modifier can use the servermap to make other decisions (such as refusing to apply the delta if there are multiple parallel versions, or if there is evidence of a newer unrecoverable version). 'first_time' will be True the first time the modifier is called, and False on any subsequent calls. The callable should return a string with the new contents. 
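For example, a modifier that appends a line exactly once might look like this (an illustrative sketch; 'version' stands for an object providing IMutableFileVersion)::

    def append_line_once(old_contents, first_time, servermap):
        line = b"new entry\n"
        # The modifier may be invoked more than once, so check whether
        # the change is already present before applying it again.
        if line in old_contents:
            return None          # no change needed
        return old_contents + line

    d = version.modify(append_line_once)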
The callable must be prepared to be called multiple times, and must examine the input string to see if the change that it wants to make is already present in the old version. If it does not need to make any changes, it can either return None, or return its input string. If the modifier raises an exception, it will be returned in the errback. """ # The hierarchy looks like this: # IFilesystemNode # IFileNode # IMutableFileNode # IImmutableFileNode # IDirectoryNode class IFilesystemNode(Interface): def get_cap(): """Return the strongest 'cap instance' associated with this node. (writecap for writeable-mutable files/directories, readcap for immutable or readonly-mutable files/directories). To convert this into a string, call .to_string() on the result.""" def get_readcap(): """Return a readonly cap instance for this node. For immutable or readonly nodes, get_cap() and get_readcap() return the same thing.""" def get_repair_cap(): """Return an IURI instance that can be used to repair the file, or None if this node cannot be repaired (either because it is not distributed, like a LIT file, or because the node does not represent sufficient authority to create a repair-cap, like a read-only RSA mutable file node [which cannot create the correct write-enablers]). """ def get_verify_cap(): """Return an IVerifierURI instance that represents the 'verify/refresh capability' for this node. The holder of this capability will be able to renew the lease for this node, protecting it from garbage-collection. They will also be able to ask a server if it holds a share for the file or directory. """ def get_uri(): """Return the URI string corresponding to the strongest cap associated with this node. If this node is read-only, the URI will only offer read-only access. If this node is read-write, the URI will offer read-write access. If you have read-write access to a node and wish to share merely read-only access with others, use get_readonly_uri(). """ def get_write_uri(): """Return the URI string that can be used by others to get write access to this node, if it is writeable. If this is a read-only node, return None.""" def get_readonly_uri(): """Return the URI string that can be used by others to get read-only access to this node. The result is a read-only URI, regardless of whether this node is read-only or read-write. If you have merely read-only access to this node, get_readonly_uri() will return the same thing as get_uri(). """ def get_storage_index(): """Return a string with the (binary) storage index in use on this download. This may be None if there is no storage index (i.e. LIT files and directories).""" def is_readonly(): """Return True if this reference provides mutable access to the given file or directory (i.e. if you can modify it), or False if not. Note that even if this reference is read-only, someone else may hold a read-write reference to it.""" def is_mutable(): """Return True if this file or directory is mutable (by *somebody*, not necessarily you), False if it is immutable. Note that a file might be mutable overall, but your reference to it might be read-only. On the other hand, all references to an immutable file will be read-only; there are no read-write references to an immutable file. """ def is_unknown(): """Return True if this is an unknown node.""" def is_allowed_in_immutable_directory(): """Return True if this node is allowed as a child of a deep-immutable directory. This is true if either the node is of a known-immutable type, or it is unknown and read-only.
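As a rough sketch of that rule (illustrative only, not the actual implementation)::

    def allowed_in_immutable_directory(node):
        # Unknown nodes are acceptable only when they are read-only;
        # known nodes must be immutable.
        if node.is_unknown():
            return node.is_readonly()
        return not node.is_mutable()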
""" def raise_error(): """Raise any error associated with this node.""" # XXX: These may not be appropriate outside the context of an IReadable. def get_size(): """Return the length (in bytes) of the data this node represents. For directory nodes, I return the size of the backing store. I return synchronously and do not consult the network, so for mutable objects, I will return the most recently observed size for the object, or None if I don't remember a size. Use get_current_size, which returns a Deferred, if you want more up-to-date information.""" def get_current_size(): """I return a Deferred that fires with the length (in bytes) of the data this node represents. """ class IFileNode(IFilesystemNode): """I am a node that represents a file: a sequence of bytes. I am not a container, like IDirectoryNode.""" def get_best_readable_version(): """Return a Deferred that fires with an IReadable for the 'best' available version of the file. The IReadable provides only read access, even if this filenode was derived from a write cap. For an immutable file, there is only one version. For a mutable file, the 'best' version is the recoverable version with the highest sequence number. If no uncoordinated writes have occurred, and if enough shares are available, then this will be the most recent version that has been uploaded. If no version is recoverable, the Deferred will errback with an UnrecoverableFileError. """ def download_best_version(): """Download the contents of the version that would be returned by get_best_readable_version(). This is equivalent to calling download_to_data() on the IReadable given by that method. I return a Deferred that fires with a byte string when the file has been fully downloaded. To support streaming download, use the 'read' method of IReadable. If no version is recoverable, the Deferred will errback with an UnrecoverableFileError. """ def get_size_of_best_version(): """Find the size of the version that would be returned by get_best_readable_version(). I return a Deferred that fires with an integer. If no version is recoverable, the Deferred will errback with an UnrecoverableFileError. """ class IImmutableFileNode(IFileNode, IReadable): """I am a node representing an immutable file. Immutable files have only one version""" class IMutableFileNode(IFileNode): """I provide access to a 'mutable file', which retains its identity regardless of what contents are put in it. The consistency-vs-availability problem means that there might be multiple versions of a file present in the grid, some of which might be unrecoverable (i.e. have fewer than 'k' shares). These versions are loosely ordered: each has a sequence number and a hash, and any version with seqnum=N was uploaded by a node that has seen at least one version with seqnum=N-1. The 'servermap' (an instance of IMutableFileServerMap) is used to describe the versions that are known to be present in the grid, and which servers are hosting their shares. It is used to represent the 'state of the world', and is used for this purpose by my test-and-set operations. Downloading the contents of the mutable file will also return a servermap. Uploading a new version into the mutable file requires a servermap as input, and the semantics of the replace operation is 'replace the file with my new version if it looks like nobody else has changed the file since my previous download'. Because the file is distributed, this is not a perfect test-and-set operation, but it will do its best. 
If the replace process sees evidence of a simultaneous write, it will signal an UncoordinatedWriteError, so that the caller can take corrective action. Most readers will want to use the 'best' current version of the file, and should use my 'download_best_version()' method. To unconditionally replace the file, callers should use overwrite(). This is the mode that user-visible mutable files will probably use. To apply some delta to the file, call modify() with a callable modifier function that can apply the modification that you want to make. This is the mode that dirnodes will use, since most directory modification operations can be expressed in terms of deltas to the directory state. Three methods are available for users who need to perform more complex operations. The first is get_servermap(), which returns an up-to-date servermap using a specified mode. The second is download_version(), which downloads a specific version (not necessarily the 'best' one). The third is 'upload', which accepts new contents and a servermap (which must have been updated with MODE_WRITE). The upload method will attempt to apply the new contents as long as no other node has modified the file since the servermap was updated. This might be useful to a caller who wants to merge multiple versions into a single new one. Note that each time the servermap is updated, a specific 'mode' is used, which determines how many peers are queried. To use a servermap for my replace() method, that servermap must have been updated in MODE_WRITE. These modes are defined in allmydata.mutable.common, and consist of MODE_READ, MODE_WRITE, MODE_ANYTHING, and MODE_CHECK. Please look in allmydata/mutable/servermap.py for details about the differences. Mutable files are currently limited in size (about 3.5MB max) and can only be retrieved and updated all-at-once, as a single big string. Future versions of our mutable files will remove this restriction. """ def get_best_mutable_version(): """Return a Deferred that fires with an IMutableFileVersion for the 'best' available version of the file. The best version is the recoverable version with the highest sequence number. If no uncoordinated writes have occurred, and if enough shares are available, then this will be the most recent version that has been uploaded. If no version is recoverable, the Deferred will errback with an UnrecoverableFileError. """ def overwrite(new_contents): """Unconditionally replace the contents of the mutable file with new ones. This simply chains get_servermap(MODE_WRITE) and upload(). This is only appropriate to use when the new contents of the file are completely unrelated to the old ones, and you do not care about other clients' changes. I return a Deferred that fires (with a PublishStatus object) when the update has completed. """ def modify(modifier_cb): """Modify the contents of the file, by downloading the current version, applying the modifier function (or bound method), then uploading the new version. I return a Deferred that fires (with a PublishStatus object) when the update is complete. The modifier callable will be given three arguments: a string (with the old contents), a 'first_time' boolean, and a servermap. As with download_best_version(), the old contents will be from the best recoverable version, but the modifier can use the servermap to make other decisions (such as refusing to apply the delta if there are multiple parallel versions, or if there is evidence of a newer unrecoverable version). 
'first_time' will be True the first time the modifier is called, and False on any subsequent calls. The callable should return a string with the new contents. The callable must be prepared to be called multiple times, and must examine the input string to see if the change that it wants to make is already present in the old version. If it does not need to make any changes, it can either return None, or return its input string. If the modifier raises an exception, it will be returned in the errback. """ def get_servermap(mode): """Return a Deferred that fires with an IMutableFileServerMap instance, updated using the given mode. """ def download_version(servermap, version): """Download a specific version of the file, using the servermap as a guide to where the shares are located. I return a Deferred that fires with the requested contents, or errbacks with UnrecoverableFileError. Note that a servermap that was updated with MODE_ANYTHING or MODE_READ may not know about shares for all versions (those modes stop querying servers as soon as they can fulfil their goals), so you may want to use MODE_CHECK (which checks everything) to get increased visibility. """ def upload(new_contents, servermap): """Replace the contents of the file with new ones. This requires a servermap that was previously updated with MODE_WRITE. I attempt to provide test-and-set semantics, in that I will avoid modifying any share that is different than the version I saw in the servermap. However, if another node is writing to the file at the same time as me, I may manage to update some shares while they update others. If I see any evidence of this, I will signal UncoordinatedWriteError, and the file will be left in an inconsistent state (possibly the version you provided, possibly the old version, possibly somebody else's version, and possibly a mix of shares from all of these). The recommended response to UncoordinatedWriteError is to either return it to the caller (since they failed to coordinate their writes), or to attempt some sort of recovery. It may be sufficient to wait a random interval (with exponential backoff) and repeat your operation. If I do not signal UncoordinatedWriteError, then I was able to write the new version without incident. I return a Deferred that fires (with a PublishStatus object) when the publish has completed. I will update the servermap in-place with the location of all new shares. """ def get_writekey(): """Return this filenode's writekey, or None if the node does not have write-capability. This may be used to assist with data structures that need to make certain data available only to writers, such as the read-write child caps in dirnodes. The recommended process is to have reader-visible data be submitted to the filenode in the clear (where it will be encrypted by the filenode using the readkey), but encrypt writer-visible data using this writekey. 
""" def get_version(): """Returns the mutable file protocol version.""" class NotEnoughSharesError(Exception): """Download was unable to get enough shares""" class NoSharesError(Exception): """Download was unable to get any shares at all.""" class DownloadStopped(Exception): pass class UploadUnhappinessError(Exception): """Upload was unable to satisfy 'servers_of_happiness'""" class UnableToFetchCriticalDownloadDataError(Exception): """I was unable to fetch some piece of critical data that is supposed to be identically present in all shares.""" class NoServersError(Exception): """Upload wasn't given any servers to work with, usually indicating a network or Introducer problem.""" class ExistingChildError(Exception): """A directory node was asked to add or replace a child that already exists, and overwrite= was set to False.""" class NoSuchChildError(Exception): """A directory node was asked to fetch a child that does not exist.""" def __str__(self): # avoid UnicodeEncodeErrors when converting to str return self.__repr__() class ChildOfWrongTypeError(Exception): """An operation was attempted on a child of the wrong type (file or directory).""" class IDirectoryNode(IFilesystemNode): """I represent a filesystem node that is a container, with a name-to-child mapping, holding the tahoe equivalent of a directory. All child names are unicode strings, and all children are some sort of IFilesystemNode (a file, subdirectory, or unknown node). """ def get_uri(): """ The dirnode ('1') URI returned by this method can be used in set_uri() on a different directory ('2') to 'mount' a reference to this directory ('1') under the other ('2'). This URI is just a string, so it can be passed around through email or other out-of-band protocol. """ def get_readonly_uri(): """ The dirnode ('1') URI returned by this method can be used in set_uri() on a different directory ('2') to 'mount' a reference to this directory ('1') under the other ('2'). This URI is just a string, so it can be passed around through email or other out-of-band protocol. """ def list(): """I return a Deferred that fires with a dictionary mapping child name (a unicode string) to (node, metadata_dict) tuples, in which 'node' is an IFilesystemNode and 'metadata_dict' is a dictionary of metadata.""" def has_child(name): """I return a Deferred that fires with a boolean, True if there exists a child of the given name, False if not. The child name must be a unicode string.""" def get(name): """I return a Deferred that fires with a specific named child node, which is an IFilesystemNode. The child name must be a unicode string. I raise NoSuchChildError if I do not have a child by that name.""" def get_metadata_for(name): """I return a Deferred that fires with the metadata dictionary for a specific named child node. The child name must be a unicode string. This metadata is stored in the *edge*, not in the child, so it is attached to the parent dirnode rather than the child node. I raise NoSuchChildError if I do not have a child by that name.""" def set_metadata_for(name, metadata): """I replace any existing metadata for the named child with the new metadata. The child name must be a unicode string. This metadata is stored in the *edge*, not in the child, so it is attached to the parent dirnode rather than the child node. I return a Deferred (that fires with this dirnode) when the operation is complete. I raise NoSuchChildError if I do not have a child by that name.""" def get_child_at_path(path): """Transform a child path into an IFilesystemNode. 
I perform a recursive series of 'get' operations to find the named descendant node. I return a Deferred that fires with the node, or errbacks with NoSuchChildError if the node could not be found. The path can be either a single string (slash-separated) or a list of path-name elements. All elements must be unicode strings. """ def get_child_and_metadata_at_path(path): """Transform a child path into an IFilesystemNode and metadata. I am like get_child_at_path(), but my Deferred fires with a tuple of (node, metadata). The metadata comes from the last edge. If the path is empty, the metadata will be an empty dictionary. """ def set_uri(name, writecap, readcap=None, metadata=None, overwrite=True): """I add a child (by writecap+readcap) at the specific name. I return a Deferred that fires when the operation finishes. If overwrite= is True, I will replace any existing child of the same name, otherwise an existing child will cause me to return ExistingChildError. The child name must be a unicode string. The child caps could be for a file, or for a directory. If you have both the writecap and readcap, you should provide both arguments. If you have only one cap and don't know whether it is read-only, provide it as the writecap argument and leave the readcap as None. If you have only one cap that is known to be read-only, provide it as the readcap argument and leave the writecap as None. The filecaps are typically obtained from an IFilesystemNode with get_uri() and get_readonly_uri(). If metadata= is provided, I will use it as the metadata for the named edge. This will replace any existing metadata. If metadata= is left as the default value of None, I will set ['mtime'] to the current time, and I will set ['ctime'] to the current time if there was not already a child by this name present. This roughly matches the ctime/mtime semantics of traditional filesystems. See the "About the metadata" section of webapi.txt for further information. If this directory node is read-only, the Deferred will errback with a NotWriteableError.""" def set_children(entries, overwrite=True): """Add multiple children (by writecap+readcap) to a directory node. Takes a dictionary, with childname as keys and (writecap, readcap) tuples (or (writecap, readcap, metadata) triples) as values. Returns a Deferred that fires (with this dirnode) when the operation finishes. This is equivalent to calling set_uri() multiple times, but is much more efficient. All child names must be unicode strings. """ def set_node(name, child, metadata=None, overwrite=True): """I add a child at the specific name. I return a Deferred that fires when the operation finishes. This Deferred will fire with the child node that was just added. I will replace any existing child of the same name. The child name must be a unicode string. The 'child' instance must be an instance providing IFilesystemNode. If metadata= is provided, I will use it as the metadata for the named edge. This will replace any existing metadata. If metadata= is left as the default value of None, I will set ['mtime'] to the current time, and I will set ['ctime'] to the current time if there was not already a child by this name present. This roughly matches the ctime/mtime semantics of traditional filesystems. See the "About the metadata" section of webapi.txt for further information. If this directory node is read-only, the Deferred will errback with a NotWriteableError.""" def set_nodes(entries, overwrite=True): """Add multiple children to a directory node.
Takes a dict mapping unicode childname to (child_node, metadata) tuples. If metadata=None, the original metadata is left unmodified. Returns a Deferred that fires (with this dirnode) when the operation finishes. This is equivalent to calling set_node() multiple times, but is much more efficient.""" def add_file(name, uploadable, metadata=None, overwrite=True): """I upload a file (using the given IUploadable), then attach the resulting ImmutableFileNode to the directory at the given name. I set metadata the same way as set_uri and set_node. The child name must be a unicode string. I return a Deferred that fires (with the IFileNode of the uploaded file) when the operation completes.""" def delete(name, must_exist=True, must_be_directory=False, must_be_file=False): """I remove the child at the specific name. I return a Deferred that fires when the operation finishes. The child name must be a unicode string. If must_exist is True and I do not have a child by that name, I raise NoSuchChildError. If must_be_directory is True and the child is a file, or if must_be_file is True and the child is a directory, I raise ChildOfWrongTypeError.""" def create_subdirectory(name, initial_children=None, overwrite=True, mutable=True, mutable_version=None, metadata=None): """I create and attach a directory at the given name. The new directory can be empty, or it can be populated with children according to 'initial_children', which takes a dictionary in the same format as set_nodes (i.e. mapping unicode child name to (childnode, metadata) tuples). The child name must be a unicode string. I return a Deferred that fires (with the new directory node) when the operation finishes.""" def move_child_to(current_child_name, new_parent, new_child_name=None, overwrite=True): """I take one of my children and move them to a new parent. The child is referenced by name. On the new parent, the child will live under 'new_child_name', which defaults to 'current_child_name'. TODO: what should we do about metadata? I return a Deferred that fires when the operation finishes. The child name must be a unicode string. I raise NoSuchChildError if I do not have a child by that name.""" def build_manifest(): """I generate a table of everything reachable from this directory. I also compute deep-stats as described below. I return a Monitor. The Monitor's results will be a dictionary with four elements: res['manifest']: a list of (path, cap) tuples for all nodes (directories and files) reachable from this one. 'path' will be a tuple of unicode strings. The origin dirnode will be represented by an empty path tuple. res['verifycaps']: a list of (printable) verifycap strings, one for each reachable non-LIT node. This is a set: it will contain no duplicates. res['storage-index']: a list of (base32) storage index strings, one for each reachable non-LIT node. This is a set: it will contain no duplicates. res['stats']: a dictionary, the same that is generated by start_deep_stats() below. The Monitor will also have an .origin_si attribute with the (binary) storage index of the starting point. """ def start_deep_stats(): """Return a Monitor, examining all nodes (directories and files) reachable from this one.
The Monitor's results will be a dictionary with the following keys:: count-immutable-files: count of how many CHK files are in the set count-mutable-files: same, for mutable files (does not include directories) count-literal-files: same, for LIT files count-files: sum of the above three count-directories: count of directories size-immutable-files: total bytes for all CHK files in the set size-mutable-files (TODO): same, for current version of all mutable files, does not include directories size-literal-files: same, for LIT files size-directories: size of mutable files used by directories largest-directory: number of bytes in the largest directory largest-directory-children: number of children in the largest directory largest-immutable-file: number of bytes in the largest CHK file size-mutable-files is not yet implemented, because it would involve even more queries than deep_stats does. The Monitor will also have an .origin_si attribute with the (binary) storage index of the starting point. This operation will visit every directory node underneath this one, and can take a long time to run. On a typical workstation with good bandwidth, this can examine roughly 15 directories per second (and takes several minutes of 100% CPU for ~1700 directories). """ class ICodecEncoder(Interface): def set_params(data_size, required_shares, max_shares): """Set up the parameters of this encoder. This prepares the encoder to perform an operation that converts a single block of data into a number of shares, such that a future ICodecDecoder can use a subset of these shares to recover the original data. This operation is invoked by calling encode(). Once the encoding parameters are set up, the encode operation can be invoked multiple times. set_params() prepares the encoder to accept blocks of input data that are exactly 'data_size' bytes in length. The encoder will be prepared to produce 'max_shares' shares for each encode() operation (although see the 'desired_share_ids' to use less CPU). The encoding math will be chosen such that the decoder can get by with as few as 'required_shares' of these shares and still reproduce the original data. For example, set_params(1000, 5, 5) offers no redundancy at all, whereas set_params(1000, 1, 10) provides 10x redundancy. Numerical Restrictions: 'data_size' is required to be an integral multiple of 'required_shares'. In general, the caller should choose required_shares and max_shares based upon their reliability requirements and the number of peers available (the total storage space used is roughly equal to max_shares*data_size/required_shares), then choose data_size to achieve the memory footprint desired (larger data_size means more efficient operation, smaller data_size means smaller memory footprint). In addition, 'max_shares' must be equal to or greater than 'required_shares'. Of course, setting them to be equal causes encode() to degenerate into a particularly slow form of the 'split' utility. See encode() for more details about how these parameters are used. set_params() must be called before any other ICodecEncoder methods may be invoked. """ def get_params(): """Return the 3-tuple of data_size, required_shares, max_shares""" def get_encoder_type(): """Return a short string that describes the type of this encoder. There is required to be a global table of encoder classes. This method returns an index into this table; the value at this index is an encoder class, and this encoder is an instance of that class. 
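To make the numerical restrictions on set_params() (described above) concrete, here is an illustrative check -- a sketch only, not part of the interface::

    def check_params(data_size, required_shares, max_shares):
        # data_size must be an integral multiple of required_shares,
        # and max_shares must be at least required_shares.
        assert data_size % required_shares == 0
        assert max_shares >= required_shares
        # Total storage consumed is roughly
        # max_shares * data_size / required_shares bytes.
        return max_shares * data_size // required_shares

    # e.g. set_params(1000, 1, 10) uses about 10000 bytes of storage
    # (10x redundancy), while set_params(1000, 5, 5) uses about 1000.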
""" def get_block_size(): """Return the length of the shares that encode() will produce. """ def encode_proposal(data, desired_share_ids=None): """Encode some data. 'data' must be a string (or other buffer object), and len(data) must be equal to the 'data_size' value passed earlier to set_params(). This will return a Deferred that will fire with two lists. The first is a list of shares, each of which is a string (or other buffer object) such that len(share) is the same as what get_share_size() returned earlier. The second is a list of shareids, in which each is an integer. The lengths of the two lists will always be equal to each other. The user should take care to keep each share closely associated with its shareid, as one is useless without the other. The length of this output list will normally be the same as the value provided to the 'max_shares' parameter of set_params(). This may be different if 'desired_share_ids' is provided. 'desired_share_ids', if provided, is required to be a sequence of ints, each of which is required to be >= 0 and < max_shares. If not provided, encode() will produce 'max_shares' shares, as if 'desired_share_ids' were set to range(max_shares). You might use this if you initially thought you were going to use 10 peers, started encoding, and then two of the peers dropped out: you could use desired_share_ids= to skip the work (both memory and CPU) of producing shares for the peers that are no longer available. """ def encode(inshares, desired_share_ids=None): """Encode some data. This may be called multiple times. Each call is independent. inshares is a sequence of length required_shares, containing buffers (i.e. strings), where each buffer contains the next contiguous non-overlapping segment of the input data. Each buffer is required to be the same length, and the sum of the lengths of the buffers is required to be exactly the data_size promised by set_params(). (This implies that the data has to be padded before being passed to encode(), unless of course it already happens to be an even multiple of required_shares in length.) Note: the requirement to break up your data into 'required_shares' chunks of exactly the right length before calling encode() is surprising from point of view of a user who doesn't know how FEC works. It feels like an implementation detail that has leaked outside the abstraction barrier. Is there a use case in which the data to be encoded might already be available in pre-segmented chunks, such that it is faster or less work to make encode() take a list rather than splitting a single string? Yes, there is: suppose you are uploading a file with K=64, N=128, segsize=262,144. Then each in-share will be of size 4096. If you use this .encode() API then your code could first read each successive 4096-byte chunk from the file and store each one in a Python string and store each such Python string in a Python list. Then you could call .encode(), passing that list as "inshares". The encoder would generate the other 64 "secondary shares" and return to you a new list containing references to the same 64 Python strings that you passed in (as the primary shares) plus references to the new 64 Python strings. (You could even imagine that your code could use readv() so that the operating system can arrange to get all of those bytes copied from the file into the Python list of Python strings as efficiently as possible instead of having a loop written in C or in Python to copy the next part of the file into the next string.) 
On the other hand if you instead use the .encode_proposal() API (above), then your code can first read in all of the 262,144 bytes of the segment from the file into a Python string, then call .encode_proposal() passing the segment data as the "data" argument. The encoder would basically first split the "data" argument into a list of 64 in-shares of 4096 byte each, and then do the same thing that .encode() does. So this would result in a little bit more copying of data and a little bit higher of a "maximum memory usage" during the process, although it might or might not make a practical difference for our current use cases. Note that "inshares" is a strange name for the parameter if you think of the parameter as being just for feeding in data to the codec. It makes more sense if you think of the result of this encoding as being the set of shares from inshares plus an extra set of "secondary shares" (or "check shares"). It is a surprising name! If the API is going to be surprising then the name should be surprising. If we switch to encode_proposal() above then we should also switch to an unsurprising name. 'desired_share_ids', if provided, is required to be a sequence of ints, each of which is required to be >= 0 and < max_shares. If not provided, encode() will produce 'max_shares' shares, as if 'desired_share_ids' were set to range(max_shares). You might use this if you initially thought you were going to use 10 peers, started encoding, and then two of the peers dropped out: you could use desired_share_ids= to skip the work (both memory and CPU) of producing shares for the peers that are no longer available. For each call, encode() will return a Deferred that fires with two lists, one containing shares and the other containing the shareids. The get_share_size() method can be used to determine the length of the share strings returned by encode(). Each shareid is a small integer, exactly as passed into 'desired_share_ids' (or range(max_shares), if desired_share_ids was not provided). The shares and their corresponding shareids are required to be kept together during storage and retrieval. Specifically, the share data is useless by itself: the decoder needs to be told which share is which by providing it with both the shareid and the actual share data. This function will allocate an amount of memory roughly equal to:: (max_shares - required_shares) * get_share_size() When combined with the memory that the caller must allocate to provide the input data, this leads to a memory footprint roughly equal to the size of the resulting encoded shares (i.e. the expansion factor times the size of the input segment). """ # rejected ideas: # # returning a list of (shareidN,shareN) tuples instead of a pair of # lists (shareids..,shares..). Brian thought the tuples would # encourage users to keep the share and shareid together throughout # later processing, Zooko pointed out that the code to iterate # through two lists is not really more complicated than using a list # of tuples and there's also a performance improvement # # having 'data_size' not required to be an integral multiple of # 'required_shares'. Doing this would require encode() to perform # padding internally, and we'd prefer to have any padding be done # explicitly by the caller. Yes, it is an abstraction leak, but # hopefully not an onerous one. class ICodecDecoder(Interface): def set_params(data_size, required_shares, max_shares): """Set the params. 
They have to be exactly the same ones that were used for encoding.""" def get_needed_shares(): """Return the number of shares needed to reconstruct the data. set_params() is required to be called before this.""" def decode(some_shares, their_shareids): """Decode a partial list of shares into data. 'some_shares' is required to be a sequence of buffers of sharedata, a subset of the shares returned by ICodecEncode.encode(). Each share is required to be of the same length. The i'th element of their_shareids is required to be the shareid of the i'th buffer in some_shares. This returns a Deferred that fires with a sequence of buffers. This sequence will contain all of the segments of the original data, in order. The sum of the lengths of all of the buffers will be the 'data_size' value passed into the original ICodecEncode.set_params() call. To get back the single original input block of data, use ''.join(output_buffers), or you may wish to simply write them in order to an output file. Note that some of the elements in the result sequence may be references to the elements of the some_shares input sequence. In particular, this means that if those share objects are mutable (e.g. arrays) and if they are changed, then both the input (the 'some_shares' parameter) and the output (the value given when the deferred is triggered) will change. The length of 'some_shares' is required to be exactly the value of 'required_shares' passed into the original ICodecEncode.set_params() call. """ class IEncoder(Interface): """I take an object that provides IEncryptedUploadable, which provides encrypted data, and a list of shareholders. I then encode, hash, and deliver shares to those shareholders. I will compute all the necessary Merkle hash trees that are necessary to validate the crypttext that eventually comes back from the shareholders. I provide the URI Extension Block Hash, and the encoding parameters, both of which must be included in the URI. I do not choose shareholders, that is left to the IUploader. I must be given a dict of RemoteReferences to storage buckets that are ready and willing to receive data. """ def set_encrypted_uploadable(u): """Provide a source of encrypted upload data. 'u' must implement IEncryptedUploadable. When this is called, the IEncryptedUploadable will be queried for its length and the storage_index that should be used. This returns a Deferred that fires with this Encoder instance. This must be performed before start() can be called. """ def get_param(name): """Return an encoding parameter, by name. 'storage_index': return a string with the (16-byte truncated SHA-256 hash) storage index to which these shares should be pushed. 'share_counts': return a tuple describing how many shares are used: (needed_shares, servers_of_happiness, total_shares) 'num_segments': return an int with the number of segments that will be encoded. 'segment_size': return an int with the size of each segment. 'block_size': return the size of the individual blocks that will be delivered to a shareholder's put_block() method. By knowing this, the shareholder will be able to keep all blocks in a single file and still provide random access when reading them. # TODO: can we avoid exposing this? 'share_size': an int with the size of the data that will be stored on each shareholder. This is aggregate amount of data that will be sent to the shareholder, summed over all the put_block() calls I will ever make. 
It is useful to determine this size before asking potential shareholders whether they will grant a lease or not, since their answers will depend upon how much space we need. TODO: this might also include some amount of overhead, like the size of all the hashes. We need to decide whether this is useful or not. 'serialized_params': a string with a concise description of the codec name and its parameters. This may be passed into the IUploadable to let it make sure that the same file encoded with different parameters will result in different storage indexes. Once this is called, set_size() and set_params() may not be called. """ def set_shareholders(shareholders, servermap): """Tell the encoder where to put the encoded shares. 'shareholders' must be a dictionary that maps share number (an integer ranging from 0 to n-1) to an instance that provides IStorageBucketWriter. 'servermap' is a dictionary that maps share number (as defined above) to a set of peerids. This must be performed before start() can be called.""" def start(): """Begin the encode/upload process. This involves reading encrypted data from the IEncryptedUploadable, encoding it, uploading the shares to the shareholders, then sending the hash trees. set_encrypted_uploadable() and set_shareholders() must be called before this can be invoked. This returns a Deferred that fires with a verify cap when the upload process is complete. The verifycap, plus the encryption key, is sufficient to construct the read cap. """ class IDecoder(Interface): """I take a list of shareholders and some setup information, then download, validate, decode, and decrypt data from them, writing the results to an output file. I do not locate the shareholders, that is left to the IDownloader. I must be given a dict of RemoteReferences to storage buckets that are ready to send data. """ def setup(outfile): """I take a file-like object (providing write and close) to which all the plaintext data will be written. TODO: producer/consumer . Maybe write() should return a Deferred that indicates when it will accept more data? But probably having the IDecoder be a producer is easier to glue to IConsumer pieces. """ def set_shareholders(shareholders): """I take a dictionary that maps share identifiers (small integers) to RemoteReferences that provide RIBucketReader. This must be called before start().""" def start(): """I start the download. This process involves retrieving data and hash chains from the shareholders, using the hashes to validate the data, decoding the shares into segments, decrypting the segments, then writing the resulting plaintext to the output file. I return a Deferred that will fire (with self) when the download is complete. """ class IDownloadTarget(Interface): # Note that if the IDownloadTarget is also an IConsumer, the downloader # will register itself as a producer. This allows the target to invoke # downloader.pauseProducing, resumeProducing, and stopProducing. def open(size): """Called before any calls to write() or close(). If an error occurs before any data is available, fail() may be called without a previous call to open(). 'size' is the length of the file being downloaded, in bytes.""" def write(data): """Output some data to the target.""" def close(): """Inform the target that there is no more data to be written.""" def fail(why): """fail() is called to indicate that the download has failed. 'why' is a Failure object indicating what went wrong. 
No further methods will be invoked on the IDownloadTarget after fail().""" def register_canceller(cb): """The CiphertextDownloader uses this to register a no-argument function that the target can call to cancel the download. Once this canceller is invoked, no further calls to write() or close() will be made.""" def finish(): """When the CiphertextDownloader is done, this finish() function will be called. Whatever it returns will be returned to the invoker of Downloader.download. """ class IDownloader(Interface): def download(uri, target): """Perform a CHK download, sending the data to the given target. 'target' must provide IDownloadTarget. Returns a Deferred that fires (with the results of target.finish) when the download is finished, or errbacks if something went wrong.""" class IEncryptedUploadable(Interface): def set_upload_status(upload_status): """Provide an IUploadStatus object that should be filled with status information. The IEncryptedUploadable is responsible for setting key-determination progress ('chk'), size, storage_index, and ciphertext-fetch progress. It may delegate some of this responsibility to others, in particular to the IUploadable.""" def get_size(): """This behaves just like IUploadable.get_size().""" def get_all_encoding_parameters(): """Return a Deferred that fires with a tuple of (k,happy,n,segment_size). The segment_size will be used as-is, and must match the following constraints: it must be a multiple of k, and it shouldn't be unreasonably larger than the file size (if segment_size is larger than filesize, the difference must be stored as padding). This usually passes through to the IUploadable method of the same name. The encoder strictly obeys the values returned by this method. To make an upload use non-default encoding parameters, you must arrange to control the values that this method returns. """ def get_storage_index(): """Return a Deferred that fires with a 16-byte storage index. """ def read_encrypted(length, hash_only): """This behaves just like IUploadable.read(), but returns crypttext instead of plaintext. If hash_only is True, then this discards the data (and returns an empty list); this improves efficiency when resuming an interrupted upload (where we need to compute the plaintext hashes, but don't need the redundant encrypted data).""" def close(): """Just like IUploadable.close().""" class IUploadable(Interface): def set_upload_status(upload_status): """Provide an IUploadStatus object that should be filled with status information. The IUploadable is responsible for setting key-determination progress ('chk').""" def set_default_encoding_parameters(params): """Set the default encoding parameters, which must be a dict mapping strings to ints. The meaningful keys are 'k', 'happy', 'n', and 'max_segment_size'. These might have an influence on the final encoding parameters returned by get_all_encoding_parameters(), if the Uploadable doesn't have more specific preferences. This call is optional: if it is not used, the Uploadable will use some built-in defaults. If used, this method must be called before any other IUploadable methods to have any effect. """ def get_size(): """Return a Deferred that will fire with the length of the data to be uploaded, in bytes. This will be called before the data is actually used, to compute encoding parameters. """ def get_all_encoding_parameters(): """Return a Deferred that fires with a tuple of (k,happy,n,segment_size). 
The segment_size will be used as-is, and must match the following constraints: it must be a multiple of k, and it shouldn't be unreasonably larger than the file size (if segment_size is larger than filesize, the difference must be stored as padding). The relative values of k and n allow some IUploadables to request better redundancy than others (in exchange for consuming more space in the grid). Larger values of segment_size reduce hash overhead, while smaller values reduce memory footprint and cause data to be delivered in smaller pieces (which may provide a smoother and more predictable download experience). The encoder strictly obeys the values returned by this method. To make an upload use non-default encoding parameters, you must arrange to control the values that this method returns. One way to influence them may be to call set_encoding_parameters() before calling get_all_encoding_parameters(). """ def get_encryption_key(): """Return a Deferred that fires with a 16-byte AES key. This key will be used to encrypt the data. The key will also be hashed to derive the StorageIndex. Uploadables that want to achieve convergence should hash their file contents and the serialized_encoding_parameters to form the key (which of course requires a full pass over the data). Uploadables can use the upload.ConvergentUploadMixin class to achieve this automatically. Uploadables that do not care about convergence (or do not wish to make multiple passes over the data) can simply return a strongly-random 16 byte string. get_encryption_key() may be called multiple times: the IUploadable is required to return the same value each time. """ def read(length): """Return a Deferred that fires with a list of strings (perhaps with only a single element) that, when concatenated together, contain the next 'length' bytes of data. If EOF is near, this may provide fewer than 'length' bytes. The total number of bytes provided by read() before it signals EOF must equal the size provided by get_size(). If the data must be acquired through multiple internal read operations, returning a list instead of a single string may help to reduce string copies. However, the length of the concatenated strings must equal the amount of data requested, unless EOF is encountered. Long reads, or short reads without EOF, are not allowed. read() should return the same amount of data as a local disk file read, just in a different shape and asynchronously. 'length' will typically be equal to (min(get_size(),1MB)/req_shares), so a 10kB file means length=3kB, 100kB file means length=30kB, and >=1MB file means length=300kB. This method provides for a single full pass through the data. Later use cases may desire multiple passes or access to only parts of the data (such as a mutable file making small edits-in-place). This API will be expanded once those use cases are better understood. """ def close(): """The upload is finished, and whatever filehandle was in use may be closed.""" class IMutableUploadable(Interface): """ I represent content that is due to be uploaded to a mutable filecap. """ # This is somewhat simpler than the IUploadable interface above # because mutable files do not need to be concerned with possibly # generating a CHK, nor with per-file keys. 
It is a subset of the # methods in IUploadable, though, so we could just as well implement # the mutable uploadables as IUploadables that don't happen to use # those methods (with the understanding that the unused methods will # never be called on such objects) def get_size(): """ Returns a Deferred that fires with the size of the content held by the uploadable. """ def read(length): """ Returns a list of strings that, when concatenated, are the next length bytes of the file, or fewer if there are fewer bytes between the current location and the end of the file. """ def close(): """ The process that used the Uploadable is finished using it, so the uploadable may be closed. """ class IUploadResults(Interface): """I am returned by immutable upload() methods and contain the results of the upload. Note that some of my methods return empty values (0 or an empty dict) when called for non-distributed LIT files.""" def get_file_size(): """Return the file size, in bytes.""" def get_uri(): """Return the (string) URI of the object uploaded, a CHK readcap.""" def get_ciphertext_fetched(): """Return the number of bytes fetched by the helper for this upload, or 0 if the helper did not need to fetch any bytes (or if there was no helper).""" def get_preexisting_shares(): """Return the number of shares that were already present in the grid.""" def get_pushed_shares(): """Return the number of shares that were uploaded.""" def get_sharemap(): """Return a dict mapping share identifier to set of IServer instances. This indicates which servers were given which shares. For immutable files, the shareid is an integer (the share number, from 0 to N-1). For mutable files, it is a string of the form 'seq%d-%s-sh%d', containing the sequence number, the roothash, and the share number.""" def get_servermap(): """Return dict mapping IServer instance to a set of share numbers.""" def get_timings(): """Return dict of timing information, mapping name to seconds. All times are floats: total : total upload time, start to finish storage_index : time to compute the storage index peer_selection : time to decide which peers will be used contacting_helper : initial helper query to upload/no-upload decision helper_total : initial helper query to helper finished pushing cumulative_fetch : helper waiting for ciphertext requests total_fetch : helper start to last ciphertext response cumulative_encoding : just time spent in zfec cumulative_sending : just time spent waiting for storage servers hashes_and_close : last segment push to shareholder close total_encode_and_push : first encode to shareholder close """ def get_uri_extension_data(): """Return the dict of UEB data created for this file.""" def get_verifycapstr(): """Return the (string) verify-cap URI for the uploaded object.""" class IDownloadResults(Interface): """I am created internally by download() methods. I contain a number of public attributes that contain details about the download process.:: .file_size : the size of the file, in bytes .servers_used : set of server peerids that were used during download .server_problems : dict mapping server peerid to a problem string. Only servers that had problems (bad hashes, disconnects) are listed here. .servermap : dict mapping server peerid to a set of share numbers. Only servers that had any shares are listed here.
.timings : dict of timing information, mapping name to seconds (float) peer_selection : time to ask servers about shares servers_peer_selection : dict of peerid to DYHB-query time uri_extension : time to fetch a copy of the URI extension block hashtrees : time to fetch the hash trees segments : time to fetch, decode, and deliver segments cumulative_fetch : time spent waiting for storage servers cumulative_decode : just time spent in zfec cumulative_decrypt : just time spent in decryption total : total download time, start to finish fetch_per_server : dict of server to list of per-segment fetch times """ class IUploader(Interface): def upload(uploadable): """Upload the file. 'uploadable' must impement IUploadable. This returns a Deferred that fires with an IUploadResults instance, from which the URI of the file can be obtained as results.uri .""" class ICheckable(Interface): def check(monitor, verify=False, add_lease=False): """Check up on my health, optionally repairing any problems. This returns a Deferred that fires with an instance that provides ICheckResults, or None if the object is non-distributed (i.e. LIT files). The monitor will be checked periodically to see if the operation has been cancelled. If so, no new queries will be sent, and the Deferred will fire (with a OperationCancelledError) immediately. Filenodes and dirnodes (which provide IFilesystemNode) are also checkable. Instances that represent verifier-caps will be checkable but not downloadable. Some objects (like LIT files) do not actually live in the grid, and their checkers return None (non-distributed files are always healthy). If verify=False, a relatively lightweight check will be performed: I will ask all servers if they have a share for me, and I will believe whatever they say. If there are at least N distinct shares on the grid, my results will indicate r.is_healthy()==True. This requires a roundtrip to each server, but does not transfer very much data, so the network bandwidth is fairly low. If verify=True, a more resource-intensive check will be performed: every share will be downloaded, and the hashes will be validated on every bit. I will ignore any shares that failed their hash checks. If there are at least N distinct valid shares on the grid, my results will indicate r.is_healthy()==True. This requires N/k times as much download bandwidth (and server disk IO) as a regular download. If a storage server is holding a corrupt share, or is experiencing memory failures during retrieval, or is malicious or buggy, then verification will detect the problem, but checking will not. If add_lease=True, I will ensure that an up-to-date lease is present on each share. The lease secrets will be derived from by node secret (in BASEDIR/private/secret), so either I will add a new lease to the share, or I will merely renew the lease that I already had. In a future version of the storage-server protocol (once Accounting has been implemented), there may be additional options here to define the kind of lease that is obtained (which account number to claim, etc). TODO: any problems seen during checking will be reported to the health-manager.furl, a centralized object that is responsible for figuring out why files are unhealthy so corrective action can be taken. """ def check_and_repair(monitor, verify=False, add_lease=False): """Like check(), but if the file/directory is not healthy, attempt to repair the damage. Any non-healthy result will cause an immediate repair operation, to generate and upload new shares. 
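An illustrative invocation (a minimal sketch only: ``filenode`` is any ICheckable provider, ``Monitor`` is the class from allmydata.monitor, and the callback name is hypothetical)::

    d = filenode.check_and_repair(Monitor(), verify=True)
    def _done(crr):
        # crr provides ICheckAndRepairResults
        if crr.get_repair_attempted() and not crr.get_repair_successful():
            pass  # still unhealthy: escalate to an operator somehow
        return crr.get_post_repair_results()
    d.addCallback(_done)
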
After repair, the file will be as healthy as we can make it. Details about what sort of repair is done will be put in the check-and-repair results. The Deferred will not fire until the repair is complete. This returns a Deferred that fires with an instance of ICheckAndRepairResults.""" class IDeepCheckable(Interface): def start_deep_check(verify=False, add_lease=False): """Check upon the health of me and everything I can reach. This is a recursive form of check(), useable only on dirnodes. I return a Monitor, with results that are an IDeepCheckResults object. TODO: If any of the directories I traverse are unrecoverable, the Monitor will report failure. If any of the files I check upon are unrecoverable, those problems will be reported in the IDeepCheckResults as usual, and the Monitor will not report a failure. """ def start_deep_check_and_repair(verify=False, add_lease=False): """Check upon the health of me and everything I can reach. Repair anything that isn't healthy. This is a recursive form of check_and_repair(), useable only on dirnodes. I return a Monitor, with results that are an IDeepCheckAndRepairResults object. TODO: If any of the directories I traverse are unrecoverable, the Monitor will report failure. If any of the files I check upon are unrecoverable, those problems will be reported in the IDeepCheckResults as usual, and the Monitor will not report a failure. """ class ICheckResults(Interface): """I contain the detailed results of a check/verify operation. """ def get_storage_index(): """Return a string with the (binary) storage index.""" def get_storage_index_string(): """Return a string with the (printable) abbreviated storage index.""" def get_uri(): """Return the (string) URI of the object that was checked.""" def is_healthy(): """Return a boolean, True if the file/dir is fully healthy, False if it is damaged in any way. Non-distributed LIT files always return True.""" def is_recoverable(): """Return a boolean, True if the file/dir can be recovered, False if not. Unrecoverable files are obviously unhealthy. Non-distributed LIT files always return True.""" # the following methods all return None for non-distributed LIT files def get_happiness(): """Return the happiness count of the file.""" def get_encoding_needed(): """Return 'k', the number of shares required for recovery.""" def get_encoding_expected(): """Return 'N', the number of total shares generated.""" def get_share_counter_good(): """Return the number of distinct good shares that were found. For mutable files, this counts shares for the 'best' version.""" def get_share_counter_wrong(): """For mutable files, return the number of shares for versions other than the 'best' one (which is defined as being the recoverable version with the highest sequence number, then the highest roothash). These are either leftover shares from an older version (perhaps on a server that was offline when an update occurred), shares from an unrecoverable newer version, or shares from an alternate current version that results from an uncoordinated write collision. For a healthy file, this will equal 0. For immutable files, this will always equal 0.""" def get_corrupt_shares(): """Return a list of 'share locators', one for each share that was found to be corrupt (integrity failure). Each share locator is a list of (IServer, storage_index, sharenum).""" def get_incompatible_shares(): """Return a list of 'share locators', one for each share that was found to be of an unknown format. 
Each share locator is a list of (IServer, storage_index, sharenum).""" def get_servers_responding(): """Return a list of IServer objects, one for each server that responded to the share query (even if they said they didn't have shares, and even if they said they did have shares but then didn't send them when asked, or dropped the connection, or returned a Failure, and even if they said they did have shares and sent incorrect ones when asked)""" def get_host_counter_good_shares(): """Return the number of distinct storage servers with good shares. If this number is less than get_share_counters()[good], then some shares are doubled up, increasing the correlation of failures. This indicates that one or more shares should be moved to an otherwise unused server, if one is available. """ def get_version_counter_recoverable(): """Return the number of recoverable versions of the file. For a healthy file, this will equal 1.""" def get_version_counter_unrecoverable(): """Return the number of unrecoverable versions of the file. For a healthy file, this will be 0.""" def get_sharemap(): """Return a dict mapping share identifier to list of IServer objects. This indicates which servers are holding which shares. For immutable files, the shareid is an integer (the share number, from 0 to N-1). For mutable files, it is a string of the form 'seq%d-%s-sh%d', containing the sequence number, the roothash, and the share number.""" def get_summary(): """Return a string with a brief (one-line) summary of the results.""" def get_report(): """Return a list of strings with more detailed results.""" class ICheckAndRepairResults(Interface): """I contain the detailed results of a check/verify/repair operation. The IFilesystemNode.check()/verify()/repair() methods all return instances that provide ICheckAndRepairResults. """ def get_storage_index(): """Return a string with the (binary) storage index.""" def get_storage_index_string(): """Return a string with the (printable) abbreviated storage index.""" def get_repair_attempted(): """Return a boolean, True if a repair was attempted. We might not attempt to repair the file because it was healthy, or healthy enough (i.e. some shares were missing but not enough to exceed some threshold), or because we don't know how to repair this object.""" def get_repair_successful(): """Return a boolean, True if repair was attempted and the file/dir was fully healthy afterwards. False if no repair was attempted or if a repair attempt failed.""" def get_pre_repair_results(): """Return an ICheckResults instance that describes the state of the file/dir before any repair was attempted.""" def get_post_repair_results(): """Return an ICheckResults instance that describes the state of the file/dir after any repair was attempted. If no repair was attempted, the pre-repair and post-repair results will be identical.""" class IDeepCheckResults(Interface): """I contain the results of a deep-check operation. This is returned by a call to ICheckable.deep_check(). 
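For illustration, a caller that started a deep-check (see IDeepCheckable above) might consume these results roughly like this; the sketch assumes ``dirnode`` is an IDeepCheckable provider and that the Monitor's final status is this IDeepCheckResults instance::

    monitor = dirnode.start_deep_check(verify=False)
    d = monitor.when_done()
    def _summarize(deep_results):
        counters = deep_results.get_counters()
        return (counters["count-objects-checked"],
                counters["count-objects-unhealthy"])
    d.addCallback(_summarize)
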
""" def get_root_storage_index_string(): """Return the storage index (abbreviated human-readable string) of the first object checked.""" def get_counters(): """Return a dictionary with the following keys:: count-objects-checked: count of how many objects were checked count-objects-healthy: how many of those objects were completely healthy count-objects-unhealthy: how many were damaged in some way count-objects-unrecoverable: how many were unrecoverable count-corrupt-shares: how many shares were found to have corruption, summed over all objects examined """ def get_corrupt_shares(): """Return a set of (IServer, storage_index, sharenum) for all shares that were found to be corrupt. storage_index is binary.""" def get_all_results(): """Return a dictionary mapping pathname (a tuple of strings, ready to be slash-joined) to an ICheckResults instance, one for each object that was checked.""" def get_results_for_storage_index(storage_index): """Retrive the ICheckResults instance for the given (binary) storage index. Raises KeyError if there are no results for that storage index.""" def get_stats(): """Return a dictionary with the same keys as IDirectoryNode.deep_stats().""" class IDeepCheckAndRepairResults(Interface): """I contain the results of a deep-check-and-repair operation. This is returned by a call to ICheckable.deep_check_and_repair(). """ def get_root_storage_index_string(): """Return the storage index (abbreviated human-readable string) of the first object checked.""" def get_counters(): """Return a dictionary with the following keys:: count-objects-checked: count of how many objects were checked count-objects-healthy-pre-repair: how many of those objects were completely healthy (before any repair) count-objects-unhealthy-pre-repair: how many were damaged in some way count-objects-unrecoverable-pre-repair: how many were unrecoverable count-objects-healthy-post-repair: how many of those objects were completely healthy (after any repair) count-objects-unhealthy-post-repair: how many were damaged in some way count-objects-unrecoverable-post-repair: how many were unrecoverable count-repairs-attempted: repairs were attempted on this many objects. The count-repairs- keys will always be provided, however unless repair=true is present, they will all be zero. count-repairs-successful: how many repairs resulted in healthy objects count-repairs-unsuccessful: how many repairs resulted did not results in completely healthy objects count-corrupt-shares-pre-repair: how many shares were found to have corruption, summed over all objects examined (before any repair) count-corrupt-shares-post-repair: how many shares were found to have corruption, summed over all objects examined (after any repair) """ def get_stats(): """Return a dictionary with the same keys as IDirectoryNode.deep_stats().""" def get_corrupt_shares(): """Return a set of (IServer, storage_index, sharenum) for all shares that were found to be corrupt before any repair was attempted. storage_index is binary. """ def get_remaining_corrupt_shares(): """Return a set of (IServer, storage_index, sharenum) for all shares that were found to be corrupt after any repair was completed. storage_index is binary. These are shares that need manual inspection and probably deletion. 
""" def get_all_results(): """Return a dictionary mapping pathname (a tuple of strings, ready to be slash-joined) to an ICheckAndRepairResults instance, one for each object that was checked.""" def get_results_for_storage_index(storage_index): """Retrive the ICheckAndRepairResults instance for the given (binary) storage index. Raises KeyError if there are no results for that storage index.""" class IRepairable(Interface): def repair(check_results): """Attempt to repair the given object. Returns a Deferred that fires with a IRepairResults object. I must be called with an object that implements ICheckResults, as proof that you have actually discovered a problem with this file. I will use the data in the checker results to guide the repair process, such as which servers provided bad data and should therefore be avoided. The ICheckResults object is inside the ICheckAndRepairResults object, which is returned by the ICheckable.check() method:: d = filenode.check(repair=False) def _got_results(check_and_repair_results): check_results = check_and_repair_results.get_pre_repair_results() return filenode.repair(check_results) d.addCallback(_got_results) return d """ class IRepairResults(Interface): """I contain the results of a repair operation.""" def get_successful(): """Returns a boolean: True if the repair made the file healthy, False if not. Repair failure generally indicates a file that has been damaged beyond repair.""" class IClient(Interface): def upload(uploadable): """Upload some data into a CHK, get back the UploadResults for it. @param uploadable: something that implements IUploadable @return: a Deferred that fires with the UploadResults instance. To get the URI for this file, use results.uri . """ def create_mutable_file(contents=""): """Create a new mutable file (with initial) contents, get back the new node instance. @param contents: (bytestring, callable, or None): this provides the initial contents of the mutable file. If 'contents' is a bytestring, it will be used as-is. If 'contents' is a callable, it will be invoked with the new MutableFileNode instance and is expected to return a bytestring with the initial contents of the file (the callable can use node.get_writekey() to decide how to encrypt the initial contents, e.g. for a brand new dirnode with initial children). contents=None is equivalent to an empty string. Using content_maker= is more efficient than creating a mutable file and setting its contents in two separate operations. @return: a Deferred that fires with an IMutableFileNode instance. """ def create_dirnode(initial_children=None): """Create a new unattached dirnode, possibly with initial children. @param initial_children: dict with keys that are unicode child names, and values that are (childnode, metadata) tuples. @return: a Deferred that fires with the new IDirectoryNode instance. """ def create_node_from_uri(uri, rouri): """Create a new IFilesystemNode instance from the uri, synchronously. @param uri: a string or IURI-providing instance, or None. This could be for a LiteralFileNode, a CHK file node, a mutable file node, or a directory node @param rouri: a string or IURI-providing instance, or None. If the main uri is None, I will use the rouri instead. If I recognize the format of the main uri, I will ignore the rouri (because it can be derived from the writecap). @return: an instance that provides IFilesystemNode (or more usefully one of its subclasses). 
File-specifying URIs will result in IFileNode-providing instances, like ImmutableFileNode, LiteralFileNode, or MutableFileNode. Directory-specifying URIs will result in IDirectoryNode-providing instances, like DirectoryNode. """ class INodeMaker(Interface): """The NodeMaker is used to create IFilesystemNode instances. It can accept a filecap/dircap string and return the node right away. It can also create new nodes (i.e. upload a file, or create a mutable file) asynchronously. Once you have one of these nodes, you can use other methods to determine whether it is a file or directory, and to download or modify its contents. The NodeMaker encapsulates all the authorities that these IFilesystemNodes require (like references to the StorageFarmBroker). Each Tahoe process will typically have a single NodeMaker, but unit tests may create simplified/mocked forms for testing purposes. """ def create_from_cap(writecap, readcap=None, deep_immutable=False, name=u""): """I create an IFilesystemNode from the given writecap/readcap. I can only provide nodes for existing file/directory objects: use my other methods to create new objects. I return synchronously.""" def create_mutable_file(contents=None, keysize=None): """I create a new mutable file, and return a Deferred that will fire with the IMutableFileNode instance when it is ready. If contents= is provided (a bytestring), it will be used as the initial contents of the new file, otherwise the file will contain zero bytes. keysize= is for use by unit tests, to create mutable files that are smaller than usual.""" def create_new_mutable_directory(initial_children=None): """I create a new mutable directory, and return a Deferred that will fire with the IDirectoryNode instance when it is ready. If initial_children= is provided (a dict mapping unicode child name to (childnode, metadata_dict) tuples), the directory will be populated with those children, otherwise it will be empty.""" class IClientStatus(Interface): def list_all_uploads(): """Return a list of uploader objects, one for each upload that currently has an object available (tracked with weakrefs). This is intended for debugging purposes.""" def list_active_uploads(): """Return a list of active IUploadStatus objects.""" def list_recent_uploads(): """Return a list of IUploadStatus objects for the most recently started uploads.""" def list_all_downloads(): """Return a list of downloader objects, one for each download that currently has an object available (tracked with weakrefs). This is intended for debugging purposes.""" def list_active_downloads(): """Return a list of active IDownloadStatus objects.""" def list_recent_downloads(): """Return a list of IDownloadStatus objects for the most recently started downloads.""" class IUploadStatus(Interface): def get_started(): """Return a timestamp (float with seconds since epoch) indicating when the operation was started.""" def get_storage_index(): """Return a string with the (binary) storage index in use on this upload. Returns None if the storage index has not yet been calculated.""" def get_size(): """Return an integer with the number of bytes that will eventually be uploaded for this file. Returns None if the size is not yet known. """ def using_helper(): """Return True if this upload is using a Helper, False if not.""" def get_status(): """Return a string describing the current state of the upload process.""" def get_progress(): """Returns a tuple of floats, (chk, ciphertext, encode_and_push), each from 0.0 to 1.0 . 
'chk' describes how much progress has been made towards hashing the file to determine a CHK encryption key: if non-convergent encryption is in use, this will be trivial, otherwise the whole file must be hashed. 'ciphertext' describes how much of the ciphertext has been pushed to the helper, and is '1.0' for non-helper uploads. 'encode_and_push' describes how much of the encode-and-push process has finished: for helper uploads this is dependent upon the helper providing progress reports. It might be reasonable to add all three numbers and report the sum to the user.""" def get_active(): """Return True if the upload is currently active, False if not.""" def get_results(): """Return an instance of UploadResults (which contains timing and sharemap information). Might return None if the upload is not yet finished.""" def get_counter(): """Each upload status gets a unique number: this method returns that number. This provides a handle to this particular upload, so a web page can generate a suitable hyperlink.""" class IDownloadStatus(Interface): def get_started(): """Return a timestamp (float with seconds since epoch) indicating when the operation was started.""" def get_storage_index(): """Return a string with the (binary) storage index in use on this download. This may be None if there is no storage index (i.e. LIT files).""" def get_size(): """Return an integer with the number of bytes that will eventually be retrieved for this file. Returns None if the size is not yet known. """ def using_helper(): """Return True if this download is using a Helper, False if not.""" def get_status(): """Return a string describing the current state of the download process.""" def get_progress(): """Returns a float (from 0.0 to 1.0) describing the amount of the download that has completed. This value will remain at 0.0 until the first byte of plaintext is pushed to the download target.""" def get_active(): """Return True if the download is currently active, False if not.""" def get_counter(): """Each download status gets a unique number: this method returns that number. This provides a handle to this particular download, so a web page can generate a suitable hyperlink.""" class IServermapUpdaterStatus(Interface): pass class IPublishStatus(Interface): pass class IRetrieveStatus(Interface): pass class NotCapableError(Exception): """You have tried to write to a read-only node.""" class BadWriteEnablerError(Exception): pass class RIControlClient(RemoteInterface): def wait_for_client_connections(num_clients=int): """Do not return until we have connections to at least NUM_CLIENTS storage servers. """ # debug stuff def upload_random_data_from_file(size=int, convergence=bytes): return str def download_to_tempfile_and_delete(uri=bytes): return None def get_memory_usage(): """Return a dict describes the amount of memory currently in use. The keys are 'VmPeak', 'VmSize', and 'VmData'. The values are integers, measuring memory consupmtion in bytes.""" return DictOf(bytes, int) def speed_test(count=int, size=int, mutable=Any()): """Write 'count' tempfiles to disk, all of the given size. Measure how long (in seconds) it takes to upload them all to the servers. Then measure how long it takes to download all of them. If 'mutable' is 'create', time creation of mutable files. If 'mutable' is 'upload', then time access to the same mutable file instead of creating one. Returns a tuple of (upload_time, download_time). 
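For illustration only (the argument values are hypothetical, and ``rref`` stands for a remote reference to this control interface)::

    d = rref.callRemote("speed_test", count=10, size=1000000, mutable="create")
    def _report(times):
        upload_time, download_time = times
        # e.g. convert the totals into rough per-file averages
        return (upload_time / 10, download_time / 10)
    d.addCallback(_report)
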
""" return (float, float) def measure_peer_response_time(): """Send a short message to each connected peer, and measure the time it takes for them to respond to it. This is a rough measure of the application-level round trip time. @return: a dictionary mapping peerid to a float (RTT time in seconds) """ return DictOf(bytes, float) UploadResults = Any() #DictOf(bytes, bytes) class RIEncryptedUploadable(RemoteInterface): __remote_name__ = "RIEncryptedUploadable.tahoe.allmydata.com" def get_size(): return Offset def get_all_encoding_parameters(): return (int, int, int, int) def read_encrypted(offset=Offset, length=ReadSize): return ListOf(bytes) def close(): return None class RICHKUploadHelper(RemoteInterface): __remote_name__ = "RIUploadHelper.tahoe.allmydata.com" def get_version(): """ Return a dictionary of version information. """ return DictOf(bytes, Any()) def upload(reader=RIEncryptedUploadable): return UploadResults class RIHelper(RemoteInterface): __remote_name__ = "RIHelper.tahoe.allmydata.com" def get_version(): """ Return a dictionary of version information. """ return DictOf(bytes, Any()) def upload_chk(si=StorageIndex): """See if a file with a given storage index needs uploading. The helper will ask the appropriate storage servers to see if the file has already been uploaded. If so, the helper will return a set of 'upload results' that includes whatever hashes are needed to build the read-cap, and perhaps a truncated sharemap. If the file has not yet been uploaded (or if it was only partially uploaded), the helper will return an empty upload-results dictionary and also an RICHKUploadHelper object that will take care of the upload process. The client should call upload() on this object and pass it a reference to an RIEncryptedUploadable object that will provide ciphertext. When the upload is finished, the upload() method will finish and return the upload results. """ return (UploadResults, ChoiceOf(RICHKUploadHelper, None)) class IStatsProducer(Interface): def get_stats(): """ returns a dictionary, with bytes keys representing the names of stats to be monitored, and numeric values. """ class FileTooLargeError(Exception): pass class IValidatedThingProxy(Interface): def start(): """ Acquire a thing and validate it. Return a deferred that is eventually fired with self if the thing is valid or errbacked if it can't be acquired or validated.""" class InsufficientVersionError(Exception): def __init__(self, needed, got): self.needed = needed self.got = got def __repr__(self): return "InsufficientVersionError(need '%s', got %s)" % (self.needed, self.got) class EmptyPathnameComponentError(Exception): """The webapi disallows empty pathname components.""" class IConnectionStatus(Interface): """ I hold information about the 'connectedness' for some reference. Connections are an illusion, of course: only messages hold any meaning, and they are fleeting. But for status displays, it is useful to pretend that 'recently contacted' means a connection is established, and 'recently failed' means it is not. This object is not 'live': it is created and populated when requested from the connection manager, and it does not change after that point. 
""" connected = Attribute( """ True if we appear to be connected: we've been successful in communicating with our target at some point in the past, and we haven't experienced any errors since then.""") last_connection_time = Attribute( """ If is_connected() is True, this is a timestamp (seconds-since-epoch) when we last transitioned from 'not connected' to 'connected', such as when a TCP connect() operation completed and subsequent negotiation was successful. Otherwise it is None. """) summary = Attribute( """ A string with a brief summary of the current status, suitable for display on an informational page. The more complete text from last_connection_description would be appropriate for a tool-tip popup. """) last_received_time = Attribute( """ A timestamp (seconds-since-epoch) describing the last time we heard anything (including low-level keep-alives or inbound requests) from the other side. """) non_connected_statuses = Attribute( """ A dictionary, describing all connections that are not (yet) successful. When connected is True, this will only be the losing attempts. When connected is False, this will include all attempts. This maps a connection description string (for foolscap this is a connection hint and the handler it is using) to the status string (pending, connected, refused, or other errors). """) class IFoolscapStoragePlugin(IPlugin): """ An ``IStoragePlugin`` provides client- and server-side implementations of a Foolscap-based protocol which can be used to store and retrieve data. Implementations are free to apply access control or authorization policies to this storage service and doing so is a large part of the motivation for providing this point of pluggability. There should be enough information and hook points to support at least these use-cases: - anonymous, everything allowed (current default) - "storage club" / "friend-net" (possibly identity based) - cryptocurrencies (ideally, paying for each API call) - anonymous tokens (payment for service, but without identities) """ name = Attribute( """ A name for referring to this plugin. This name is both user-facing (for example, it is written in configuration files) and machine-facing (for example, it may be used to construct URLs). It should be unique across all plugins for this interface. Two plugins with the same name cannot be used in one client. Because it is used to construct URLs, it is constrained to URL safe characters (it must be a *segment* as defined by RFC 3986, section 3.3). :type: ``unicode`` """ ) def get_storage_server(configuration, get_anonymous_storage_server): """ Get an ``IAnnounceableStorageServer`` provider that gives an announcement for and an implementation of the server side of the storage protocol. This will be exposed and offered to clients in the storage server's announcement. :param dict configuration: Any configuration given in the section for this plugin in the node's configuration file. As an example, the configuration for the original anonymous-access filesystem-based storage server might look like:: {u"storedir": u"/foo/bar/storage", u"nodeid": u"abcdefg...", u"reserved_space": 0, u"discard_storage": False, u"readonly_storage": False, u"expiration_enabled": False, u"expiration_mode": u"age", u"expiration_override_lease_duration": None, u"expiration_cutoff_date": None, u"expiration_sharetypes": (u"mutable, u"immutable"), } :param get_anonymous_storage_server: A no-argument callable which returns a single instance of the original, anonymous-access storage server. 
This may be helpful in providing actual storage implementation behavior for a wrapper-style plugin. This is also provided to keep the Python API offered by Tahoe-LAFS to plugin developers narrow (do not try to find and instantiate the original storage server yourself; if you want it, call this). :rtype: ``Deferred`` firing with ``IAnnounceableStorageServer`` """ def get_storage_client(configuration, announcement, get_rref): """ Get an ``IStorageServer`` provider that implements the client side of the storage protocol. :param allmydata.node._Config configuration: A representation of the configuration for the node into which this plugin has been loaded. :param dict announcement: The announcement for the corresponding server portion of this plugin received from a storage server which is offering it. :param get_rref: A no-argument callable which returns a ``foolscap.referenceable.RemoteReference`` which refers to the server portion of this plugin on the currently active connection, or ``None`` if no connection has been established yet. :rtype: ``IStorageServer`` """ def get_client_resource(configuration): """ Get an ``IResource`` that can be published in the Tahoe-LAFS web interface to expose information related to this plugin. :param allmydata.node._Config configuration: A representation of the configuration for the node into which this plugin has been loaded. :rtype: ``IResource`` """ class IAnnounceableStorageServer(Interface): announcement = Attribute( """ Data for an announcement for the associated storage server. :note: This does not include the storage server nickname nor Foolscap fURL. These will be added to the announcement automatically. It may be usual for this announcement to contain no information. Once the client connects to this server it can use other methods to query for additional information (eg, in the manner of ``RIStorageServer.remote_get_version``). The announcement only needs to contain information to help the client determine how to connect. :type: ``dict`` of JSON-serializable types """ ) storage_server = Attribute( """ A Foolscap referenceable object implementing the server side of the storage protocol. :type: ``IReferenceable`` provider """ ) class IAddressFamily(Interface): """ Support for one specific address family. This stretches the definition of address family to include things like Tor and I2P. """ def get_listener(): """ Return a string endpoint description or an ``IStreamServerEndpoint``. This would be named ``get_server_endpoint`` if not for historical reasons. """ def get_client_endpoint(): """ Return an ``IStreamClientEndpoint``. """ tahoe_lafs-1.20.0/src/allmydata/listeners.py0000644000000000000000000000737113615410400015750 0ustar00""" Define a protocol for listening on a transport such that Tahoe-LAFS can communicate over it, manage configuration for it in its configuration file, detect when it is possible to use it, etc. """ from __future__ import annotations from typing import Any, Protocol, Sequence, Mapping, Optional, Union, Awaitable from typing_extensions import Literal from attrs import frozen from twisted.python.usage import Options from .interfaces import IAddressFamily from .util.iputil import allocate_tcp_port from .node import _Config @frozen class ListenerConfig: """ :ivar tub_ports: Entries to merge into ``[node]tub.port``. :ivar tub_locations: Entries to merge into ``[node]tub.location``. :ivar node_config: Entries to add into the overall Tahoe-LAFS configuration beneath a section named after this listener. 
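For illustration (the port number and hostname are hypothetical), the plain-TCP listener below produces a value equivalent to::

    ListenerConfig(
        tub_ports=["tcp:3457"],
        tub_locations=["tcp:example.com:3457"],
        node_config={},
    )
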
""" tub_ports: Sequence[str] tub_locations: Sequence[str] node_config: Mapping[str, Sequence[tuple[str, str]]] class Listener(Protocol): """ An object which can listen on a transport and allow Tahoe-LAFS communication to happen over it. """ def is_available(self) -> bool: """ Can this type of listener actually be used in this runtime environment? """ def can_hide_ip(self) -> bool: """ Can the transport supported by this type of listener conceal the node's public internet address from peers? """ async def create_config(self, reactor: Any, cli_config: Options) -> Optional[ListenerConfig]: """ Set up an instance of this listener according to the given configuration parameters. This may also allocate ephemeral resources if necessary. :return: The created configuration which can be merged into the overall *tahoe.cfg* configuration file. """ def create(self, reactor: Any, config: _Config) -> IAddressFamily: """ Instantiate this listener according to the given previously-generated configuration. :return: A handle on the listener which can be used to integrate it into the Tahoe-LAFS node. """ class TCPProvider: """ Support plain TCP connections. """ def is_available(self) -> Literal[True]: return True def can_hide_ip(self) -> Literal[False]: return False async def create_config(self, reactor: Any, cli_config: Options) -> ListenerConfig: tub_ports = [] tub_locations = [] if cli_config["port"]: # --port/--location are a pair tub_ports.append(cli_config["port"]) tub_locations.append(cli_config["location"]) else: assert "hostname" in cli_config hostname = cli_config["hostname"] new_port = allocate_tcp_port() tub_ports.append(f"tcp:{new_port}") tub_locations.append(f"tcp:{hostname}:{new_port}") return ListenerConfig(tub_ports, tub_locations, {}) def create(self, reactor: Any, config: _Config) -> IAddressFamily: raise NotImplementedError() @frozen class StaticProvider: """ A provider that uses all pre-computed values. """ _available: bool _hide_ip: bool _config: Union[Awaitable[Optional[ListenerConfig]], Optional[ListenerConfig]] _address: IAddressFamily def is_available(self) -> bool: return self._available def can_hide_ip(self) -> bool: return self._hide_ip async def create_config(self, reactor: Any, cli_config: Options) -> Optional[ListenerConfig]: if self._config is None or isinstance(self._config, ListenerConfig): return self._config return await self._config def create(self, reactor: Any, config: _Config) -> IAddressFamily: return self._address tahoe_lafs-1.20.0/src/allmydata/monitor.py0000644000000000000000000001046513615410400015425 0ustar00""" Manage status of long-running operations. Ported to Python 3. """ from zope.interface import Interface, implementer from allmydata.util import observer class IMonitor(Interface): """I manage status, progress, and cancellation for long-running operations. Whoever initiates the operation should create a Monitor instance and pass it into the code that implements the operation. That code should periodically check in with the Monitor, perhaps after each major unit of work has been completed, for two purposes. The first is to inform the Monitor about progress that has been made, so that external observers can be reassured that the operation is proceeding normally. If the operation has a well-known amount of work to perform, this notification should reflect that, so that an ETA or 'percentage complete' value can be derived. The second purpose is to check to see if the operation has been cancelled. 
The impatient observer who no longer wants the operation to continue will inform the Monitor; the next time the operation code checks in, it should notice that the operation has been cancelled, and wrap things up. The same monitor can be passed to multiple operations, all of which may check for cancellation: this pattern may be simpler than having the original caller keep track of subtasks and cancel them individually. """ # the following methods are provided for the operation code def is_cancelled(): """Returns True if the operation has been cancelled. If True, operation code should stop creating new work, and attempt to stop any work already in progress.""" def raise_if_cancelled(): """Raise OperationCancelledError if the operation has been cancelled. Operation code that has a robust error-handling path can simply call this periodically.""" def set_status(status): """Sets the Monitor's 'status' object to an arbitrary value. Different operations will store different sorts of status information here. Operation code should use get+modify+set sequences to update this.""" def get_status(): """Return the status object. If the operation failed, this will be a Failure instance.""" def finish(status): """Call this when the operation is done, successful or not. The Monitor's lifetime is influenced by the completion of the operation it is monitoring. The Monitor's 'status' value will be set with the 'status' argument, just as if it had been passed to set_status(). This value will be used to fire the Deferreds that are returned by when_done(). Operations that fire a Deferred when they finish should trigger this with d.addBoth(monitor.finish)""" # the following methods are provided for the initiator of the operation def is_finished(): """Return a boolean, True if the operation is done (whether successful or failed), False if it is still running.""" def when_done(): """Return a Deferred that fires when the operation is complete. It will fire with the operation status, the same value as returned by get_status().""" def cancel(): """Cancel the operation as soon as possible. is_cancelled() will start returning True after this is called.""" # get_status() is useful too, but it is operation-specific class OperationCancelledError(Exception): pass @implementer(IMonitor) class Monitor(object): def __init__(self): self.cancelled = False self.finished = False self.status = None self.observer = observer.OneShotObserverList() def is_cancelled(self): return self.cancelled def raise_if_cancelled(self): if self.cancelled: raise OperationCancelledError() def is_finished(self): return self.finished def when_done(self): return self.observer.when_fired() def cancel(self): self.cancelled = True def finish(self, status_or_failure): self.set_status(status_or_failure) self.finished = True self.observer.fire(status_or_failure) return status_or_failure def get_status(self): return self.status def set_status(self, status): self.status = status tahoe_lafs-1.20.0/src/allmydata/node.py0000644000000000000000000011544613615410400014670 0ustar00""" This module contains classes and functions to implement and manage a node for Tahoe-LAFS. Ported to Python 3. """ from __future__ import annotations from six import ensure_str, ensure_text import json import datetime import os.path import re import types import errno from base64 import b32decode, b32encode from errno import ENOENT, EPERM from warnings import warn from typing import Union, Iterable import attr # On Python 2 this will be the backported package. 
import configparser from twisted.python.filepath import ( FilePath, ) from twisted.python import log as twlog from twisted.application import service from twisted.python.failure import Failure from foolscap.api import Tub import foolscap.logging.log from allmydata.util import log from allmydata.util import fileutil, iputil from allmydata.util.fileutil import abspath_expanduser_unicode from allmydata.util.encodingutil import get_filesystem_encoding, quote_output from allmydata.util import configutil from allmydata.util.yamlutil import ( safe_load, ) from . import ( __full_version__, ) from .protocol_switch import create_tub_with_https_support def _common_valid_config(): return configutil.ValidConfiguration({ "connections": ( "tcp", ), "node": ( "log_gatherer.furl", "nickname", "reveal-ip-address", "tempdir", "timeout.disconnect", "timeout.keepalive", "tub.location", "tub.port", "web.port", "web.static", ), "i2p": ( "enabled", "i2p.configdir", "i2p.executable", "launch", "sam.port", "dest", "dest.port", "dest.private_key_file", ), "tor": ( "control.port", "enabled", "launch", "socks.port", "tor.executable", "onion", "onion.local_port", "onion.external_port", "onion.private_key_file", ), }) # group 1 will be addr (dotted quad string), group 3 if any will be portnum (string) ADDR_RE = re.compile("^([1-9][0-9]*\.[1-9][0-9]*\.[1-9][0-9]*\.[1-9][0-9]*)(:([1-9][0-9]*))?$") # this is put into README in new node-directories (for client and introducers) PRIV_README = """ This directory contains files which contain private data for the Tahoe node, such as private keys. On Unix-like systems, the permissions on this directory are set to disallow users other than its owner from reading the contents of the files. See the 'configuration.rst' documentation file for details. """ def formatTimeTahoeStyle(self, when): """ Format the given (UTC) timestamp in the way Tahoe-LAFS expects it, for example: 2007-10-12 00:26:28.566Z :param when: UTC POSIX timestamp :type when: float :returns: datetime.datetime """ d = datetime.datetime.utcfromtimestamp(when) if d.microsecond: return d.isoformat(" ")[:-3]+"Z" return d.isoformat(" ") + ".000Z" PRIV_README = """ This directory contains files which contain private data for the Tahoe node, such as private keys. On Unix-like systems, the permissions on this directory are set to disallow users other than its owner from reading the contents of the files. See the 'configuration.rst' documentation file for details.""" class _None(object): """ This class is to be used as a marker in get_config() """ pass class MissingConfigEntry(Exception): """ A required config entry was not found. """ class OldConfigError(Exception): """ An obsolete config file was found. See docs/historical/configuration.rst. """ def __str__(self): return ("Found pre-Tahoe-LAFS-v1.3 configuration file(s):\n" "%s\n" "See docs/historical/configuration.rst." % "\n".join([quote_output(fname) for fname in self.args[0]])) class OldConfigOptionError(Exception): """Indicate that outdated configuration options are being used.""" pass class UnescapedHashError(Exception): """Indicate that a configuration entry contains an unescaped '#' character.""" def __str__(self): return ("The configuration entry %s contained an unescaped '#' character." % quote_output("[%s]%s = %s" % self.args)) class PrivacyError(Exception): """reveal-IP-address = false, but the node is configured in such a way that the IP address could be revealed""" def create_node_dir(basedir, readme_text): """ Create new new 'node directory' at 'basedir'. 
This includes a 'private' subdirectory. If basedir (and privdir) already exists, nothing is done. :param readme_text: text to put in /private/README """ if not os.path.exists(basedir): fileutil.make_dirs(basedir) privdir = os.path.join(basedir, "private") if not os.path.exists(privdir): fileutil.make_dirs(privdir, 0o700) readme_text = ensure_text(readme_text) with open(os.path.join(privdir, 'README'), 'w') as f: f.write(readme_text) def read_config(basedir, portnumfile, generated_files: Iterable = (), _valid_config=None): """ Read and validate configuration. :param unicode basedir: directory where configuration data begins :param unicode portnumfile: filename fragment for "port number" files :param list generated_files: a list of automatically-generated configuration files. :param ValidConfiguration _valid_config: (internal use, optional) a structure defining valid configuration sections and keys :returns: :class:`allmydata.node._Config` instance """ basedir = abspath_expanduser_unicode(ensure_text(basedir)) if _valid_config is None: _valid_config = _common_valid_config() # complain if there's bad stuff in the config dir _error_about_old_config_files(basedir, generated_files) # canonicalize the portnum file portnumfile = os.path.join(basedir, portnumfile) config_path = FilePath(basedir).child("tahoe.cfg") try: config_bytes = config_path.getContent() except EnvironmentError as e: if e.errno != errno.ENOENT: raise # The file is missing, just create empty ConfigParser. config_str = u"" else: config_str = config_bytes.decode("utf-8-sig") return config_from_string( basedir, portnumfile, config_str, _valid_config, config_path, ) def config_from_string(basedir, portnumfile, config_str, _valid_config=None, fpath=None): """ load and validate configuration from in-memory string """ if _valid_config is None: _valid_config = _common_valid_config() if isinstance(config_str, bytes): config_str = config_str.decode("utf-8") # load configuration from in-memory string parser = configutil.get_config_from_string(config_str) configutil.validate_config( "" if fpath is None else fpath.path, parser, _valid_config, ) return _Config( parser, portnumfile, basedir, fpath, _valid_config, ) def _error_about_old_config_files(basedir, generated_files): """ If any old configuration files are detected, raise OldConfigError. """ oldfnames = set() old_names = [ 'nickname', 'webport', 'keepalive_timeout', 'log_gatherer.furl', 'disconnect_timeout', 'advertised_ip_addresses', 'introducer.furl', 'helper.furl', 'key_generator.furl', 'stats_gatherer.furl', 'no_storage', 'readonly_storage', 'sizelimit', 'debug_discard_storage', 'run_helper' ] for fn in generated_files: old_names.remove(fn) for name in old_names: fullfname = os.path.join(basedir, name) if os.path.exists(fullfname): oldfnames.add(fullfname) if oldfnames: e = OldConfigError(oldfnames) twlog.msg(e) raise e def ensure_text_and_abspath_expanduser_unicode(basedir: Union[bytes, str]) -> str: return abspath_expanduser_unicode(ensure_text(basedir)) @attr.s class _Config(object): """ Manages configuration of a Tahoe 'node directory'. Note: all this code and functionality was formerly in the Node class; names and funtionality have been kept the same while moving the code. It probably makes sense for several of these APIs to have better names. :ivar ConfigParser config: The actual configuration values. :ivar str portnum_fname: filename to use for the port-number file (a relative path inside basedir). 
:ivar str _basedir: path to our "node directory", inside which all configuration is managed. :ivar (FilePath|NoneType) config_path: The path actually used to create the configparser (might be ``None`` if using in-memory data). :ivar ValidConfiguration valid_config_sections: The validator for the values in this configuration. """ config = attr.ib(validator=attr.validators.instance_of(configparser.ConfigParser)) portnum_fname = attr.ib() _basedir = attr.ib( converter=ensure_text_and_abspath_expanduser_unicode, ) # type: str config_path = attr.ib( validator=attr.validators.optional( attr.validators.instance_of(FilePath), ), ) valid_config_sections = attr.ib( default=configutil.ValidConfiguration.everything(), validator=attr.validators.instance_of(configutil.ValidConfiguration), ) @property def nickname(self): nickname = self.get_config("node", "nickname", u"") assert isinstance(nickname, str) return nickname @property def _config_fname(self): if self.config_path is None: return "" return self.config_path.path def write_config_file(self, name, value, mode="w"): """ writes the given 'value' into a file called 'name' in the config directory """ fn = os.path.join(self._basedir, name) try: fileutil.write(fn, value, mode) except EnvironmentError: log.err( Failure(), "Unable to write config file '{}'".format(fn), ) def enumerate_section(self, section): """ returns a dict containing all items in a configuration section. an empty dict is returned if the section doesn't exist. """ answer = dict() try: for k in self.config.options(section): answer[k] = self.config.get(section, k) except configparser.NoSectionError: pass return answer def items(self, section, default=_None): try: return self.config.items(section) except configparser.NoSectionError: if default is _None: raise return default def get_config(self, section, option, default=_None, boolean=False): try: if boolean: return self.config.getboolean(section, option) item = self.config.get(section, option) if option.endswith(".furl") and '#' in item: raise UnescapedHashError(section, option, item) return item except (configparser.NoOptionError, configparser.NoSectionError): if default is _None: raise MissingConfigEntry( "{} is missing the [{}]{} entry".format( quote_output(self._config_fname), section, option, ) ) return default def set_config(self, section, option, value): """ Set a config option in a section and re-write the tahoe.cfg file :param str section: The name of the section in which to set the option. :param str option: The name of the option to set. :param str value: The value of the option. :raise UnescapedHashError: If the option holds a fURL and there is a ``#`` in the value. """ if option.endswith(".furl") and "#" in value: raise UnescapedHashError(section, option, value) copied_config = configutil.copy_config(self.config) configutil.set_config(copied_config, section, option, value) configutil.validate_config( self._config_fname, copied_config, self.valid_config_sections, ) if self.config_path is not None: configutil.write_config(self.config_path, copied_config) self.config = copied_config def get_config_from_file(self, name, required=False): """Get the (string) contents of a config file, or None if the file did not exist. If required=True, raise an exception rather than returning None. 
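For example (the filename and fallback are purely illustrative)::

    token = config.get_config_from_file("my-extra-setting")
    if token is None:
        token = make_default_token()  # hypothetical fallback
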
Any leading or trailing whitespace will be stripped from the data.""" fn = os.path.join(self._basedir, name) try: return fileutil.read(fn).strip() except EnvironmentError as e: if e.errno != errno.ENOENT: raise # we only care about "file doesn't exist" if not required: return None raise def get_or_create_private_config(self, name, default=_None): """Try to get the (string) contents of a private config file (which is a config file that resides within the subdirectory named 'private'), and return it. Any leading or trailing whitespace will be stripped from the data. If the file does not exist, and default is not given, report an error. If the file does not exist and a default is specified, try to create it using that default, and then return the value that was written. If 'default' is a string, use it as a default value. If not, treat it as a zero-argument callable that is expected to return a string. """ privname = os.path.join(self._basedir, "private", name) try: value = fileutil.read(privname, mode="r") except EnvironmentError as e: if e.errno != errno.ENOENT: raise # we only care about "file doesn't exist" if default is _None: raise MissingConfigEntry("The required configuration file %s is missing." % (quote_output(privname),)) if isinstance(default, bytes): default = str(default, "utf-8") if isinstance(default, str): value = default else: value = default() fileutil.write(privname, value) return value.strip() def write_private_config(self, name, value): """Write the (string) contents of a private config file (which is a config file that resides within the subdirectory named 'private'), and return it. """ if isinstance(value, str): value = value.encode("utf-8") privname = os.path.join(self._basedir, "private", name) with open(privname, "wb") as f: f.write(value) def get_private_config(self, name, default=_None): """Read the (native string) contents of a private config file (a config file that resides within the subdirectory named 'private'), and return it. Return a default, or raise an error if one was not given. """ privname = os.path.join(self._basedir, "private", name) try: return fileutil.read(privname, mode="r").strip() except EnvironmentError as e: if e.errno != errno.ENOENT: raise # we only care about "file doesn't exist" if default is _None: raise MissingConfigEntry("The required configuration file %s is missing." % (quote_output(privname),)) return default def get_private_path(self, *args): """ returns an absolute path inside the 'private' directory with any extra args join()-ed This exists for historical reasons. New code should ideally not call this because it makes it harder for e.g. a SQL-based _Config object to exist. Code that needs to call this method should probably be a _Config method itself. See e.g. get_grid_manager_certificates() """ return os.path.join(self._basedir, "private", *args) def get_config_path(self, *args): """ returns an absolute path inside the config directory with any extra args join()-ed This exists for historical reasons. New code should ideally not call this because it makes it harder for e.g. a SQL-based _Config object to exist. Code that needs to call this method should probably be a _Config method itself. See e.g. 
get_grid_manager_certificates() """ # note: we re-expand here (_basedir already went through this # expanduser function) in case the path we're being asked for # has embedded ".."'s in it return abspath_expanduser_unicode( os.path.join(self._basedir, *args) ) def get_grid_manager_certificates(self): """ Load all Grid Manager certificates in the config. :returns: A list of all certificates. An empty list is returned if there are none. """ grid_manager_certificates = [] cert_fnames = list(self.enumerate_section("grid_manager_certificates").values()) for fname in cert_fnames: fname = self.get_config_path(fname) if not os.path.exists(fname): raise ValueError( "Grid Manager certificate file '{}' doesn't exist".format( fname ) ) with open(fname, 'r') as f: cert = json.load(f) if set(cert.keys()) != {"certificate", "signature"}: raise ValueError( "Unknown key in Grid Manager certificate '{}'".format( fname ) ) grid_manager_certificates.append(cert) return grid_manager_certificates def get_introducer_configuration(self): """ Get configuration for introducers. :return {unicode: (unicode, FilePath)}: A mapping from introducer petname to a tuple of the introducer's fURL and local cache path. """ introducers_yaml_filename = self.get_private_path("introducers.yaml") introducers_filepath = FilePath(introducers_yaml_filename) def get_cache_filepath(petname): return FilePath( self.get_private_path("introducer_{}_cache.yaml".format(petname)), ) try: with introducers_filepath.open() as f: introducers_yaml = safe_load(f) if introducers_yaml is None: raise EnvironmentError( EPERM, "Can't read '{}'".format(introducers_yaml_filename), introducers_yaml_filename, ) introducers = { petname: config["furl"] for petname, config in introducers_yaml.get("introducers", {}).items() } non_strs = list( k for k in introducers.keys() if not isinstance(k, str) ) if non_strs: raise TypeError( "Introducer petnames {!r} should have been str".format( non_strs, ), ) non_strs = list( v for v in introducers.values() if not isinstance(v, str) ) if non_strs: raise TypeError( "Introducer fURLs {!r} should have been str".format( non_strs, ), ) log.msg( "found {} introducers in {!r}".format( len(introducers), introducers_yaml_filename, ) ) except EnvironmentError as e: if e.errno != ENOENT: raise introducers = {} # supported the deprecated [client]introducer.furl item in tahoe.cfg tahoe_cfg_introducer_furl = self.get_config("client", "introducer.furl", None) if tahoe_cfg_introducer_furl == "None": raise ValueError( "tahoe.cfg has invalid 'introducer.furl = None':" " to disable it omit the key entirely" ) if tahoe_cfg_introducer_furl: warn( "tahoe.cfg [client]introducer.furl is deprecated; " "use private/introducers.yaml instead.", category=DeprecationWarning, stacklevel=-1, ) if "default" in introducers: raise ValueError( "'default' introducer furl cannot be specified in tahoe.cfg and introducers.yaml;" " please fix impossible configuration." ) introducers['default'] = tahoe_cfg_introducer_furl return { petname: (furl, get_cache_filepath(petname)) for (petname, furl) in introducers.items() } def create_tub_options(config): """ :param config: a _Config instance :returns: dict containing all Foolscap Tub-related options, overriding defaults with appropriate config from `config` instance. """ # We can't unify the camelCase vs. 
dashed-name divide here, # because these are options for Foolscap tub_options = { "logLocalFailures": True, "logRemoteFailures": True, "expose-remote-exception-types": False, "accept-gifts": False, } # see #521 for a discussion of how to pick these timeout values. keepalive_timeout_s = config.get_config("node", "timeout.keepalive", "") if keepalive_timeout_s: tub_options["keepaliveTimeout"] = int(keepalive_timeout_s) disconnect_timeout_s = config.get_config("node", "timeout.disconnect", "") if disconnect_timeout_s: # N.B.: this is in seconds, so use "1800" to get 30min tub_options["disconnectTimeout"] = int(disconnect_timeout_s) return tub_options def _make_tcp_handler(): """ :returns: a Foolscap default TCP handler """ # this is always available from foolscap.connections.tcp import default return default() def create_default_connection_handlers(config, handlers): """ :return: A dictionary giving the default connection handlers. The keys are strings like "tcp" and the values are strings like "tor" or ``None``. """ reveal_ip = config.get_config("node", "reveal-IP-address", True, boolean=True) # Remember the default mappings from tahoe.cfg default_connection_handlers = { name: name for name in handlers } tcp_handler_name = config.get_config("connections", "tcp", "tcp").lower() if tcp_handler_name == "disabled": default_connection_handlers["tcp"] = None else: if tcp_handler_name not in handlers: raise ValueError( "'tahoe.cfg [connections] tcp=' uses " "unknown handler type '{}'".format( tcp_handler_name ) ) if not handlers[tcp_handler_name]: raise ValueError( "'tahoe.cfg [connections] tcp=' uses " "unavailable/unimportable handler type '{}'. " "Please pip install tahoe-lafs[{}] to fix.".format( tcp_handler_name, tcp_handler_name, ) ) default_connection_handlers["tcp"] = tcp_handler_name if not reveal_ip: if default_connection_handlers.get("tcp") == "tcp": raise PrivacyError( "Privacy requested with `reveal-IP-address = false` " "but `tcp = tcp` conflicts with this.", ) return default_connection_handlers def create_connection_handlers(config, i2p_provider, tor_provider): """ :returns: 2-tuple of default_connection_handlers, foolscap_connection_handlers """ # We store handlers for everything. None means we were unable to # create that handler, so hints which want it will be ignored. handlers = { "tcp": _make_tcp_handler(), "tor": tor_provider.get_client_endpoint(), "i2p": i2p_provider.get_client_endpoint(), } log.msg( format="built Foolscap connection handlers for: %(known_handlers)s", known_handlers=sorted([k for k,v in handlers.items() if v]), facility="tahoe.node", umid="PuLh8g", ) return create_default_connection_handlers( config, handlers, ), handlers def create_tub(tub_options, default_connection_handlers, foolscap_connection_handlers, handler_overrides=None, force_foolscap=False, **kwargs): """ Create a Tub with the right options and handlers. It will be ephemeral unless the caller provides certFile= in kwargs :param handler_overrides: anything in this will override anything in `default_connection_handlers` for just this call. :param dict tub_options: every key-value pair in here will be set in the new Tub via `Tub.setOption` :param bool force_foolscap: If True, only allow Foolscap, not just HTTPS storage protocol. 
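    A minimal, illustrative call (the handler names and ``certFile`` value are
    assumptions for this example, not requirements) might look like::

        tub = create_tub(
            tub_options={"expose-remote-exception-types": False},
            default_connection_handlers={"tcp": "tcp"},
            foolscap_connection_handlers={"tcp": _make_tcp_handler()},
            certFile="node.pem",
        )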
""" if handler_overrides is None: handler_overrides = {} # We listen simultaneously for both Foolscap and HTTPS on the same port, # so we have to create a special Foolscap Tub for that to work: if force_foolscap: tub = Tub(**kwargs) else: tub = create_tub_with_https_support(**kwargs) for (name, value) in list(tub_options.items()): tub.setOption(name, value) handlers = default_connection_handlers.copy() handlers.update(handler_overrides) tub.removeAllConnectionHintHandlers() for hint_type, handler_name in list(handlers.items()): handler = foolscap_connection_handlers.get(handler_name) if handler: tub.addConnectionHintHandler(hint_type, handler) return tub def _convert_tub_port(s): """ :returns: a proper Twisted endpoint string like (`tcp:X`) is `s` is a bare number, or returns `s` as-is """ us = s if isinstance(s, bytes): us = s.decode("utf-8") if re.search(r'^\d+$', us): return "tcp:{}".format(int(us)) return us class PortAssignmentRequired(Exception): """ A Tub port number was configured to be 0 where this is not allowed. """ def _tub_portlocation(config, get_local_addresses_sync, allocate_tcp_port): """ Figure out the network location of the main tub for some configuration. :param get_local_addresses_sync: A function like ``iputil.get_local_addresses_sync``. :param allocate_tcp_port: A function like ``iputil.allocate_tcp_port``. :returns: None or tuple of (port, location) for the main tub based on the given configuration. May raise ValueError or PrivacyError if there are problems with the config """ cfg_tubport = config.get_config("node", "tub.port", None) cfg_location = config.get_config("node", "tub.location", None) reveal_ip = config.get_config("node", "reveal-IP-address", True, boolean=True) tubport_disabled = False if cfg_tubport is not None: cfg_tubport = cfg_tubport.strip() if cfg_tubport == "": raise ValueError("tub.port must not be empty") if cfg_tubport == "disabled": tubport_disabled = True location_disabled = False if cfg_location is not None: cfg_location = cfg_location.strip() if cfg_location == "": raise ValueError("tub.location must not be empty") if cfg_location == "disabled": location_disabled = True if tubport_disabled and location_disabled: return None if tubport_disabled and not location_disabled: raise ValueError("tub.port is disabled, but not tub.location") if location_disabled and not tubport_disabled: raise ValueError("tub.location is disabled, but not tub.port") if cfg_tubport is None: # For 'tub.port', tahoe.cfg overrides the individual file on # disk. So only read config.portnum_fname if tahoe.cfg doesn't # provide a value. if os.path.exists(config.portnum_fname): file_tubport = fileutil.read(config.portnum_fname).strip() tubport = _convert_tub_port(file_tubport) else: tubport = "tcp:%d" % (allocate_tcp_port(),) fileutil.write_atomically(config.portnum_fname, tubport + "\n", mode="") else: tubport = _convert_tub_port(cfg_tubport) for port in tubport.split(","): if port in ("0", "tcp:0", "tcp:port=0", "tcp:0:interface=127.0.0.1"): raise PortAssignmentRequired() if cfg_location is None: cfg_location = "AUTO" local_portnum = None # needed to hush lgtm.com static analyzer # Replace the location "AUTO", if present, with the detected local # addresses. Don't probe for local addresses unless necessary. 
split_location = cfg_location.split(",") if "AUTO" in split_location: if not reveal_ip: raise PrivacyError("tub.location uses AUTO") local_addresses = get_local_addresses_sync() # tubport must be like "tcp:12345" or "tcp:12345:morestuff" local_portnum = int(tubport.split(":")[1]) new_locations = [] for loc in split_location: if loc == "AUTO": new_locations.extend(["tcp:%s:%d" % (ip, local_portnum) for ip in local_addresses]) else: if not reveal_ip: # Legacy hints are "host:port". We use Foolscap's utility # function to convert all hints into the modern format # ("tcp:host:port") because that's what the receiving # client will probably do. We test the converted hint for # TCP-ness, but publish the original hint because that # was the user's intent. from foolscap.connections.tcp import convert_legacy_hint converted_hint = convert_legacy_hint(loc) hint_type = converted_hint.split(":")[0] if hint_type == "tcp": raise PrivacyError("tub.location includes tcp: hint") new_locations.append(loc) location = ",".join(new_locations) # Lacking this, Python 2 blows up in Foolscap when it is confused by a # Unicode FURL. location = location.encode("utf-8") return tubport, location def tub_listen_on(i2p_provider, tor_provider, tub, tubport, location): """ Assign a Tub its listener locations. :param i2p_provider: See ``allmydata.util.i2p_provider.create``. :param tor_provider: See ``allmydata.util.tor_provider.create``. """ for port in tubport.split(","): if port == "listen:i2p": # the I2P provider will read its section of tahoe.cfg and # return either a fully-formed Endpoint, or a descriptor # that will create one, so we don't have to stuff all the # options into the tub.port string (which would need a lot # of escaping) port_or_endpoint = i2p_provider.get_listener() elif port == "listen:tor": port_or_endpoint = tor_provider.get_listener() else: port_or_endpoint = port # Foolscap requires native strings: if isinstance(port_or_endpoint, (bytes, str)): port_or_endpoint = ensure_str(port_or_endpoint) tub.listenOn(port_or_endpoint) # This last step makes the Tub ready for tub.registerReference() tub.setLocation(location) def create_main_tub(config, tub_options, default_connection_handlers, foolscap_connection_handlers, i2p_provider, tor_provider, handler_overrides=None, cert_filename="node.pem"): """ Creates a 'main' Foolscap Tub, typically for use as the top-level access point for a running Node. :param config: a `_Config` instance :param dict tub_options: any options to change in the tub :param default_connection_handlers: default Foolscap connection handlers :param foolscap_connection_handlers: Foolscap connection handlers for this tub :param i2p_provider: None, or a _Provider instance if I2P is installed. :param tor_provider: None, or a _Provider instance if txtorcon + Tor are installed. """ if handler_overrides is None: handler_overrides = {} portlocation = _tub_portlocation( config, iputil.get_local_addresses_sync, iputil.allocate_tcp_port, ) # FIXME?
"node.pem" was the CERTFILE option/thing certfile = config.get_private_path("node.pem") tub = create_tub( tub_options, default_connection_handlers, foolscap_connection_handlers, force_foolscap=config.get_config( "storage", "force_foolscap", default=False, boolean=True ), handler_overrides=handler_overrides, certFile=certfile, ) if portlocation is None: log.msg("Tub is not listening") else: tubport, location = portlocation tub_listen_on( i2p_provider, tor_provider, tub, tubport, location, ) log.msg("Tub location set to %r" % (location,)) return tub class Node(service.MultiService): """ This class implements common functionality of both Client nodes and Introducer nodes. """ NODETYPE = "unknown NODETYPE" CERTFILE = "node.pem" def __init__(self, config, main_tub, i2p_provider, tor_provider): """ Initialize the node with the given configuration. Its base directory is the current directory by default. """ service.MultiService.__init__(self) self.config = config self.get_config = config.get_config # XXX stopgap self.nickname = config.nickname # XXX stopgap # this can go away once Client.init_client_storage_broker is moved into create_client() # (tests sometimes have None here) self._i2p_provider = i2p_provider self._tor_provider = tor_provider self.create_log_tub() self.logSource = "Node" self.setup_logging() self.tub = main_tub if self.tub is not None: self.nodeid = b32decode(self.tub.tubID.upper()) # binary format self.short_nodeid = b32encode(self.nodeid).lower()[:8] # for printing self.config.write_config_file("my_nodeid", b32encode(self.nodeid).lower() + b"\n", mode="wb") self.tub.setServiceParent(self) else: self.nodeid = self.short_nodeid = None self.log("Node constructed. " + __full_version__) iputil.increase_rlimits() def _is_tub_listening(self): """ :returns: True if the main tub is listening """ return len(self.tub.getListeners()) > 0 # pull this outside of Node's __init__ too, see: # https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2948 def create_log_tub(self): # The logport uses a localhost-only ephemeral Tub, with no control # over the listening port or location. This might change if we # discover a compelling reason for it in the future (e.g. being able # to use "flogtool tail" against a remote server), but for now I # think we can live without it. self.log_tub = Tub() portnum = iputil.listenOnUnused(self.log_tub) self.log("Log Tub location set to 127.0.0.1:%s" % (portnum,)) self.log_tub.setServiceParent(self) def startService(self): # Note: this class can be started and stopped at most once. self.log("Node.startService") # Record the process id in the twisted log, after startService() # (__init__ is called before fork(), but startService is called # after). Note that Foolscap logs handle pid-logging by itself, no # need to send a pid to the foolscap log here. twlog.msg("My pid: %s" % os.getpid()) try: os.chmod("twistd.pid", 0o644) except EnvironmentError: pass service.MultiService.startService(self) self.log("%s running" % self.NODETYPE) twlog.msg("%s running" % self.NODETYPE) def stopService(self): self.log("Node.stopService") return service.MultiService.stopService(self) def shutdown(self): """Shut down the node. Returns a Deferred that fires (with None) when it finally stops kicking.""" self.log("Node.shutdown") return self.stopService() def setup_logging(self): # we replace the formatTime() method of the log observer that # twistd set up for us, with a method that uses our preferred # timestamp format. 
for o in twlog.theLogPublisher.observers: # o might be a FileLogObserver's .emit method if type(o) is type(self.setup_logging): # bound method ob = o.__self__ if isinstance(ob, twlog.FileLogObserver): newmeth = types.MethodType(formatTimeTahoeStyle, ob) ob.formatTime = newmeth # TODO: twisted >2.5.0 offers maxRotatedFiles=50 lgfurl_file = self.config.get_private_path("logport.furl").encode(get_filesystem_encoding()) if os.path.exists(lgfurl_file): os.remove(lgfurl_file) self.log_tub.setOption("logport-furlfile", lgfurl_file) lgfurl = self.config.get_config("node", "log_gatherer.furl", "") if lgfurl: # this is in addition to the contents of log-gatherer-furlfile lgfurl = lgfurl.encode("utf-8") self.log_tub.setOption("log-gatherer-furl", lgfurl) self.log_tub.setOption("log-gatherer-furlfile", self.config.get_config_path("log_gatherer.furl")) incident_dir = self.config.get_config_path("logs", "incidents") foolscap.logging.log.setLogDir(incident_dir) twlog.msg("Foolscap logging initialized") twlog.msg("Note to developers: twistd.log does not receive very much.") twlog.msg("Use 'flogtool tail -c NODEDIR/private/logport.furl' instead") twlog.msg("and read docs/logging.rst") def log(self, *args, **kwargs): return log.msg(*args, **kwargs) tahoe_lafs-1.20.0/src/allmydata/nodemaker.py0000644000000000000000000001654613615410400015711 0ustar00""" Create file nodes of various types. """ from __future__ import annotations import weakref from zope.interface import implementer from twisted.internet.defer import succeed from allmydata.util.assertutil import precondition from allmydata.interfaces import INodeMaker from allmydata.immutable.literal import LiteralFileNode from allmydata.immutable.filenode import ImmutableFileNode, CiphertextFileNode from allmydata.immutable.upload import Data from allmydata.mutable.filenode import MutableFileNode from allmydata.mutable.publish import MutableData from allmydata.dirnode import DirectoryNode, pack_children from allmydata.unknown import UnknownNode from allmydata.blacklist import ProhibitedNode from allmydata.crypto.rsa import PublicKey, PrivateKey from allmydata import uri @implementer(INodeMaker) class NodeMaker(object): def __init__(self, storage_broker, secret_holder, history, uploader, terminator, default_encoding_parameters, mutable_file_default, key_generator, blacklist=None): self.storage_broker = storage_broker self.secret_holder = secret_holder self.history = history self.uploader = uploader self.terminator = terminator self.default_encoding_parameters = default_encoding_parameters self.mutable_file_default = mutable_file_default self.key_generator = key_generator self.blacklist = blacklist self._node_cache = weakref.WeakValueDictionary() # uri -> node def _create_lit(self, cap): return LiteralFileNode(cap) def _create_immutable(self, cap): return ImmutableFileNode(cap, self.storage_broker, self.secret_holder, self.terminator, self.history) def _create_immutable_verifier(self, cap): return CiphertextFileNode(cap, self.storage_broker, self.secret_holder, self.terminator, self.history) def _create_mutable(self, cap): n = MutableFileNode(self.storage_broker, self.secret_holder, self.default_encoding_parameters, self.history) return n.init_from_cap(cap) def _create_dirnode(self, filenode): return DirectoryNode(filenode, self, self.uploader) def create_from_cap(self, writecap, readcap=None, deep_immutable=False, name=u""): # this returns synchronously. It starts with a "cap string". 
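        # (Illustrative) cap strings are byte strings such as b"URI:LIT:...",
        # b"URI:CHK:..." or b"URI:DIR2:..."; uri.from_string() below parses
        # them into the corresponding URI objects.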
assert isinstance(writecap, (bytes, type(None))), type(writecap) assert isinstance(readcap, (bytes, type(None))), type(readcap) bigcap = writecap or readcap if not bigcap: # maybe the writecap was hidden because we're in a readonly # directory, and the future cap format doesn't have a readcap, or # something. return UnknownNode(None, None) # deep_immutable and name not needed # The name doesn't matter for caching since it's only used in the error # attribute of an UnknownNode, and we don't cache those. if deep_immutable: memokey = b"I" + bigcap else: memokey = b"M" + bigcap try: node = self._node_cache[memokey] except KeyError: cap = uri.from_string(bigcap, deep_immutable=deep_immutable, name=name) node = self._create_from_single_cap(cap) # node is None for an unknown URI, otherwise it is a type for which # is_mutable() is known. We avoid cacheing mutable nodes due to # ticket #1679. if node is None: # don't cache UnknownNode node = UnknownNode(writecap, readcap, deep_immutable=deep_immutable, name=name) elif node.is_mutable(): self._node_cache[memokey] = node # note: WeakValueDictionary if self.blacklist: si = node.get_storage_index() # if this node is blacklisted, return the reason, otherwise return None reason = self.blacklist.check_storageindex(si) if reason is not None: # The original node object is cached above, not the ProhibitedNode wrapper. # This ensures that removing the blacklist entry will make the node # accessible if create_from_cap is called again. node = ProhibitedNode(node, reason) return node def _create_from_single_cap(self, cap): if isinstance(cap, uri.LiteralFileURI): return self._create_lit(cap) if isinstance(cap, uri.CHKFileURI): return self._create_immutable(cap) if isinstance(cap, uri.CHKFileVerifierURI): return self._create_immutable_verifier(cap) if isinstance(cap, (uri.ReadonlySSKFileURI, uri.WriteableSSKFileURI, uri.WriteableMDMFFileURI, uri.ReadonlyMDMFFileURI)): return self._create_mutable(cap) if isinstance(cap, (uri.DirectoryURI, uri.ReadonlyDirectoryURI, uri.ImmutableDirectoryURI, uri.LiteralDirectoryURI, uri.MDMFDirectoryURI, uri.ReadonlyMDMFDirectoryURI)): filenode = self._create_from_single_cap(cap.get_filenode_cap()) return self._create_dirnode(filenode) return None def create_mutable_file(self, contents=None, version=None, keypair: tuple[PublicKey, PrivateKey] | None = None): if version is None: version = self.mutable_file_default n = MutableFileNode(self.storage_broker, self.secret_holder, self.default_encoding_parameters, self.history) if keypair is None: d = self.key_generator.generate() else: d = succeed(keypair) d.addCallback(n.create_with_keys, contents, version=version) d.addCallback(lambda res: n) return d def create_new_mutable_directory( self, initial_children=None, version=None, *, keypair: tuple[PublicKey, PrivateKey] | None = None, ): if initial_children is None: initial_children = {} for (name, (node, metadata)) in initial_children.items(): precondition(isinstance(metadata, dict), "create_new_mutable_directory requires metadata to be a dict, not None", metadata) node.raise_error() d = self.create_mutable_file(lambda n: MutableData(pack_children(initial_children, n.get_writekey())), version=version, keypair=keypair) d.addCallback(self._create_dirnode) return d def create_immutable_directory(self, children, convergence=None): if convergence is None: convergence = self.secret_holder.get_convergence_secret() packed = pack_children(children, None, deep_immutable=True) uploadable = Data(packed, convergence) # XXX should pass reactor arg d = 
self.uploader.upload(uploadable) d.addCallback(lambda results: self.create_from_cap(None, results.get_uri())) d.addCallback(self._create_dirnode) return d tahoe_lafs-1.20.0/src/allmydata/protocol_switch.py0000644000000000000000000002014113615410400017150 0ustar00""" Support for listening with both HTTPS and Foolscap on the same port. The goal is to make the transition from Foolscap to HTTPS-based protocols as simple as possible, with no extra configuration needed. Listening on the same port means a user upgrading Tahoe-LAFS will automatically get HTTPS working with no additional changes. ``create_tub_with_https_support()`` creates a new ``Tub`` that has its ``negotiationClass`` modified to be a new subclass tied to that specific ``Tub`` instance. Calling ``tub.negotiationClass.add_storage_server(...)`` then adds relevant information for a storage server once it becomes available later in the configuration process. """ from __future__ import annotations from itertools import chain from typing import cast from twisted.internet.protocol import Protocol from twisted.internet.interfaces import IDelayedCall, IReactorFromThreads from twisted.internet.ssl import CertificateOptions from twisted.web.server import Site from twisted.protocols.tls import TLSMemoryBIOFactory from twisted.internet import reactor from hyperlink import DecodedURL from foolscap.negotiate import Negotiation from foolscap.api import Tub from .storage.http_server import HTTPServer, build_nurl from .storage.server import StorageServer class _PretendToBeNegotiation(type): """ Metaclass that allows ``_FoolscapOrHttps`` to pretend to be a ``Negotiation`` instance, since Foolscap does some checks like ``assert isinstance(protocol, tub.negotiationClass)`` in its internals, and sometimes that ``protocol`` is a ``_FoolscapOrHttps`` instance, but sometimes it's a ``Negotiation`` instance. """ def __instancecheck__(self, instance): return issubclass(instance.__class__, self) or isinstance(instance, Negotiation) class _FoolscapOrHttps(Protocol, metaclass=_PretendToBeNegotiation): """ Based on initial query, decide whether we're talking Foolscap or HTTP. Additionally, pretends to be a ``foolscap.negotiate.Negotiation`` instance, since these are created by Foolscap's ``Tub``, by setting this to be the tub's ``negotiationClass``. Do not instantiate directly; use ``create_tub_with_https_support(...)`` instead. The way this class works is that a new subclass is created for a specific ``Tub`` instance. """ # These are class attributes; they will be set by # create_tub_with_https_support() and add_storage_server(). # The Twisted HTTPS protocol factory wrapping the storage server HTTP API: https_factory: TLSMemoryBIOFactory # The tub that created us: tub: Tub @classmethod def add_storage_server( cls, storage_server: StorageServer, swissnum: bytes ) -> set[DecodedURL]: """ Update a ``_FoolscapOrHttps`` subclass for a specific ``Tub`` instance with the class attributes it requires for a specific storage server. Returns the resulting NURLs. """ # We need to be a subclass: assert cls != _FoolscapOrHttps # The tub instance must already be set: assert hasattr(cls, "tub") assert isinstance(cls.tub, Tub) # Tub.myCertificate is a twisted.internet.ssl.PrivateCertificate # instance.
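        # Re-using the tub's private key and certificate below means the HTTPS
        # storage endpoint presents the same TLS certificate as the Foolscap
        # tub; the same certificate is also handed to build_nurl() further
        # down when constructing the storage NURLs.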
certificate_options = CertificateOptions( privateKey=cls.tub.myCertificate.privateKey.original, certificate=cls.tub.myCertificate.original, ) http_storage_server = HTTPServer(cast(IReactorFromThreads, reactor), storage_server, swissnum) cls.https_factory = TLSMemoryBIOFactory( certificate_options, False, Site(http_storage_server.get_resource()), ) storage_nurls = set() # Individual hints can be in the form # "tcp:host:port,tcp:host:port,tcp:host:port". for location_hint in chain.from_iterable( hints.split(",") for hints in cls.tub.locationHints ): if location_hint.startswith("tcp:") or location_hint.startswith("tor:"): scheme, hostname, port = location_hint.split(":") if scheme == "tcp": subscheme = None else: subscheme = "tor" # If we're listening on Tor, the hostname needs to have an # .onion TLD. assert hostname.endswith(".onion") # The I2P scheme is not yet supported by the HTTP client, so we # don't want to generate a NURL that won't work. This will be # fixed in https://tahoe-lafs.org/trac/tahoe-lafs/ticket/4037 port = int(port) storage_nurls.add( build_nurl( hostname, port, str(swissnum, "ascii"), cls.tub.myCertificate.original.to_cryptography(), subscheme ) ) return storage_nurls def __init__(self, *args, **kwargs): self._foolscap: Negotiation = Negotiation(*args, **kwargs) def __setattr__(self, name, value): if name in {"_foolscap", "_buffer", "transport", "__class__", "_timeout"}: object.__setattr__(self, name, value) else: setattr(self._foolscap, name, value) def __getattr__(self, name): return getattr(self._foolscap, name) def _convert_to_negotiation(self): """ Convert self to a ``Negotiation`` instance. """ self.__class__ = Negotiation # type: ignore self.__dict__ = self._foolscap.__dict__ def initClient(self, *args, **kwargs): # After creation, a Negotiation instance either has initClient() or # initServer() called. Since this is a client, we're never going to do # HTTP, so we can immediately become a Negotiation instance. assert not hasattr(self, "_buffer") self._convert_to_negotiation() return self.initClient(*args, **kwargs) def connectionMade(self): self._buffer: bytes = b"" self._timeout: IDelayedCall = reactor.callLater( 30, self.transport.abortConnection ) def connectionLost(self, reason): if self._timeout.active(): self._timeout.cancel() def dataReceived(self, data: bytes) -> None: """Handle incoming data. Once we've decided which protocol we are, update self.__class__, at which point all methods will be called on the new class. """ self._buffer += data if len(self._buffer) < 8: return # Check if it looks like a Foolscap request. If so, it can handle this # and later data, otherwise assume HTTPS.
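        # (Paraphrasing the check below:) Foolscap negotiation opens with an
        # HTTP-style request line of the form "GET /id/<tubid>...", so a
        # buffer beginning with b"GET /id/" identifies a Foolscap peer;
        # anything else is treated as HTTPS.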
self._timeout.cancel() if self._buffer.startswith(b"GET /id/"): # We're a Foolscap Negotiation server protocol instance: transport = self.transport buf = self._buffer self._convert_to_negotiation() self.makeConnection(transport) self.dataReceived(buf) return else: # We're a HTTPS protocol instance, serving the storage protocol: assert self.transport is not None protocol = self.https_factory.buildProtocol(self.transport.getPeer()) protocol.makeConnection(self.transport) protocol.dataReceived(self._buffer) # Update the factory so it knows we're transforming to a new # protocol object (we'll do that next) value = self.https_factory.protocols.pop(protocol) self.https_factory.protocols[self] = value # Transform self into the TLS protocol 🪄 self.__class__ = protocol.__class__ self.__dict__ = protocol.__dict__ def create_tub_with_https_support(**kwargs) -> Tub: """ Create a new Tub that also supports HTTPS. This involves creating a new protocol switch class for the specific ``Tub`` instance. """ the_tub = Tub(**kwargs) class FoolscapOrHttpForTub(_FoolscapOrHttps): tub = the_tub the_tub.negotiationClass = FoolscapOrHttpForTub # type: ignore return the_tub tahoe_lafs-1.20.0/src/allmydata/stats.py0000644000000000000000000000533613615410400015075 0ustar00""" Ported to Python 3. """ from collections import deque from time import process_time import time from typing import Deque, Tuple from twisted.application import service from twisted.application.internet import TimerService from zope.interface import implementer from allmydata.util import log, dictutil from allmydata.interfaces import IStatsProducer @implementer(IStatsProducer) class CPUUsageMonitor(service.MultiService): HISTORY_LENGTH: int = 15 POLL_INTERVAL: float = 60 initial_cpu: float = 0.0 def __init__(self): service.MultiService.__init__(self) self.samples: Deque[Tuple[float, float]] = deque([], self.HISTORY_LENGTH + 1) # we provide 1min, 5min, and 15min moving averages TimerService(self.POLL_INTERVAL, self.check).setServiceParent(self) def startService(self): self.initial_cpu = process_time() return super().startService() def check(self): now_wall = time.time() now_cpu = process_time() self.samples.append( (now_wall, now_cpu) ) def _average_N_minutes(self, size): if len(self.samples) < size+1: return None first = -size-1 elapsed_wall = self.samples[-1][0] - self.samples[first][0] elapsed_cpu = self.samples[-1][1] - self.samples[first][1] fraction = elapsed_cpu / elapsed_wall return fraction def get_stats(self): s = {} avg = self._average_N_minutes(1) if avg is not None: s["cpu_monitor.1min_avg"] = avg avg = self._average_N_minutes(5) if avg is not None: s["cpu_monitor.5min_avg"] = avg avg = self._average_N_minutes(15) if avg is not None: s["cpu_monitor.15min_avg"] = avg now_cpu = process_time() s["cpu_monitor.total"] = now_cpu - self.initial_cpu return s class StatsProvider(service.MultiService): def __init__(self, node): service.MultiService.__init__(self) self.node = node self.counters = dictutil.UnicodeKeyDict() self.stats_producers = [] self.cpu_monitor = CPUUsageMonitor() self.cpu_monitor.setServiceParent(self) self.register_producer(self.cpu_monitor) def count(self, name, delta=1): val = self.counters.setdefault(name, 0) self.counters[name] = val + delta def register_producer(self, stats_producer): self.stats_producers.append(IStatsProducer(stats_producer)) def get_stats(self): stats = {} for sp in self.stats_producers: stats.update(sp.get_stats()) ret = { 'counters': self.counters, 'stats': stats } log.msg(format='get_stats() -> 
%(stats)s', stats=ret, level=log.NOISY) return ret tahoe_lafs-1.20.0/src/allmydata/storage_client.py0000644000000000000000000017100113615410400016732 0ustar00 """ I contain the client-side code which speaks to storage servers, in particular the foolscap-based server implemented in src/allmydata/storage/*.py . Ported to Python 3. """ # roadmap: # # 1: implement StorageFarmBroker (i.e. "storage broker"), change Client to # create it, change uploader/servermap to get rrefs from it. ServerFarm calls # IntroducerClient.subscribe_to . ServerFarm hides descriptors, passes rrefs # to clients. webapi status pages call broker.get_info_about_serverid. # # 2: move get_info methods to the descriptor, webapi status pages call # broker.get_descriptor_for_serverid().get_info # # 3?later?: store descriptors in UploadResults/etc instead of serverids, # webapi status pages call descriptor.get_info and don't use storage_broker # or Client # # 4: enable static config: tahoe.cfg can add descriptors. Make the introducer # optional. This closes #467 # # 5: implement NativeStorageClient, pass it to Tahoe2PeerSelector and other # clients. Clients stop doing callRemote(), use NativeStorageClient methods # instead (which might do something else, i.e. http or whatever). The # introducer and tahoe.cfg only create NativeStorageClients for now. # # 6: implement other sorts of IStorageClient classes: S3, etc from __future__ import annotations from typing import Union, Callable, Any, Optional, cast, Dict, Iterable from os import urandom import re import time import hashlib from io import StringIO from configparser import NoSectionError import json import attr from attr import define from hyperlink import DecodedURL from twisted.web.client import HTTPConnectionPool from zope.interface import ( Attribute, Interface, implementer, ) from twisted.python.failure import Failure from twisted.web import http from twisted.internet.task import LoopingCall from twisted.internet import defer, reactor from twisted.internet.interfaces import IReactorTime from twisted.application import service from twisted.logger import Logger from twisted.plugin import ( getPlugins, ) from eliot import ( log_call, ) from foolscap.ipb import IRemoteReference from foolscap.api import eventually, RemoteException from foolscap.reconnector import ( ReconnectionInfo, ) from allmydata.interfaces import ( IStorageBroker, IDisplayableServer, IServer, IStorageServer, IFoolscapStoragePlugin, VersionMessage ) from allmydata.grid_manager import ( create_grid_manager_verifier, SignedCertificate ) from allmydata.crypto import ( ed25519, ) from allmydata.util.tor_provider import _Provider as TorProvider from allmydata.util import log, base32, connection_status from allmydata.util.assertutil import precondition from allmydata.util.observer import ObserverList from allmydata.util.rrefutil import add_version_to_remote_reference from allmydata.util.hashutil import permute_server_hash from allmydata.util.dictutil import BytesKeyDict, UnicodeKeyDict from allmydata.util.deferredutil import async_to_deferred, race from allmydata.util.attrs_provides import provides from allmydata.storage.http_client import ( StorageClient, StorageClientImmutables, StorageClientGeneral, ClientException as HTTPClientException, StorageClientMutables, ReadVector, TestWriteVectors, WriteVector, TestVector, ClientException, StorageClientFactory ) from .node import _Config _log = Logger() ANONYMOUS_STORAGE_NURLS = "anonymous-storage-NURLs" # who is responsible for de-duplication? # both? 
# IC remembers the unpacked announcements it receives, to provide for late # subscribers and to remove duplicates # if a client subscribes after startup, will they receive old announcements? # yes # who will be responsible for signature checking? # make it be IntroducerClient, so they can push the filter outwards and # reduce inbound network traffic # what should the interface between StorageFarmBroker and IntroducerClient # look like? # don't pass signatures: only pass validated blessed-objects @attr.s class StorageClientConfig(object): """ Configuration for a node acting as a storage client. :ivar preferred_peers: An iterable of the server-ids (``bytes``) of the storage servers where share placement is preferred, in order of decreasing preference. See the *[client]peers.preferred* documentation for details. :ivar dict[unicode, dict[unicode, unicode]] storage_plugins: A mapping from names of ``IFoolscapStoragePlugin`` configured in *tahoe.cfg* to the respective configuration. :ivar list[ed25519.VerifyKey] grid_manager_keys: with no keys in this list, we'll upload to any storage server. Otherwise, we will only upload to a storage-server that has a valid certificate signed by at least one of these keys. """ preferred_peers : Iterable[bytes] = attr.ib(default=()) storage_plugins : dict[str, dict[str, str]] = attr.ib(default=attr.Factory(dict)) grid_manager_keys : list[ed25519.Ed25519PublicKey] = attr.ib(default=attr.Factory(list)) @classmethod def from_node_config(cls, config): """ Create a ``StorageClientConfig`` from a complete Tahoe-LAFS node configuration. :param _Config config: The loaded Tahoe-LAFS node configuration. """ ps = config.get_config("client", "peers.preferred", "").split(",") preferred_peers = tuple([p.strip() for p in ps if p != ""]) enabled_storage_plugins = ( name.strip() for name in config.get_config( "client", "storage.plugins", "", ).split(u",") if name.strip() ) storage_plugins = {} for plugin_name in enabled_storage_plugins: try: plugin_config = config.items("storageclient.plugins." + plugin_name) except NoSectionError: plugin_config = [] storage_plugins[plugin_name] = dict(plugin_config) grid_manager_keys = [] for name, gm_key in config.enumerate_section('grid_managers').items(): grid_manager_keys.append( ed25519.verifying_key_from_string(gm_key.encode("ascii")) ) return cls( preferred_peers, storage_plugins, grid_manager_keys, ) def get_configured_storage_plugins(self) -> dict[str, IFoolscapStoragePlugin]: """ :returns: a mapping from names to instances for all available plugins :raises MissingPlugin: if the configuration asks for a plugin for which there is no corresponding instance (e.g. it is not installed). """ plugins = { plugin.name: plugin for plugin in getPlugins(IFoolscapStoragePlugin) } # mypy doesn't like "str" in place of Any ... configured: Dict[Any, IFoolscapStoragePlugin] = dict() for plugin_name in self.storage_plugins: try: plugin = plugins[plugin_name] except KeyError: raise MissingPlugin(plugin_name) configured[plugin_name] = plugin return configured @implementer(IStorageBroker) class StorageFarmBroker(service.MultiService): """I live on the client, and know about storage servers. For each server that is participating in a grid, I either maintain a connection to it or remember enough information to establish a connection to it on demand. I'm also responsible for subscribing to the IntroducerClient to find out about new servers as they are announced by the Introducer. 
:ivar _tub_maker: A one-argument callable which accepts a dictionary of "handler overrides" and returns a ``foolscap.api.Tub``. :ivar StorageClientConfig storage_client_config: Values from the node configuration file relating to storage behavior. """ @property def preferred_peers(self): return self.storage_client_config.preferred_peers def __init__( self, permute_peers, tub_maker, node_config: _Config, storage_client_config=None, default_connection_handlers=None, tor_provider: Optional[TorProvider]=None, ): service.MultiService.__init__(self) if default_connection_handlers is None: default_connection_handlers = {"tcp": "tcp"} assert permute_peers # False not implemented yet self.permute_peers = permute_peers self._tub_maker = tub_maker self.node_config = node_config if storage_client_config is None: storage_client_config = StorageClientConfig() self.storage_client_config = storage_client_config # self.servers maps serverid -> IServer, and keeps track of all the # storage servers that we've heard about. Each descriptor manages its # own Reconnector, and will give us a RemoteReference when we ask # them for it. self.servers = BytesKeyDict() self._static_server_ids : set[bytes] = set() # ignore announcements for these self.introducer_client = None self._threshold_listeners : list[tuple[float,defer.Deferred[Any]]]= [] # tuples of (threshold, Deferred) self._connected_high_water_mark = 0 self._tor_provider = tor_provider self._default_connection_handlers = default_connection_handlers @log_call(action_type=u"storage-client:broker:set-static-servers") def set_static_servers(self, servers): # Sorting the items gives us a deterministic processing order. This # doesn't really matter but it makes the logging behavior more # predictable and easier to test (and at least one test does depend on # this sorted order). for (server_id, server) in sorted(servers.items()): try: storage_server = self._make_storage_server( server_id.encode("utf-8"), server, ) except Exception: # TODO: The _make_storage_server failure is logged but maybe # we should write a traceback here. Notably, tests don't # automatically fail just because we hit this case. Well # written tests will still fail if a surprising exception # arrives here but they might be harder to debug without this # information. pass else: if isinstance(server_id, str): server_id = server_id.encode("utf-8") self._static_server_ids.add(server_id) self.servers[server_id] = storage_server storage_server.setServiceParent(self) storage_server.start_connecting(self._trigger_connections) def get_client_storage_plugin_web_resources(self, node_config): """ Get all of the client-side ``IResource`` implementations provided by enabled storage plugins. :param allmydata.node._Config node_config: The complete node configuration for the node from which these web resources will be served. :return dict[unicode, IResource]: Resources for all of the plugins. """ plugins = { plugin.name: plugin for plugin in getPlugins(IFoolscapStoragePlugin) } return UnicodeKeyDict({ name: plugins[name].get_client_resource(node_config) for (name, config) in self.storage_client_config.storage_plugins.items() }) @staticmethod def _should_we_use_http(node_config: _Config, announcement: dict) -> bool: """ Given an announcement dictionary and config, return whether we should connect to storage server over HTTP. 
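        Restating the expression below: HTTP(S) is chosen when
        ``[client]force_foolscap`` is false and the announcement advertises at
        least one ``anonymous-storage-NURLs`` entry.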
""" return not node_config.get_config( "client", "force_foolscap", default=False, boolean=True, ) and len(announcement.get(ANONYMOUS_STORAGE_NURLS, [])) > 0 @log_call( action_type=u"storage-client:broker:make-storage-server", include_args=["server_id"], include_result=False, ) def _make_storage_server(self, server_id, server): """ Create a new ``IServer`` for the given storage server announcement. :param bytes server_id: The unique identifier for the server. :param dict server: The server announcement. See ``Static Server Definitions`` in the configuration documentation for details about the structure and contents. :return IServer: The object-y representation of the server described by the given announcement. """ assert isinstance(server_id, bytes) gm_verifier = create_grid_manager_verifier( self.storage_client_config.grid_manager_keys, [SignedCertificate.load(StringIO(json.dumps(data))) for data in server["ann"].get("grid-manager-certificates", [])], "pub-{}".format(str(server_id, "ascii")).encode("ascii"), # server_id is v0- not pub-v0-key .. for reasons? ) if self._should_we_use_http(self.node_config, server["ann"]): s = HTTPNativeStorageServer( server_id, server["ann"], grid_manager_verifier=gm_verifier, default_connection_handlers=self._default_connection_handlers, tor_provider=self._tor_provider ) s.on_status_changed(lambda _: self._got_connection()) return s handler_overrides = server.get("connections", {}) s = NativeStorageServer( server_id, server["ann"], self._tub_maker, handler_overrides, self.node_config, self.storage_client_config, gm_verifier, ) s.on_status_changed(lambda _: self._got_connection()) return s def when_connected_enough(self, threshold): """ :returns: a Deferred that fires if/when our high water mark for number of connected servers becomes (or ever was) above "threshold". """ d = defer.Deferred() self._threshold_listeners.append( (threshold, d) ) self._check_connected_high_water_mark() return d # these two are used in unit tests def test_add_rref(self, serverid, rref, ann): s = self._make_storage_server( serverid, {"ann": ann.copy()}, ) s._rref = rref s._is_connected = True self.servers[serverid] = s def test_add_server(self, server_id, s): s.on_status_changed(lambda _: self._got_connection()) self.servers[server_id] = s def use_introducer(self, introducer_client): self.introducer_client = ic = introducer_client ic.subscribe_to("storage", self._got_announcement) def _got_connection(self): # this is called by NativeStorageServer when it is connected self._check_connected_high_water_mark() def _check_connected_high_water_mark(self): current = len(self.get_connected_servers()) if current > self._connected_high_water_mark: self._connected_high_water_mark = current remaining = [] for threshold, d in self._threshold_listeners: if self._connected_high_water_mark >= threshold: eventually(d.callback, None) else: remaining.append( (threshold, d) ) self._threshold_listeners = remaining def _should_ignore_announcement(self, server_id, ann): """ Determine whether a new storage announcement should be discarded or used to update our collection of storage servers. :param bytes server_id: The unique identifier for the storage server which made the announcement. :param dict ann: The announcement. :return bool: ``True`` if the announcement should be ignored, ``False`` if it should be used to update our local storage server state. """ # Let local static configuration always override any announcement for # a particular server. 
if server_id in self._static_server_ids: log.msg(format="ignoring announcement for static server '%(id)s'", id=server_id, facility="tahoe.storage_broker", umid="AlxzqA", level=log.UNUSUAL) return True try: old = self.servers[server_id] except KeyError: # We don't know anything about this server. Let's use the # announcement to change that. return False else: # Determine if this announcement is at all difference from the # announcement we already have for the server. If it is the same, # we don't need to change anything. return old.get_announcement() == ann def _got_announcement(self, key_s, ann): """ This callback is given to the introducer and called any time an announcement is received which has a valid signature and does not have a sequence number less than or equal to a previous sequence number seen for that server by that introducer. Note sequence numbers are not considered between different introducers so if we use more than one introducer it is possible for them to deliver us stale announcements in some cases. """ precondition(isinstance(key_s, bytes), key_s) precondition(key_s.startswith(b"v0-"), key_s) precondition(ann["service-name"] == "storage", ann["service-name"]) server_id = key_s if self._should_ignore_announcement(server_id, ann): return s = self._make_storage_server( server_id, {u"ann": ann}, ) try: old = self.servers.pop(server_id) except KeyError: pass else: # It's a replacement, get rid of the old one. old.stop_connecting() old.disownServiceParent() # NOTE: this disownServiceParent() returns a Deferred that # doesn't fire until Tub.stopService fires, which will wait for # any existing connections to be shut down. This doesn't # generally matter for normal runtime, but unit tests can run # into DirtyReactorErrors if they don't block on these. If a test # replaces one server with a newer version, then terminates # before the old one has been shut down, it might get # DirtyReactorErrors. The fix would be to gather these Deferreds # into a structure that will block StorageFarmBroker.stopService # until they have fired (but hopefully don't keep reference # cycles around when they fire earlier than that, which will # almost always be the case for normal runtime). # now we forget about them and start using the new one s.setServiceParent(self) self.servers[server_id] = s s.start_connecting(self._trigger_connections) # the descriptor will manage their own Reconnector, and each time we # need servers, we'll ask them if they're connected or not. def _trigger_connections(self): # when one connection is established, reset the timers on all others, # to trigger a reconnection attempt in one second. This is intended # to accelerate server connections when we've been offline for a # while. The goal is to avoid hanging out for a long time with # connections to only a subset of the servers, which would increase # the chances that we'll put shares in weird places (and not update # existing shares of mutable files). See #374 for more details. for dsc in list(self.servers.values()): dsc.try_to_connect() def get_servers_for_psi(self, peer_selection_index, for_upload=False): """ :param for_upload: used to determine if we should include any servers that are invalid according to Grid Manager processing. When for_upload is True and we have any Grid Manager keys configured, any storage servers with invalid or missing certificates will be excluded. 
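        The result is sorted into the permuted order used for share placement:
        preferred peers first, then by
        ``permute_server_hash(peer_selection_index, permutation_seed)`` (a
        restatement of the ``_permuted`` key function in the body below).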
""" # return a list of server objects (IServers) assert self.permute_peers == True connected_servers = self.get_connected_servers() preferred_servers = frozenset(s for s in connected_servers if s.get_longname() in self.preferred_peers) if for_upload: # print("upload processing: {}".format([srv.upload_permitted() for srv in connected_servers])) connected_servers = [ srv for srv in connected_servers if srv.upload_permitted() ] def _permuted(server): seed = server.get_permutation_seed() is_unpreferred = server not in preferred_servers return (is_unpreferred, permute_server_hash(peer_selection_index, seed)) return sorted(connected_servers, key=_permuted) def get_all_serverids(self): return frozenset(self.servers.keys()) def get_connected_servers(self): return frozenset([s for s in self.servers.values() if s.is_connected()]) def get_known_servers(self): return frozenset(self.servers.values()) def get_nickname_for_serverid(self, serverid): if serverid in self.servers: return self.servers[serverid].get_nickname() return None def get_stub_server(self, serverid): if serverid in self.servers: return self.servers[serverid] # some time before 1.12, we changed "serverid" to be "key_s" (the # printable verifying key, used in V2 announcements), instead of the # tubid. When the immutable uploader delegates work to a Helper, # get_stub_server() is used to map the returning server identifiers # to IDisplayableServer instances (to get a name, for display on the # Upload Results web page). If the Helper is running 1.12 or newer, # it will send pubkeys, but if it's still running 1.11, it will send # tubids. This clause maps the old tubids to our existing servers. for s in list(self.servers.values()): if isinstance(s, NativeStorageServer): if serverid == s.get_tubid(): return s return StubServer(serverid) @implementer(IDisplayableServer) class StubServer(object): def __init__(self, serverid): assert isinstance(serverid, bytes) self.serverid = serverid # binary tubid def get_serverid(self): return self.serverid def get_name(self): return base32.b2a(self.serverid)[:8] def get_longname(self): return base32.b2a(self.serverid) def get_nickname(self): return "?" class IFoolscapStorageServer(Interface): """ An internal interface that mediates between ``NativeStorageServer`` and Foolscap-based ``IStorageServer`` implementations. """ nickname = Attribute(""" A name for this server for presentation to users. """) permutation_seed = Attribute(""" A stable value associated with this server which a client can use as an input to the server selection permutation ordering. """) tubid = Attribute(""" The identifier for the Tub in which the server is run. """) storage_server = Attribute(""" An IStorageServer provide which implements a concrete Foolscap-based protocol for communicating with the server. """) name = Attribute(""" Another name for this server for presentation to users. """) longname = Attribute(""" *Another* name for this server for presentation to users. """) lease_seed = Attribute(""" A stable value associated with this server which a client can use as an input to a lease secret generation function. """) def connect_to(tub, got_connection): """ Attempt to establish and maintain a connection to the server. :param Tub tub: A Foolscap Tub from which the connection is to originate. :param got_connection: A one-argument callable which is called with a Foolscap ``RemoteReference`` when a connection is established. This may be called multiple times if the connection is lost and then re-established. 
:return foolscap.reconnector.Reconnector: An object which manages the connection and reconnection attempts. """ def _parse_announcement(server_id: bytes, furl: bytes, ann: dict) -> tuple[str, bytes, bytes, bytes, bytes]: """ Parse the furl and announcement, return: (nickname, permutation_seed, tubid, short_description, long_description) """ m = re.match(br'pb://(\w+)@', furl) assert m, furl tubid_s = m.group(1).lower() tubid = base32.a2b(tubid_s) if "permutation-seed-base32" in ann: seed = ann["permutation-seed-base32"] if isinstance(seed, str): seed = seed.encode("utf-8") ps = base32.a2b(seed) elif re.search(br'^v0-[0-9a-zA-Z]{52}$', server_id): ps = base32.a2b(server_id[3:]) else: log.msg("unable to parse serverid '%(server_id)s as pubkey, " "hashing it to get permutation-seed, " "may not converge with other clients", server_id=server_id, facility="tahoe.storage_broker", level=log.UNUSUAL, umid="qu86tw") ps = hashlib.sha256(server_id).digest() permutation_seed = ps assert server_id long_description = server_id if server_id.startswith(b"v0-"): # remove v0- prefix from abbreviated name short_description = server_id[3:3+8] else: short_description = server_id[:8] nickname = ann.get("nickname", "") return (nickname, permutation_seed, tubid, short_description, long_description) @implementer(IFoolscapStorageServer) @attr.s(frozen=True) class _FoolscapStorage(object): """ Abstraction for connecting to a storage server exposed via Foolscap. """ nickname = attr.ib() permutation_seed = attr.ib() tubid = attr.ib() storage_server = attr.ib(validator=provides(IStorageServer)) _furl = attr.ib() _short_description = attr.ib() _long_description = attr.ib() @property def name(self): return self._short_description @property def longname(self): return self._long_description @property def lease_seed(self): return self.tubid @classmethod def from_announcement(cls, server_id, furl, ann, storage_server): """ Create an instance from a fURL and an announcement like:: {"permutation-seed-base32": "...", "nickname": "...", "grid-manager-certificates": [..], } *nickname* and *grid-manager-certificates* are optional. The furl will be a Unicode string on Python 3; on Python 2 it will be either a native (bytes) string or a Unicode string. """ (nickname, permutation_seed, tubid, short_description, long_description) = _parse_announcement(server_id, furl.encode("utf-8"), ann) return cls( nickname=nickname, permutation_seed=permutation_seed, tubid=tubid, storage_server=storage_server, furl=furl.encode("utf-8"), short_description=short_description, long_description=long_description, ) def connect_to(self, tub, got_connection): return tub.connectTo(self._furl, got_connection) @implementer(IFoolscapStorageServer) @define class _NullStorage(object): """ Abstraction for *not* communicating with a storage server of a type with which we can't communicate. """ nickname = "" permutation_seed = hashlib.sha256(b"").digest() tubid = hashlib.sha256(b"").digest() storage_server = None lease_seed = hashlib.sha256(b"").digest() name = "" longname: str = "" def connect_to(self, tub, got_connection): return NonReconnector() class NonReconnector(object): """ A ``foolscap.reconnector.Reconnector``-alike that doesn't do anything. """ def stopConnecting(self): pass def reset(self): pass def getReconnectionInfo(self): return ReconnectionInfo() class AnnouncementNotMatched(Exception): """ A storage server announcement wasn't matched by any of the locally enabled plugins. 
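    The exception argument is the comma-separated list of plugin names taken
    from the announcement's storage-options (see
    ``_storage_from_foolscap_plugin`` below), so the caller can report which
    plugins were offered but not configured locally.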
""" @attr.s(auto_exc=True) class MissingPlugin(Exception): """ A particular plugin was requested but is missing """ plugin_name = attr.ib() def __str__(self): return "Missing plugin '{}'".format(self.plugin_name) def _storage_from_foolscap_plugin(node_config, config, announcement, get_rref): """ Construct an ``IStorageServer`` from the most locally-preferred plugin that is offered in the given announcement. :param allmydata.node._Config node_config: The node configuration to pass to the plugin. :param dict announcement: The storage announcement for the storage server we should build """ storage_options = announcement.get(u"storage-options", []) plugins = config.get_configured_storage_plugins() # for every storage-option that we have enabled locally (in order # of preference), see if the announcement asks for such a thing. # if it does, great: we return that storage-client # otherwise we've run out of options... for options in storage_options: try: plugin = plugins[options[u"name"]] except KeyError: # we didn't configure this kind of plugin locally, so # consider the next announced option continue furl = options[u"storage-server-FURL"] return furl, plugin.get_storage_client( node_config, options, get_rref, ) # none of the storage options in the announcement are configured # locally; we can't make a storage-client. plugin_names = ", ".join(sorted(option["name"] for option in storage_options)) raise AnnouncementNotMatched(plugin_names) def _available_space_from_version(version): if version is None: return None protocol_v1_version = version.get(b'http://allmydata.org/tahoe/protocols/storage/v1', BytesKeyDict()) available_space = protocol_v1_version.get(b'available-space') if available_space is None: available_space = protocol_v1_version.get(b'maximum-immutable-share-size', None) return available_space def _make_storage_system( node_config: _Config, config: StorageClientConfig, ann: dict, server_id: bytes, get_rref: Callable[[], Optional[IRemoteReference]], ) -> IFoolscapStorageServer: """ Create an object for interacting with the storage server described by the given announcement. :param node_config: The node configuration to pass to any configured storage plugins. :param config: Configuration specifying desired storage client behavior. :param ann: The storage announcement from the storage server we are meant to communicate with. :param server_id: The unique identifier for the server. :param get_rref: A function which returns a remote reference to the server-side object which implements this storage system, if one is available (otherwise None). :return: An object enabling communication via Foolscap with the server which generated the announcement. """ unmatched = None # Try to match the announcement against a plugin. try: furl, storage_server = _storage_from_foolscap_plugin( node_config, config, ann, # Pass in an accessor for our _rref attribute. The value of # the attribute may change over time as connections are lost # and re-established. The _StorageServer should always be # able to get the most up-to-date value. get_rref, ) except AnnouncementNotMatched as e: # show a more-specific error to the user for this server # (Note this will only be shown if the server _doesn't_ offer # anonymous service, which will match below) unmatched = _NullStorage('{}: missing plugin "{}"'.format(server_id.decode("utf8"), str(e))) else: return _FoolscapStorage.from_announcement( server_id, furl, ann, storage_server, ) # Try to match the announcement against the anonymous access scheme. 
try: furl = ann[u"anonymous-storage-FURL"] except KeyError: # Nope pass else: # See comment above for the _storage_from_foolscap_plugin case # about passing in get_rref. storage_server = _StorageServer(get_rref=get_rref) return _FoolscapStorage.from_announcement( server_id, furl, ann, storage_server, ) # Nothing matched so we can't talk to this server. (There should # not be a way to get here without this local being valid) assert unmatched is not None, "Expected unmatched plugin error" return unmatched @implementer(IServer) class NativeStorageServer(service.MultiService): """I hold information about a storage server that we want to connect to. If we are connected, I hold the RemoteReference, their host address, and the their version information. I remember information about when we were last connected too, even if we aren't currently connected. @ivar last_connect_time: when we last established a connection @ivar last_loss_time: when we last lost a connection @ivar version: the server's versiondict, from the most recent announcement @ivar nickname: the server's self-reported nickname (unicode), same @ivar rref: the RemoteReference, if connected, otherwise None """ VERSION_DEFAULTS = UnicodeKeyDict({ "http://allmydata.org/tahoe/protocols/storage/v1" : UnicodeKeyDict({ "maximum-immutable-share-size": 2**32 - 1, "maximum-mutable-share-size": 2*1000*1000*1000, # maximum prior to v1.9.2 "tolerates-immutable-read-overrun": False, "delete-mutable-shares-with-zero-length-writev": False, "available-space": None, }), "application-version": "unknown: no get_version()", }) def __init__(self, server_id, ann, tub_maker, handler_overrides, node_config, config=None, grid_manager_verifier=None): service.MultiService.__init__(self) assert isinstance(server_id, bytes) self._server_id = server_id self.announcement = ann self._tub_maker = tub_maker self._handler_overrides = handler_overrides if config is None: config = StorageClientConfig() self._grid_manager_verifier = grid_manager_verifier self._storage = _make_storage_system(node_config, config, ann, self._server_id, self.get_rref) self.last_connect_time = None self.last_loss_time = None self._rref = None self._is_connected = False self._reconnector = None self._trigger_cb = None self._on_status_changed = ObserverList() def upload_permitted(self): """ If our client is configured with Grid Manager public-keys, we will only upload to storage servers that have a currently-valid certificate signed by at least one of the Grid Managers we accept. :return: True if we should use this server for uploads, False otherwise. """ # if we have no Grid Manager keys configured, choice is easy if self._grid_manager_verifier is None: return True return self._grid_manager_verifier() def get_permutation_seed(self): return self._storage.permutation_seed def get_name(self): # keep methodname short # TODO: decide who adds [] in the short description. It should # probably be the output side, not here. 
return self._storage.name def get_longname(self): return self._storage.longname def get_tubid(self): return self._storage.tubid def get_lease_seed(self): return self._storage.lease_seed def get_foolscap_write_enabler_seed(self): return self._storage.tubid def get_nickname(self): return self._storage.nickname def on_status_changed(self, status_changed): """ :param status_changed: a callable taking a single arg (the NativeStorageServer) that is notified when we become connected """ return self._on_status_changed.subscribe(status_changed) # Special methods used by copy.copy() and copy.deepcopy(). When those are # used in allmydata.immutable.filenode to copy CheckResults during # repair, we want it to treat the IServer instances as singletons, and # not attempt to duplicate them. def __copy__(self): return self def __deepcopy__(self, memodict): return self def __repr__(self): return "<NativeStorageServer for %s>" % self.get_name() def get_serverid(self): return self._server_id def get_version(self): if self._rref: return self._rref.version return None def get_announcement(self): return self.announcement def get_connection_status(self): last_received = None if self._rref: last_received = self._rref.getDataLastReceivedAt() return connection_status.from_foolscap_reconnector(self._reconnector, last_received) def is_connected(self): return self._is_connected def get_available_space(self): version = self.get_version() return _available_space_from_version(version) def start_connecting(self, trigger_cb): self._tub = self._tub_maker(self._handler_overrides) self._tub.setServiceParent(self) self._trigger_cb = trigger_cb self._reconnector = self._storage.connect_to(self._tub, self._got_connection) def _got_connection(self, rref): lp = log.msg(format="got connection to %(name)s, getting versions", name=self.get_name(), facility="tahoe.storage_broker", umid="coUECQ") if self._trigger_cb: eventually(self._trigger_cb) default = self.VERSION_DEFAULTS d = add_version_to_remote_reference(rref, default) d.addCallback(self._got_versioned_service, lp) d.addCallback(lambda ign: self._on_status_changed.notify(self)) d.addErrback(log.err, format="storageclient._got_connection", name=self.get_name(), umid="Sdq3pg") def _got_versioned_service(self, rref, lp): log.msg(format="%(name)s provided version info %(version)s", name=self.get_name(), version=rref.version, facility="tahoe.storage_broker", umid="SWmJYg", level=log.NOISY, parent=lp) self.last_connect_time = time.time() self._rref = rref self._is_connected = True rref.notifyOnDisconnect(self._lost) def get_rref(self): return self._rref def get_storage_server(self): """ See ``IServer.get_storage_server``. """ if self._rref is None: return None return self._storage.storage_server def _lost(self): log.msg(format="lost connection to %(name)s", name=self.get_name(), facility="tahoe.storage_broker", umid="zbRllw") self.last_loss_time = time.time() # self._rref is now stale: all callRemote()s will get a # DeadReferenceError. We leave the stale reference in place so that # uploader/downloader code (which received this IServer through # get_connected_servers() or get_servers_for_psi()) can continue to # use s.get_rref().callRemote() and not worry about it being None.
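        # For example (illustrative caller-side code, not executed here):
        #
        #     d = server.get_rref().callRemote("get_version")
        #
        # will still be issued against the stale reference and simply fail
        # with DeadReferenceError, rather than crashing on a None rref.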
self._is_connected = False def stop_connecting(self): # used when this descriptor has been superseded by another self._reconnector.stopConnecting() def try_to_connect(self): # used when the broker wants us to hurry up self._reconnector.reset() @async_to_deferred async def _pick_a_http_server( reactor, nurls: list[DecodedURL], request: Callable[[object, DecodedURL], defer.Deferred[object]] ) -> DecodedURL: """Pick the first server we successfully send a request to. Fires with ``None`` if no server was found, or with the ``DecodedURL`` of the first successfully-connected server. """ requests = [] for nurl in nurls: def to_nurl(_: object, nurl: DecodedURL=nurl) -> DecodedURL: return nurl requests.append(request(reactor, nurl).addCallback(to_nurl)) queries: defer.Deferred[tuple[int, DecodedURL]] = race(requests) _, nurl = await queries return nurl @implementer(IServer) class HTTPNativeStorageServer(service.MultiService): """ Like ``NativeStorageServer``, but for HTTP clients. The notion of being "connected" is less meaningful for HTTP; we just poll occasionally, and if we've succeeded at last poll, we assume we're "connected". """ def __init__(self, server_id: bytes, announcement, default_connection_handlers: dict[str,str], reactor=reactor, grid_manager_verifier=None, tor_provider: Optional[TorProvider]=None): service.MultiService.__init__(self) assert isinstance(server_id, bytes) self._server_id = server_id self.announcement = announcement self._on_status_changed = ObserverList() self._reactor = reactor self._grid_manager_verifier = grid_manager_verifier self._storage_client_factory = StorageClientFactory( default_connection_handlers, tor_provider ) furl = announcement["anonymous-storage-FURL"].encode("utf-8") ( self._nickname, self._permutation_seed, self._tubid, self._short_description, self._long_description ) = _parse_announcement(server_id, furl, announcement) self._nurls = [ DecodedURL.from_text(u) for u in announcement[ANONYMOUS_STORAGE_NURLS] ] self._istorage_server : Optional[_HTTPStorageServer] = None self._connection_status = connection_status.ConnectionStatus.unstarted() self._version = None self._last_connect_time = None self._connecting_deferred : Optional[defer.Deferred[object]]= None def get_permutation_seed(self): return self._permutation_seed def get_name(self): return self._short_description def get_longname(self): return self._long_description def get_tubid(self): return self._tubid def get_lease_seed(self): # Apparently this is what Foolscap version above does?! return self._tubid def get_foolscap_write_enabler_seed(self): return self._tubid def get_nickname(self): return self._nickname def on_status_changed(self, status_changed): """ :param status_changed: a callable taking a single arg (the NativeStorageServer) that is notified when we become connected """ return self._on_status_changed.subscribe(status_changed) def upload_permitted(self): """ If our client is configured with Grid Manager public-keys, we will only upload to storage servers that have a currently-valid certificate signed by at least one of the Grid Managers we accept. :return: True if we should use this server for uploads, False otherwise. """ # if we have no Grid Manager keys configured, choice is easy if self._grid_manager_verifier is None: return True return self._grid_manager_verifier() # Special methods used by copy.copy() and copy.deepcopy().
When those are # used in allmydata.immutable.filenode to copy CheckResults during # repair, we want it to treat the IServer instances as singletons, and # not attempt to duplicate them. def __copy__(self): return self def __deepcopy__(self, memodict): return self def __repr__(self): return "<HTTPNativeStorageServer for %s>" % self.get_name() def get_serverid(self): return self._server_id def get_version(self): return self._version def get_announcement(self): return self.announcement def get_connection_status(self): return self._connection_status def is_connected(self): return self._connection_status.connected def get_available_space(self): version = self.get_version() return _available_space_from_version(version) def start_connecting(self, trigger_cb): self._lc = LoopingCall(self._connect) self._lc.start(1, True) def _got_version(self, version): self._last_connect_time = time.time() self._version = version self._connection_status = connection_status.ConnectionStatus( True, "connected", [], self._last_connect_time, self._last_connect_time ) self._on_status_changed.notify(self) def _failed_to_connect(self, reason): self._connection_status = connection_status.ConnectionStatus( False, f"failure: {reason}", [], self._last_connect_time, self._last_connect_time ) self._on_status_changed.notify(self) def get_storage_server(self): """ See ``IServer.get_storage_server``. """ if self._connection_status.summary == "unstarted": return None return self._istorage_server def stop_connecting(self): self._lc.stop() if self._connecting_deferred is not None: self._connecting_deferred.cancel() def try_to_connect(self): self._connect() def _connect(self) -> defer.Deferred[object]: """ Try to connect to a working storage server. If called while a previous ``_connect()`` is already running, it will just return the same ``Deferred``. ``LoopingCall.stop()`` doesn't cancel ``Deferred``s, unfortunately: https://github.com/twisted/twisted/issues/11814. Thus we want to store the ``Deferred`` so we can cancel it when necessary. We also want to return it so that loop iterations take it into account, and a new iteration doesn't start while we're in the middle of the previous one. """ # Conceivably try_to_connect() was called on this before, in which case # we already are in the middle of connecting. So in that case just # return whatever is in progress: if self._connecting_deferred is not None: return self._connecting_deferred def done(_): self._connecting_deferred = None connecting = self._pick_server_and_get_version() # Set a short timeout since we're relying on this for server liveness. connecting = connecting.addTimeout(5, self._reactor).addCallbacks( self._got_version, self._failed_to_connect ).addBoth(done) self._connecting_deferred = connecting return connecting @async_to_deferred async def _pick_server_and_get_version(self): """ Minimal implementation of connection logic: pick a server, get its version. This doesn't deal with errors much, so as to minimize statefulness. It does change ``self._istorage_server``, so possibly more refactoring would be useful to remove even that much statefulness. """ async def get_istorage_server() -> _HTTPStorageServer: if self._istorage_server is not None: return self._istorage_server # We haven't selected a server yet, so let's do so. # TODO This is somewhat inefficient on startup: it takes two successful # version() calls before we are live talking to a server, it could only # be one.
See https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3992 @async_to_deferred async def request(reactor, nurl: DecodedURL): # Since we're just using this one off to check if the NURL # works, no need for persistent pool or other fanciness. pool = HTTPConnectionPool(reactor, persistent=False) pool.retryAutomatically = False storage_client = await self._storage_client_factory.create_storage_client( nurl, reactor, pool ) return await StorageClientGeneral(storage_client).get_version() nurl = await _pick_a_http_server(reactor, self._nurls, request) # If we've gotten this far, we've found a working NURL. storage_client = await self._storage_client_factory.create_storage_client( nurl, cast(IReactorTime, reactor), None ) self._istorage_server = _HTTPStorageServer.from_http_client(storage_client) return self._istorage_server try: storage_server = await get_istorage_server() # Get the version from the remote server. version = await storage_server.get_version() return version except Exception as e: log.msg(f"Failed to connect to a HTTP storage server: {e}", level=log.CURIOUS) raise def stopService(self): if self._connecting_deferred is not None: self._connecting_deferred.cancel() result = service.MultiService.stopService(self) if self._lc.running: self._lc.stop() self._failed_to_connect("shut down") if self._istorage_server is not None: client_shutting_down = self._istorage_server._http_client.shutdown() result.addCallback(lambda _: client_shutting_down) return result class UnknownServerTypeError(Exception): pass @implementer(IStorageServer) @attr.s class _StorageServer(object): """ ``_StorageServer`` is a direct pass-through to an ``RIStorageServer`` via a ``RemoteReference``. """ _get_rref = attr.ib() @property def _rref(self): return self._get_rref() def get_version(self): return self._rref.callRemote( "get_version", ) def allocate_buckets( self, storage_index, renew_secret, cancel_secret, sharenums, allocated_size, canary, ): return self._rref.callRemote( "allocate_buckets", storage_index, renew_secret, cancel_secret, sharenums, allocated_size, canary, ) def add_lease( self, storage_index, renew_secret, cancel_secret, ): return self._rref.callRemote( "add_lease", storage_index, renew_secret, cancel_secret, ) def get_buckets( self, storage_index, ): return self._rref.callRemote( "get_buckets", storage_index, ) def slot_readv( self, storage_index, shares, readv, ): return self._rref.callRemote( "slot_readv", storage_index, shares, readv, ) def slot_testv_and_readv_and_writev( self, storage_index, secrets, tw_vectors, r_vector, ): # Match the wire protocol, which requires 4-tuples for test vectors. wire_format_tw_vectors = { key: ( [(start, length, b"eq", data) for (start, length, data) in value[0]], value[1], value[2], ) for (key, value) in tw_vectors.items() } return self._rref.callRemote( "slot_testv_and_readv_and_writev", storage_index, secrets, wire_format_tw_vectors, r_vector, ) def advise_corrupt_share( self, share_type, storage_index, shnum, reason, ): return self._rref.callRemote( "advise_corrupt_share", share_type, storage_index, shnum, reason, ).addErrback(log.err, "Error from remote call to advise_corrupt_share") @attr.s(hash=True) class _FakeRemoteReference(object): """ Emulate a Foolscap RemoteReference, calling a local object instead. 
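    A sketch of the intended use (illustrative; ``bucket_reader`` is a
    hypothetical local object for this example)::

        rref = _FakeRemoteReference(bucket_reader)
        d = rref.callRemote("read", 0, 1024)  # really calls bucket_reader.read(0, 1024)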
""" local_object = attr.ib(type=object) @defer.inlineCallbacks def callRemote(self, action, *args, **kwargs): try: result = yield getattr(self.local_object, action)(*args, **kwargs) defer.returnValue(result) except HTTPClientException as e: raise RemoteException((e.code, e.message, e.body)) @attr.s class _HTTPBucketWriter(object): """ Emulate a ``RIBucketWriter``, but use HTTP protocol underneath. """ client = attr.ib(type=StorageClientImmutables) storage_index = attr.ib(type=bytes) share_number = attr.ib(type=int) upload_secret = attr.ib(type=bytes) finished = attr.ib(type=defer.Deferred[bool], factory=defer.Deferred) def abort(self): return self.client.abort_upload(self.storage_index, self.share_number, self.upload_secret) @defer.inlineCallbacks def write(self, offset, data): result = yield self.client.write_share_chunk( self.storage_index, self.share_number, self.upload_secret, offset, data ) if result.finished: self.finished.callback(True) defer.returnValue(None) def close(self): # We're not _really_ closed until all writes have succeeded and we # finished writing all the data. return self.finished def _ignore_404(failure: Failure) -> Optional[Failure]: """ Useful for advise_corrupt_share(), since it swallows unknown share numbers in Foolscap. """ if failure.check(HTTPClientException) and failure.value.code == http.NOT_FOUND: return None else: return failure @attr.s(hash=True) class _HTTPBucketReader(object): """ Emulate a ``RIBucketReader``, but use HTTP protocol underneath. """ client = attr.ib(type=StorageClientImmutables) storage_index = attr.ib(type=bytes) share_number = attr.ib(type=int) def read(self, offset, length): return self.client.read_share_chunk( self.storage_index, self.share_number, offset, length ) def advise_corrupt_share(self, reason): return self.client.advise_corrupt_share( self.storage_index, self.share_number, str(reason, "utf-8", errors="backslashreplace") ).addErrback(_ignore_404) # WORK IN PROGRESS, for now it doesn't actually implement whole thing. @implementer(IStorageServer) # type: ignore @attr.s class _HTTPStorageServer(object): """ Talk to remote storage server over HTTP. """ _http_client = attr.ib(type=StorageClient) @staticmethod def from_http_client(http_client: StorageClient) -> _HTTPStorageServer: """ Create an ``IStorageServer`` from a HTTP ``StorageClient``. 
""" return _HTTPStorageServer(http_client=http_client) def get_version(self) -> defer.Deferred[VersionMessage]: return StorageClientGeneral(self._http_client).get_version() @defer.inlineCallbacks def allocate_buckets( self, storage_index, renew_secret, cancel_secret, sharenums, allocated_size, canary ): upload_secret = urandom(20) immutable_client = StorageClientImmutables(self._http_client) result = immutable_client.create( storage_index, sharenums, allocated_size, upload_secret, renew_secret, cancel_secret ) result = yield result defer.returnValue( (result.already_have, { share_num: _FakeRemoteReference(_HTTPBucketWriter( client=immutable_client, storage_index=storage_index, share_number=share_num, upload_secret=upload_secret )) for share_num in result.allocated }) ) @defer.inlineCallbacks def get_buckets( self, storage_index ): immutable_client = StorageClientImmutables(self._http_client) share_numbers = yield immutable_client.list_shares( storage_index ) defer.returnValue({ share_num: _FakeRemoteReference(_HTTPBucketReader( immutable_client, storage_index, share_num )) for share_num in share_numbers }) @async_to_deferred async def add_lease( self, storage_index, renew_secret, cancel_secret ): client = StorageClientGeneral(self._http_client) try: await client.add_or_renew_lease( storage_index, renew_secret, cancel_secret ) except ClientException as e: if e.code == http.NOT_FOUND: # Silently do nothing, as is the case for the Foolscap client return raise def advise_corrupt_share( self, share_type, storage_index, shnum, reason: bytes ): if share_type == b"immutable": client : Union[StorageClientImmutables, StorageClientMutables] = StorageClientImmutables(self._http_client) elif share_type == b"mutable": client = StorageClientMutables(self._http_client) else: raise ValueError("Unknown share type") return client.advise_corrupt_share( storage_index, shnum, str(reason, "utf-8", errors="backslashreplace") ).addErrback(_ignore_404) @defer.inlineCallbacks def slot_readv(self, storage_index, shares, readv): mutable_client = StorageClientMutables(self._http_client) pending_reads = {} reads = {} # If shares list is empty, that means list all shares, so we need # to do a query to get that. 
if not shares: shares = yield mutable_client.list_shares(storage_index) # Start all the queries in parallel: for share_number in shares: share_reads = defer.gatherResults( [ mutable_client.read_share_chunk( storage_index, share_number, offset, length ) for (offset, length) in readv ] ) pending_reads[share_number] = share_reads # Wait for all the queries to finish: for share_number, pending_result in pending_reads.items(): reads[share_number] = yield pending_result return reads @defer.inlineCallbacks def slot_testv_and_readv_and_writev( self, storage_index, secrets, tw_vectors, r_vector, ): mutable_client = StorageClientMutables(self._http_client) we_secret, lr_secret, lc_secret = secrets client_tw_vectors = {} for share_num, (test_vector, data_vector, new_length) in tw_vectors.items(): client_test_vectors = [ TestVector(offset=offset, size=size, specimen=specimen) for (offset, size, specimen) in test_vector ] client_write_vectors = [ WriteVector(offset=offset, data=data) for (offset, data) in data_vector ] client_tw_vectors[share_num] = TestWriteVectors( test_vectors=client_test_vectors, write_vectors=client_write_vectors, new_length=new_length ) client_read_vectors = [ ReadVector(offset=offset, size=size) for (offset, size) in r_vector ] try: client_result = yield mutable_client.read_test_write_chunks( storage_index, we_secret, lr_secret, lc_secret, client_tw_vectors, client_read_vectors, ) except ClientException as e: if e.code == http.UNAUTHORIZED: raise RemoteException("Unauthorized write, possibly you passed the wrong write enabler?") raise return (client_result.success, client_result.reads) tahoe_lafs-1.20.0/src/allmydata/unknown.py0000644000000000000000000002066413615410400015437 0ustar00"""Ported to Python 3. """ from zope.interface import implementer from twisted.internet import defer from allmydata.interfaces import IFilesystemNode, MustNotBeUnknownRWError, \ MustBeDeepImmutableError from allmydata import uri from allmydata.uri import ALLEGED_READONLY_PREFIX, ALLEGED_IMMUTABLE_PREFIX # See ticket #833 for design rationale of UnknownNodes. def strip_prefix_for_ro(ro_uri, deep_immutable): """Strip prefixes when storing an URI in a ro_uri slot.""" # It is possible for an alleged-immutable URI to be put into a # mutable directory. In that case the ALLEGED_IMMUTABLE_PREFIX # should not be stripped. In other cases, the prefix can safely # be stripped because it is implied by the context. if ro_uri.startswith(ALLEGED_IMMUTABLE_PREFIX): if not deep_immutable: return ro_uri return ro_uri[len(ALLEGED_IMMUTABLE_PREFIX):] elif ro_uri.startswith(ALLEGED_READONLY_PREFIX): return ro_uri[len(ALLEGED_READONLY_PREFIX):] else: return ro_uri @implementer(IFilesystemNode) class UnknownNode(object): def __init__(self, given_rw_uri, given_ro_uri, deep_immutable=False, name=u""): assert given_rw_uri is None or isinstance(given_rw_uri, bytes) assert given_ro_uri is None or isinstance(given_ro_uri, bytes) given_rw_uri = given_rw_uri or None given_ro_uri = given_ro_uri or None # We don't raise errors when creating an UnknownNode; we instead create an # opaque node (with rw_uri and ro_uri both None) that records the error. # This avoids breaking operations that never store the opaque node. # Note that this means that if a stored dirnode has only a rw_uri, it # might be dropped. Any future "write-only" cap formats should have a dummy # unusable readcap to stop that from happening. 
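        # For example (illustrative cap strings):
        #
        #     UnknownNode(b"imm.URI:FOO:...", None, deep_immutable=True)
        #
        # ends up below with rw_uri None and ro_uri b"imm.URI:FOO:...",
        # while an unprefixed rw cap in the same deep-immutable situation
        # yields an opaque node whose .error is a MustNotBeUnknownRWError.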
self.error = None self.rw_uri = self.ro_uri = None if given_rw_uri: if deep_immutable: if given_rw_uri.startswith(ALLEGED_IMMUTABLE_PREFIX) and not given_ro_uri: # We needed an immutable cap, and were given one. It was given in the # rw_uri slot, but that's fine; we'll move it to ro_uri below. pass elif not given_ro_uri: self.error = MustNotBeUnknownRWError("cannot attach unknown rw cap as immutable child", name, True) return # node will be opaque else: # We could report either error, but this probably makes more sense. self.error = MustBeDeepImmutableError("cannot attach unknown rw cap as immutable child", name) return # node will be opaque if not given_ro_uri: # We were given a single cap argument, or a rw_uri with no ro_uri. if not (given_rw_uri.startswith(ALLEGED_READONLY_PREFIX) or given_rw_uri.startswith(ALLEGED_IMMUTABLE_PREFIX)): # If the single cap is unprefixed, then we cannot tell whether it is a # writecap, and we don't know how to diminish it to a readcap if it is one. # If it didn't *already* have at least an ALLEGED_READONLY_PREFIX, then # prefixing it would be a bad idea because we have been given no reason # to believe that it is a readcap, so we might be letting a client # inadvertently grant excess write authority. self.error = MustNotBeUnknownRWError("cannot attach unknown rw cap as child", name, False) return # node will be opaque # OTOH, if the single cap already had a prefix (which is of the required # strength otherwise an error would have been thrown above), then treat it # as though it had been given in the ro_uri slot. This has a similar effect # to the use for known caps of 'bigcap = writecap or readcap' in # nodemaker.py: create_from_cap. It enables copying of unknown readcaps to # work in as many cases as we can securely allow. given_ro_uri = given_rw_uri given_rw_uri = None elif given_ro_uri.startswith(ALLEGED_IMMUTABLE_PREFIX): # Strange corner case: we were given a cap in both slots, with the ro_uri # alleged to be immutable. A real immutable object wouldn't have a writecap. self.error = MustBeDeepImmutableError("cannot accept a child entry that specifies " "both rw_uri, and ro_uri with an imm. prefix", name) return # node will be opaque # If the ro_uri definitely fails the constraint, it should be treated as opaque and # the error recorded. 
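        # For example (illustrative): a b"ro."-prefixed ro_uri arriving in a
        # deep-immutable context is not rejected here; the branch further
        # below strengthens its prefix instead, so b"ro.URI:BAR:..." is
        # stored as b"imm.URI:BAR:...".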
if given_ro_uri: read_cap = uri.from_string(given_ro_uri, deep_immutable=deep_immutable, name=name) if isinstance(read_cap, uri.UnknownURI): self.error = read_cap.get_error() if self.error: assert self.rw_uri is None and self.ro_uri is None return if deep_immutable: assert self.rw_uri is None # strengthen the constraint on ro_uri to ALLEGED_IMMUTABLE_PREFIX if given_ro_uri: if given_ro_uri.startswith(ALLEGED_IMMUTABLE_PREFIX): self.ro_uri = given_ro_uri elif given_ro_uri.startswith(ALLEGED_READONLY_PREFIX): self.ro_uri = ALLEGED_IMMUTABLE_PREFIX + given_ro_uri[len(ALLEGED_READONLY_PREFIX):] else: self.ro_uri = ALLEGED_IMMUTABLE_PREFIX + given_ro_uri else: # not immutable, so a writecap is allowed self.rw_uri = given_rw_uri # strengthen the constraint on ro_uri to ALLEGED_READONLY_PREFIX if given_ro_uri: if (given_ro_uri.startswith(ALLEGED_READONLY_PREFIX) or given_ro_uri.startswith(ALLEGED_IMMUTABLE_PREFIX)): self.ro_uri = given_ro_uri else: self.ro_uri = ALLEGED_READONLY_PREFIX + given_ro_uri def get_cap(self): return uri.UnknownURI(self.rw_uri or self.ro_uri) def get_readcap(self): return uri.UnknownURI(self.ro_uri) def is_readonly(self): raise AssertionError("an UnknownNode might be either read-only or " "read/write, so we shouldn't be calling is_readonly") def is_mutable(self): raise AssertionError("an UnknownNode might be either mutable or immutable, " "so we shouldn't be calling is_mutable") def is_unknown(self): return True def is_allowed_in_immutable_directory(self): # An UnknownNode consisting only of a ro_uri is allowed in an # immutable directory, even though we do not know that it is # immutable (or even read-only), provided that no error was detected. return not self.error and not self.rw_uri def is_alleged_immutable(self): return not self.error and not self.rw_uri and (not self.ro_uri or self.ro_uri.startswith(ALLEGED_IMMUTABLE_PREFIX)) def raise_error(self): if self.error is not None: raise self.error def get_uri(self): return self.rw_uri or self.ro_uri def get_write_uri(self): return self.rw_uri def get_readonly_uri(self): return self.ro_uri def get_storage_index(self): return None def get_verify_cap(self): return None def get_repair_cap(self): return None def get_size(self): return None def get_current_size(self): return defer.succeed(None) def check(self, monitor, verify, add_lease): return defer.succeed(None) def check_and_repair(self, monitor, verify, add_lease): return defer.succeed(None) def __eq__(self, other): if not isinstance(other, UnknownNode): return False return other.ro_uri == self.ro_uri and other.rw_uri == self.rw_uri def __ne__(self, other): return not (self == other) tahoe_lafs-1.20.0/src/allmydata/uri.py0000644000000000000000000007127513615410400014543 0ustar00""" URIs (kinda sorta, really they're capabilities?). Ported to Python 3. Methods ending in to_string() are actually to_bytes(), possibly should be fixed in follow-up port. """ import re from typing import Type from zope.interface import implementer from twisted.python.components import registerAdapter from allmydata.storage.server import si_a2b, si_b2a from allmydata.util import base32, hashutil from allmydata.util.assertutil import _assert from allmydata.interfaces import IURI, IDirnodeURI, IFileURI, IImmutableFileURI, \ IVerifierURI, IMutableFileURI, IDirectoryURI, IReadonlyDirectoryURI, \ MustBeDeepImmutableError, MustBeReadonlyError, CapConstraintError class BadURIError(CapConstraintError): pass # The URI shall be an ASCII representation of a reference to the file/directory. 
# It shall contain enough information to retrieve and validate the contents. # It shall be expressed in a limited character set (currently base32 plus ':' and # capital letters, but future URIs might use a larger charset). # TODO: # - rename all of the *URI classes/interfaces to *Cap # - make variable and method names consistently use _uri for an URI string, # and _cap for a Cap object (decoded URI) BASE32STR_128bits = b'(%s{25}%s)' % (base32.BASE32CHAR, base32.BASE32CHAR_3bits) BASE32STR_256bits = b'(%s{51}%s)' % (base32.BASE32CHAR, base32.BASE32CHAR_1bits) NUMBER=b'([0-9]+)' class _BaseURI(object): def __hash__(self): return self.to_string().__hash__() def __eq__(self, them): if isinstance(them, _BaseURI): return self.to_string() == them.to_string() else: return False def __ne__(self, them): if isinstance(them, _BaseURI): return self.to_string() != them.to_string() else: return True def get_storage_index(self): return self.storage_index @implementer(IURI, IImmutableFileURI) class CHKFileURI(_BaseURI): BASE_STRING=b'URI:CHK:' STRING_RE=re.compile(b'^URI:CHK:'+BASE32STR_128bits+b':'+ BASE32STR_256bits+b':'+NUMBER+b':'+NUMBER+b':'+NUMBER+ b'$') def __init__(self, key, uri_extension_hash, needed_shares, total_shares, size): self.key = key self.uri_extension_hash = uri_extension_hash self.needed_shares = needed_shares self.total_shares = total_shares self.size = size self.storage_index = hashutil.storage_index_hash(self.key) if not len(self.storage_index) == 16: # sha256 hash truncated to 128 raise BadURIError("storage index must be 16 bytes long") @classmethod def init_from_string(cls, uri): mo = cls.STRING_RE.search(uri) if not mo: raise BadURIError("%r doesn't look like a %s cap" % (uri, cls)) return cls(base32.a2b(mo.group(1)), base32.a2b(mo.group(2)), int(mo.group(3)), int(mo.group(4)), int(mo.group(5))) def to_string(self): assert isinstance(self.needed_shares, int) assert isinstance(self.total_shares, int) assert isinstance(self.size, int) return (b'URI:CHK:%s:%s:%d:%d:%d' % (base32.b2a(self.key), base32.b2a(self.uri_extension_hash), self.needed_shares, self.total_shares, self.size)) def is_readonly(self): return True def is_mutable(self): return False def get_readonly(self): return self def get_size(self): return self.size def get_verify_cap(self): return CHKFileVerifierURI(storage_index=self.storage_index, uri_extension_hash=self.uri_extension_hash, needed_shares=self.needed_shares, total_shares=self.total_shares, size=self.size) @implementer(IVerifierURI) class CHKFileVerifierURI(_BaseURI): BASE_STRING=b'URI:CHK-Verifier:' STRING_RE=re.compile(b'^URI:CHK-Verifier:'+BASE32STR_128bits+b':'+ BASE32STR_256bits+b':'+NUMBER+b':'+NUMBER+b':'+NUMBER) def __init__(self, storage_index, uri_extension_hash, needed_shares, total_shares, size): assert len(storage_index) == 16 self.storage_index = storage_index self.uri_extension_hash = uri_extension_hash self.needed_shares = needed_shares self.total_shares = total_shares self.size = size @classmethod def init_from_string(cls, uri): mo = cls.STRING_RE.search(uri) if not mo: raise BadURIError("'%s' doesn't look like a %s cap" % (uri, cls)) return cls(si_a2b(mo.group(1)), base32.a2b(mo.group(2)), int(mo.group(3)), int(mo.group(4)), int(mo.group(5))) def to_string(self): assert isinstance(self.needed_shares, int) assert isinstance(self.total_shares, int) assert isinstance(self.size, int) return (b'URI:CHK-Verifier:%s:%s:%d:%d:%d' % (si_b2a(self.storage_index), base32.b2a(self.uri_extension_hash), self.needed_shares, self.total_shares, self.size)) def 
is_readonly(self): return True def is_mutable(self): return False def get_readonly(self): return self def get_verify_cap(self): return self @implementer(IURI, IImmutableFileURI) class LiteralFileURI(_BaseURI): BASE_STRING=b'URI:LIT:' STRING_RE=re.compile(b'^URI:LIT:'+base32.BASE32STR_anybytes+b'$') def __init__(self, data=None): if data is not None: assert isinstance(data, bytes) self.data = data @classmethod def init_from_string(cls, uri): mo = cls.STRING_RE.search(uri) if not mo: raise BadURIError("'%s' doesn't look like a %s cap" % (uri, cls)) return cls(base32.a2b(mo.group(1))) def to_string(self): return b'URI:LIT:%s' % base32.b2a(self.data) def is_readonly(self): return True def is_mutable(self): return False def get_readonly(self): return self def get_storage_index(self): return None def get_verify_cap(self): # LIT files need no verification, all the data is present in the URI return None def get_size(self): return len(self.data) @implementer(IURI, IMutableFileURI) class WriteableSSKFileURI(_BaseURI): BASE_STRING=b'URI:SSK:' STRING_RE=re.compile(b'^'+BASE_STRING+BASE32STR_128bits+b':'+ BASE32STR_256bits+b'$') def __init__(self, writekey, fingerprint): self.writekey = writekey self.readkey = hashutil.ssk_readkey_hash(writekey) self.storage_index = hashutil.ssk_storage_index_hash(self.readkey) assert len(self.storage_index) == 16 self.fingerprint = fingerprint @classmethod def init_from_string(cls, uri): mo = cls.STRING_RE.search(uri) if not mo: raise BadURIError("%r doesn't look like a %s cap" % (uri, cls)) return cls(base32.a2b(mo.group(1)), base32.a2b(mo.group(2))) def to_string(self): assert isinstance(self.writekey, bytes) assert isinstance(self.fingerprint, bytes) return b'URI:SSK:%s:%s' % (base32.b2a(self.writekey), base32.b2a(self.fingerprint)) def __repr__(self): return "<%s %r>" % (self.__class__.__name__, self.abbrev()) def abbrev(self): return base32.b2a(self.writekey[:5]) def abbrev_si(self): return base32.b2a(self.storage_index)[:5] def is_readonly(self): return False def is_mutable(self): return True def get_readonly(self): return ReadonlySSKFileURI(self.readkey, self.fingerprint) def get_verify_cap(self): return SSKVerifierURI(self.storage_index, self.fingerprint) @implementer(IURI, IMutableFileURI) class ReadonlySSKFileURI(_BaseURI): BASE_STRING=b'URI:SSK-RO:' STRING_RE=re.compile(b'^URI:SSK-RO:'+BASE32STR_128bits+b':'+BASE32STR_256bits+b'$') def __init__(self, readkey, fingerprint): self.readkey = readkey self.storage_index = hashutil.ssk_storage_index_hash(self.readkey) assert len(self.storage_index) == 16 self.fingerprint = fingerprint @classmethod def init_from_string(cls, uri): mo = cls.STRING_RE.search(uri) if not mo: raise BadURIError("%r doesn't look like a %s cap" % (uri, cls)) return cls(base32.a2b(mo.group(1)), base32.a2b(mo.group(2))) def to_string(self): assert isinstance(self.readkey, bytes) assert isinstance(self.fingerprint, bytes) return b'URI:SSK-RO:%s:%s' % (base32.b2a(self.readkey), base32.b2a(self.fingerprint)) def __repr__(self): return "<%s %r>" % (self.__class__.__name__, self.abbrev()) def abbrev(self): return base32.b2a(self.readkey[:5]) def abbrev_si(self): return base32.b2a(self.storage_index)[:5] def is_readonly(self): return True def is_mutable(self): return True def get_readonly(self): return self def get_verify_cap(self): return SSKVerifierURI(self.storage_index, self.fingerprint) @implementer(IVerifierURI) class SSKVerifierURI(_BaseURI): BASE_STRING=b'URI:SSK-Verifier:' 
STRING_RE=re.compile(b'^'+BASE_STRING+BASE32STR_128bits+b':'+BASE32STR_256bits+b'$') def __init__(self, storage_index, fingerprint): assert len(storage_index) == 16 self.storage_index = storage_index self.fingerprint = fingerprint @classmethod def init_from_string(cls, uri): mo = cls.STRING_RE.search(uri) if not mo: raise BadURIError("%r doesn't look like a %s cap" % (uri, cls)) return cls(si_a2b(mo.group(1)), base32.a2b(mo.group(2))) def to_string(self): assert isinstance(self.storage_index, bytes) assert isinstance(self.fingerprint, bytes) return b'URI:SSK-Verifier:%s:%s' % (si_b2a(self.storage_index), base32.b2a(self.fingerprint)) def is_readonly(self): return True def is_mutable(self): return False def get_readonly(self): return self def get_verify_cap(self): return self @implementer(IURI, IMutableFileURI) class WriteableMDMFFileURI(_BaseURI): BASE_STRING=b'URI:MDMF:' STRING_RE=re.compile(b'^'+BASE_STRING+BASE32STR_128bits+b':'+BASE32STR_256bits+b'(:|$)') def __init__(self, writekey, fingerprint): self.writekey = writekey self.readkey = hashutil.ssk_readkey_hash(writekey) self.storage_index = hashutil.ssk_storage_index_hash(self.readkey) assert len(self.storage_index) == 16 self.fingerprint = fingerprint @classmethod def init_from_string(cls, uri): mo = cls.STRING_RE.search(uri) if not mo: raise BadURIError("%r doesn't look like a %s cap" % (uri, cls)) return cls(base32.a2b(mo.group(1)), base32.a2b(mo.group(2))) def to_string(self): assert isinstance(self.writekey, bytes) assert isinstance(self.fingerprint, bytes) ret = b'URI:MDMF:%s:%s' % (base32.b2a(self.writekey), base32.b2a(self.fingerprint)) return ret def __repr__(self): return "<%s %r>" % (self.__class__.__name__, self.abbrev()) def abbrev(self): return base32.b2a(self.writekey[:5]) def abbrev_si(self): return base32.b2a(self.storage_index)[:5] def is_readonly(self): return False def is_mutable(self): return True def get_readonly(self): return ReadonlyMDMFFileURI(self.readkey, self.fingerprint) def get_verify_cap(self): return MDMFVerifierURI(self.storage_index, self.fingerprint) @implementer(IURI, IMutableFileURI) class ReadonlyMDMFFileURI(_BaseURI): BASE_STRING=b'URI:MDMF-RO:' STRING_RE=re.compile(b'^' +BASE_STRING+BASE32STR_128bits+b':'+BASE32STR_256bits+b'(:|$)') def __init__(self, readkey, fingerprint): self.readkey = readkey self.storage_index = hashutil.ssk_storage_index_hash(self.readkey) assert len(self.storage_index) == 16 self.fingerprint = fingerprint @classmethod def init_from_string(cls, uri): mo = cls.STRING_RE.search(uri) if not mo: raise BadURIError("%r doesn't look like a %s cap" % (uri, cls)) return cls(base32.a2b(mo.group(1)), base32.a2b(mo.group(2))) def to_string(self): assert isinstance(self.readkey, bytes) assert isinstance(self.fingerprint, bytes) ret = b'URI:MDMF-RO:%s:%s' % (base32.b2a(self.readkey), base32.b2a(self.fingerprint)) return ret def __repr__(self): return "<%s %r>" % (self.__class__.__name__, self.abbrev()) def abbrev(self): return base32.b2a(self.readkey[:5]) def abbrev_si(self): return base32.b2a(self.storage_index)[:5] def is_readonly(self): return True def is_mutable(self): return True def get_readonly(self): return self def get_verify_cap(self): return MDMFVerifierURI(self.storage_index, self.fingerprint) @implementer(IVerifierURI) class MDMFVerifierURI(_BaseURI): BASE_STRING=b'URI:MDMF-Verifier:' STRING_RE=re.compile(b'^'+BASE_STRING+BASE32STR_128bits+b':'+BASE32STR_256bits+b'(:|$)') def __init__(self, storage_index, fingerprint): assert len(storage_index) == 16 self.storage_index = 
storage_index self.fingerprint = fingerprint @classmethod def init_from_string(cls, uri): mo = cls.STRING_RE.search(uri) if not mo: raise BadURIError("%r doesn't look like a %s cap" % (uri, cls)) return cls(si_a2b(mo.group(1)), base32.a2b(mo.group(2))) def to_string(self): assert isinstance(self.storage_index, bytes) assert isinstance(self.fingerprint, bytes) ret = b'URI:MDMF-Verifier:%s:%s' % (si_b2a(self.storage_index), base32.b2a(self.fingerprint)) return ret def is_readonly(self): return True def is_mutable(self): return False def get_readonly(self): return self def get_verify_cap(self): return self @implementer(IDirnodeURI) class _DirectoryBaseURI(_BaseURI): def __init__(self, filenode_uri=None): self._filenode_uri = filenode_uri def __repr__(self): return "<%s %r>" % (self.__class__.__name__, self.abbrev()) @classmethod def init_from_string(cls, uri): mo = cls.BASE_STRING_RE.search(uri) if not mo: raise BadURIError("%r doesn't look like a %s cap" % (uri, cls)) bits = uri[mo.end():] fn = cls.INNER_URI_CLASS.init_from_string( cls.INNER_URI_CLASS.BASE_STRING+bits) return cls(fn) def to_string(self): fnuri = self._filenode_uri.to_string() mo = re.match(self.INNER_URI_CLASS.BASE_STRING, fnuri) assert mo, fnuri bits = fnuri[mo.end():] return self.BASE_STRING+bits def abbrev(self): return self._filenode_uri.to_string().split(b':')[2][:5] def abbrev_si(self): si = self._filenode_uri.get_storage_index() if si is None: return b"" return base32.b2a(si)[:5] def is_mutable(self): return True def get_filenode_cap(self): return self._filenode_uri def get_verify_cap(self): return DirectoryURIVerifier(self._filenode_uri.get_verify_cap()) def get_storage_index(self): return self._filenode_uri.get_storage_index() @implementer(IURI, IDirectoryURI) class DirectoryURI(_DirectoryBaseURI): BASE_STRING=b'URI:DIR2:' BASE_STRING_RE=re.compile(b'^'+BASE_STRING) INNER_URI_CLASS=WriteableSSKFileURI def __init__(self, filenode_uri=None): if filenode_uri: assert not filenode_uri.is_readonly() _DirectoryBaseURI.__init__(self, filenode_uri) def is_readonly(self): return False def get_readonly(self): return ReadonlyDirectoryURI(self._filenode_uri.get_readonly()) @implementer(IURI, IReadonlyDirectoryURI) class ReadonlyDirectoryURI(_DirectoryBaseURI): BASE_STRING=b'URI:DIR2-RO:' BASE_STRING_RE=re.compile(b'^'+BASE_STRING) INNER_URI_CLASS=ReadonlySSKFileURI def __init__(self, filenode_uri=None): if filenode_uri: assert filenode_uri.is_readonly() _DirectoryBaseURI.__init__(self, filenode_uri) def is_readonly(self): return True def get_readonly(self): return self @implementer(IURI, IDirnodeURI) class _ImmutableDirectoryBaseURI(_DirectoryBaseURI): def __init__(self, filenode_uri=None): if filenode_uri: assert isinstance(filenode_uri, self.INNER_URI_CLASS), filenode_uri assert not filenode_uri.is_mutable() _DirectoryBaseURI.__init__(self, filenode_uri) def is_readonly(self): return True def is_mutable(self): return False def get_readonly(self): return self class ImmutableDirectoryURI(_ImmutableDirectoryBaseURI): BASE_STRING=b'URI:DIR2-CHK:' BASE_STRING_RE=re.compile(b'^'+BASE_STRING) INNER_URI_CLASS=CHKFileURI def get_verify_cap(self): vcap = self._filenode_uri.get_verify_cap() return ImmutableDirectoryURIVerifier(vcap) class LiteralDirectoryURI(_ImmutableDirectoryBaseURI): BASE_STRING=b'URI:DIR2-LIT:' BASE_STRING_RE=re.compile(b'^'+BASE_STRING) INNER_URI_CLASS=LiteralFileURI def get_verify_cap(self): # LIT caps have no verifier, since they aren't distributed return None @implementer(IURI, IDirectoryURI) class 
MDMFDirectoryURI(_DirectoryBaseURI): BASE_STRING=b'URI:DIR2-MDMF:' BASE_STRING_RE=re.compile(b'^'+BASE_STRING) INNER_URI_CLASS=WriteableMDMFFileURI def __init__(self, filenode_uri=None): if filenode_uri: assert not filenode_uri.is_readonly() _DirectoryBaseURI.__init__(self, filenode_uri) def is_readonly(self): return False def get_readonly(self): return ReadonlyMDMFDirectoryURI(self._filenode_uri.get_readonly()) def get_verify_cap(self): return MDMFDirectoryURIVerifier(self._filenode_uri.get_verify_cap()) @implementer(IURI, IReadonlyDirectoryURI) class ReadonlyMDMFDirectoryURI(_DirectoryBaseURI): BASE_STRING=b'URI:DIR2-MDMF-RO:' BASE_STRING_RE=re.compile(b'^'+BASE_STRING) INNER_URI_CLASS=ReadonlyMDMFFileURI def __init__(self, filenode_uri=None): if filenode_uri: assert filenode_uri.is_readonly() _DirectoryBaseURI.__init__(self, filenode_uri) def is_readonly(self): return True def get_readonly(self): return self def get_verify_cap(self): return MDMFDirectoryURIVerifier(self._filenode_uri.get_verify_cap()) def wrap_dirnode_cap(filecap): if isinstance(filecap, WriteableSSKFileURI): return DirectoryURI(filecap) if isinstance(filecap, ReadonlySSKFileURI): return ReadonlyDirectoryURI(filecap) if isinstance(filecap, CHKFileURI): return ImmutableDirectoryURI(filecap) if isinstance(filecap, LiteralFileURI): return LiteralDirectoryURI(filecap) if isinstance(filecap, WriteableMDMFFileURI): return MDMFDirectoryURI(filecap) if isinstance(filecap, ReadonlyMDMFFileURI): return ReadonlyMDMFDirectoryURI(filecap) raise AssertionError("cannot interpret as a directory cap: %s" % filecap.__class__) @implementer(IURI, IVerifierURI) class MDMFDirectoryURIVerifier(_DirectoryBaseURI): BASE_STRING=b'URI:DIR2-MDMF-Verifier:' BASE_STRING_RE=re.compile(b'^'+BASE_STRING) INNER_URI_CLASS=MDMFVerifierURI def __init__(self, filenode_uri=None): if filenode_uri: _assert(IVerifierURI.providedBy(filenode_uri)) self._filenode_uri = filenode_uri def get_filenode_cap(self): return self._filenode_uri def is_mutable(self): return False def is_readonly(self): return True def get_readonly(self): return self @implementer(IURI, IVerifierURI) class DirectoryURIVerifier(_DirectoryBaseURI): BASE_STRING=b'URI:DIR2-Verifier:' BASE_STRING_RE=re.compile(b'^'+BASE_STRING) INNER_URI_CLASS : Type[IVerifierURI] = SSKVerifierURI def __init__(self, filenode_uri=None): if filenode_uri: _assert(IVerifierURI.providedBy(filenode_uri)) self._filenode_uri = filenode_uri def get_filenode_cap(self): return self._filenode_uri def is_mutable(self): return False def is_readonly(self): return True def get_readonly(self): return self @implementer(IVerifierURI) class ImmutableDirectoryURIVerifier(DirectoryURIVerifier): BASE_STRING=b'URI:DIR2-CHK-Verifier:' BASE_STRING_RE=re.compile(b'^'+BASE_STRING) INNER_URI_CLASS=CHKFileVerifierURI class UnknownURI(object): def __init__(self, uri, error=None): self._uri = uri self._error = error def to_string(self): return self._uri def get_readonly(self): return None def get_error(self): return self._error def get_verify_cap(self): return None ALLEGED_READONLY_PREFIX = b'ro.' ALLEGED_IMMUTABLE_PREFIX = b'imm.' 
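# Illustrative examples of how from_string() below treats these prefixes
# (cap bodies elided; assume otherwise well-formed cap strings):
#
#     from_string(b"URI:DIR2:<writekey>:<fingerprint>")
#         -> DirectoryURI (a writecap)
#     from_string(b"ro." + b"URI:DIR2:<writekey>:<fingerprint>")
#         -> UnknownURI carrying a MustBeReadonlyError, because a writecap
#            is not acceptable in a read-only context
#     from_string(b"URI:DIR2-RO:<readkey>:<fingerprint>", deep_immutable=True)
#         -> UnknownURI carrying a MustBeDeepImmutableError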
def from_string(u, deep_immutable=False, name=u""): """Create URI from either unicode or byte string.""" if isinstance(u, str): u = u.encode("utf-8") if not isinstance(u, bytes): raise TypeError("URI must be unicode string or bytes: %r" % (u,)) # We allow and check ALLEGED_READONLY_PREFIX or ALLEGED_IMMUTABLE_PREFIX # on all URIs, even though we would only strictly need to do so for caps of # new formats (post Tahoe-LAFS 1.6). URIs that are not consistent with their # prefix are treated as unknown. This should be revisited when we add the # new cap formats. See ticket #833 comment:31. s = u can_be_mutable = can_be_writeable = not deep_immutable if s.startswith(ALLEGED_IMMUTABLE_PREFIX): can_be_mutable = can_be_writeable = False s = s[len(ALLEGED_IMMUTABLE_PREFIX):] elif s.startswith(ALLEGED_READONLY_PREFIX): can_be_writeable = False s = s[len(ALLEGED_READONLY_PREFIX):] error = None try: if s.startswith(b'URI:CHK:'): return CHKFileURI.init_from_string(s) elif s.startswith(b'URI:CHK-Verifier:'): return CHKFileVerifierURI.init_from_string(s) elif s.startswith(b'URI:LIT:'): return LiteralFileURI.init_from_string(s) elif s.startswith(b'URI:SSK:'): if can_be_writeable: return WriteableSSKFileURI.init_from_string(s) kind = "URI:SSK file writecap" elif s.startswith(b'URI:SSK-RO:'): if can_be_mutable: return ReadonlySSKFileURI.init_from_string(s) kind = "URI:SSK-RO readcap to a mutable file" elif s.startswith(b'URI:SSK-Verifier:'): return SSKVerifierURI.init_from_string(s) elif s.startswith(b'URI:MDMF:'): if can_be_writeable: return WriteableMDMFFileURI.init_from_string(s) kind = "URI:MDMF file writecap" elif s.startswith(b'URI:MDMF-RO:'): if can_be_mutable: return ReadonlyMDMFFileURI.init_from_string(s) kind = "URI:MDMF-RO readcap to a mutable file" elif s.startswith(b'URI:MDMF-Verifier:'): return MDMFVerifierURI.init_from_string(s) elif s.startswith(b'URI:DIR2:'): if can_be_writeable: return DirectoryURI.init_from_string(s) kind = "URI:DIR2 directory writecap" elif s.startswith(b'URI:DIR2-RO:'): if can_be_mutable: return ReadonlyDirectoryURI.init_from_string(s) kind = "URI:DIR2-RO readcap to a mutable directory" elif s.startswith(b'URI:DIR2-Verifier:'): return DirectoryURIVerifier.init_from_string(s) elif s.startswith(b'URI:DIR2-CHK:'): return ImmutableDirectoryURI.init_from_string(s) elif s.startswith(b'URI:DIR2-CHK-Verifier:'): return ImmutableDirectoryURIVerifier.init_from_string(s) elif s.startswith(b'URI:DIR2-LIT:'): return LiteralDirectoryURI.init_from_string(s) elif s.startswith(b'URI:DIR2-MDMF:'): if can_be_writeable: return MDMFDirectoryURI.init_from_string(s) kind = "URI:DIR2-MDMF directory writecap" elif s.startswith(b'URI:DIR2-MDMF-RO:'): if can_be_mutable: return ReadonlyMDMFDirectoryURI.init_from_string(s) kind = "URI:DIR2-MDMF-RO readcap to a mutable directory" elif s.startswith(b'URI:DIR2-MDMF-Verifier:'): return MDMFDirectoryURIVerifier.init_from_string(s) elif s.startswith(b'x-tahoe-future-test-writeable:') and not can_be_writeable: # For testing how future writeable caps would behave in read-only contexts. kind = "x-tahoe-future-test-writeable: testing cap" elif s.startswith(b'x-tahoe-future-test-mutable:') and not can_be_mutable: # For testing how future mutable readcaps would behave in immutable contexts. kind = "x-tahoe-future-test-mutable: testing cap" else: return UnknownURI(u) # We fell through because a constraint was not met. # Prefer to report the most specific constraint. 
if not can_be_mutable: error = MustBeDeepImmutableError(kind + " used in an immutable context", name) else: error = MustBeReadonlyError(kind + " used in a read-only context", name) except BadURIError as e: error = e return UnknownURI(u, error=error) def is_uri(s): try: from_string(s, deep_immutable=False) return True except (TypeError, AssertionError): return False def is_literal_file_uri(s): if isinstance(s, str): s = s.encode("utf-8") if not isinstance(s, bytes): return False return (s.startswith(b'URI:LIT:') or s.startswith(ALLEGED_READONLY_PREFIX + b'URI:LIT:') or s.startswith(ALLEGED_IMMUTABLE_PREFIX + b'URI:LIT:')) def has_uri_prefix(s): if isinstance(s, str): s = s.encode("utf-8") if not isinstance(s, bytes): return False return (s.startswith(b"URI:") or s.startswith(ALLEGED_READONLY_PREFIX + b'URI:') or s.startswith(ALLEGED_IMMUTABLE_PREFIX + b'URI:')) # These take the same keyword arguments as from_string above. def from_string_dirnode(s, **kwargs): u = from_string(s, **kwargs) _assert(IDirnodeURI.providedBy(u)) return u registerAdapter(from_string_dirnode, bytes, IDirnodeURI) def from_string_filenode(s, **kwargs): u = from_string(s, **kwargs) _assert(IFileURI.providedBy(u)) return u registerAdapter(from_string_filenode, bytes, IFileURI) def from_string_mutable_filenode(s, **kwargs): u = from_string(s, **kwargs) _assert(IMutableFileURI.providedBy(u)) return u registerAdapter(from_string_mutable_filenode, bytes, IMutableFileURI) def from_string_verifier(s, **kwargs): u = from_string(s, **kwargs) _assert(IVerifierURI.providedBy(u)) return u registerAdapter(from_string_verifier, bytes, IVerifierURI) def pack_extension(data): pieces = [] for k in sorted(data.keys()): value = data[k] if isinstance(value, int): value = b"%d" % value if isinstance(k, str): k = k.encode("utf-8") assert isinstance(value, bytes), k assert re.match(br'^[a-zA-Z_\-]+$', k) pieces.append(k + b':' + hashutil.netstring(value)) uri_extension = b''.join(pieces) return uri_extension def unpack_extension(data): d = {} while data: colon = data.index(b':') key = data[:colon] data = data[colon+1:] colon = data.index(b':') number = data[:colon] length = int(number) data = data[colon+1:] value = data[:length] assert data[length:length+1] == b',' data = data[length+1:] d[str(key, "utf-8")] = value # convert certain things to numbers for intkey in ('size', 'segment_size', 'num_segments', 'needed_shares', 'total_shares'): if intkey in d: d[intkey] = int(d[intkey]) return d def unpack_extension_readable(data): unpacked = unpack_extension(data) unpacked["UEB_hash"] = hashutil.uri_extension_hash(data) for k in sorted(unpacked.keys()): if 'hash' in k: unpacked[k] = base32.b2a(unpacked[k]) return unpacked tahoe_lafs-1.20.0/src/allmydata/webish.py0000644000000000000000000003070413615410400015215 0ustar00""" General web server-related utilities. 
""" from __future__ import annotations from six import ensure_str from typing import IO, Callable, Optional import re, time, tempfile from urllib.parse import parse_qsl, urlencode from cgi import ( FieldStorage, ) from io import ( BytesIO, ) from twisted.application import service, strports, internet from twisted.web import static from twisted.web.http import ( parse_qs, ) from twisted.web.server import ( Request, Site, ) from twisted.internet import defer from twisted.internet.address import ( IPv4Address, IPv6Address, ) from allmydata.util import log, fileutil from allmydata.web import introweb, root from allmydata.web.operations import OphandleTable from .web.storage_plugins import ( StoragePlugins, ) class FileUploadFieldStorage(FieldStorage): """ Do terrible things to ensure files are still bytes. On Python 2, uploaded files were always bytes. On Python 3, there's a heuristic: if the filename is set on a field, it's assumed to be a file upload and therefore bytes. If no filename is set, it's Unicode. Unfortunately, we always want it to be bytes, and Tahoe-LAFS also enables setting the filename not via the MIME filename, but via a separate field called "name". Thus we need to do this ridiculous workaround. Mypy doesn't like it either, thus the ``# type: ignore`` below. Source for idea: https://mail.python.org/pipermail/python-dev/2017-February/147402.html """ @property # type: ignore def filename(self): if self.name == "file" and not self._mime_filename: # We use the file field to upload files, see directory.py's # _POST_upload. Lack of _mime_filename means we need to trick # FieldStorage into thinking there is a filename so it'll # return bytes. return "unknown-filename" return self._mime_filename @filename.setter def filename(self, value): self._mime_filename = value class TahoeLAFSRequest(Request, object): """ ``TahoeLAFSRequest`` adds several features to a Twisted Web ``Request`` that are useful for Tahoe-LAFS. :ivar NoneType|FieldStorage fields: For POST requests, a structured representation of the contents of the request body. For anything else, ``None``. """ fields = None def requestReceived(self, command, path, version): """ Called by channel when all data has been received. Override the base implementation to apply certain site-wide policies and to provide less memory-intensive multipart/form-post handling for large file uploads. """ self.content.seek(0) self.args = {} self.stack = [] self.method, self.uri = command, path self.clientproto = version x = self.uri.split(b'?', 1) if len(x) == 1: self.path = self.uri else: self.path, argstring = x self.args = parse_qs(argstring, 1) content_type = (self.requestHeaders.getRawHeaders("content-type") or [""])[0] if self.method == b'POST' and content_type.split(";")[0] in ("multipart/form-data", "application/x-www-form-urlencoded"): # We use FieldStorage here because it performs better than # cgi.parse_multipart(self.content, pdict) which is what # twisted.web.http.Request uses. headers = { ensure_str(name.lower()): ensure_str(value[-1]) for (name, value) in self.requestHeaders.getAllRawHeaders() } if 'content-length' not in headers: # Python 3's cgi module would really, really like us to set Content-Length. 
self.content.seek(0, 2) headers['content-length'] = str(self.content.tell()) self.content.seek(0) self.fields = FileUploadFieldStorage( self.content, headers, environ={'REQUEST_METHOD': 'POST'}) self.content.seek(0) self._tahoeLAFSSecurityPolicy() self.processing_started_timestamp = time.time() self.process() def _tahoeLAFSSecurityPolicy(self): """ Set response properties related to Tahoe-LAFS-imposed security policy. This will ensure that all HTTP requests received by the Tahoe-LAFS HTTP server have this policy imposed, regardless of other implementation details. """ # See https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Frame-Options self.responseHeaders.setRawHeaders("X-Frame-Options", ["DENY"]) # See https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Referrer-Policy self.setHeader("Referrer-Policy", "no-referrer") def _get_client_ip(request): try: get = request.getClientAddress except AttributeError: return request.getClientIP() else: client_addr = get() if isinstance(client_addr, (IPv4Address, IPv6Address)): return client_addr.host return None def _logFormatter(logDateTime, request): # we build up a log string that hides most of the cap, to preserve # user privacy. We retain the query args so we can identify things # like t=json. Then we send it to the flog. We make no attempt to # match apache formatting. TODO: when we move to DSA dirnodes and # shorter caps, consider exposing a few characters of the cap, or # maybe a few characters of its hash. x = request.uri.split(b"?", 1) if len(x) == 1: # no query args path = request.uri queryargs = b"" else: path, queryargs = x queryargs = b"?" + censor(queryargs) if path.startswith(b"/uri/"): path = b"/uri/[CENSORED]" elif path.startswith(b"/file/"): path = b"/file/[CENSORED]" elif path.startswith(b"/named/"): path = b"/named/[CENSORED]" uri = path + queryargs template = "web: %(clientip)s %(method)s %(uri)s %(code)s %(length)s" return template % dict( clientip=_get_client_ip(request), method=str(request.method, "utf-8"), uri=str(uri, "utf-8"), code=request.code, length=(request.sentLength or "-"), facility="tahoe.webish", level=log.OPERATIONAL, ) def censor(queryargs: bytes) -> bytes: """ Replace potentially sensitive values in query arguments with a constant string. """ args = parse_qsl(queryargs.decode("ascii"), keep_blank_values=True, encoding="utf8") result = [] for k, v in args: if k == "uri": # there is a form handler which redirects POST /uri?uri=FOO into # GET /uri/FOO so folks can paste in non-HTTP-prefixed uris. Make # sure we censor these. v = "[CENSORED]" elif k == "private-key": # Likewise, sometimes a private key is supplied with mutable # creation. v = "[CENSORED]" result.append((k, v)) # Customize safe to try to leave our markers intact. return urlencode(result, safe="[]").encode("ascii") def anonymous_tempfile_factory(tempdir: bytes) -> Callable[[], IO[bytes]]: """ Create a no-argument callable for creating a new temporary file in the given directory. :param tempdir: The directory in which temporary files with be created. :return: The callable. """ return lambda: tempfile.TemporaryFile(dir=tempdir) class TahoeLAFSSite(Site, object): """ The HTTP protocol factory used by Tahoe-LAFS. Among the behaviors provided: * A configurable temporary file factory for large request bodies to avoid keeping them in memory. * A log formatter that writes some access logs but omits capability strings to help keep them secret. 
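# Usage sketch of censor() above: capability-bearing query values ("uri",
# "private-key") are blanked before they can reach a log line, while innocuous
# arguments such as t=json pass through unchanged.  The cap value below is a
# placeholder, not a real capability.
assert censor(b"t=json&uri=URI%3ACHK%3Aplaceholder") == b"t=json&uri=[CENSORED]"
assert censor(b"t=json") == b"t=json"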
""" requestFactory = TahoeLAFSRequest def __init__(self, make_tempfile: Callable[[], IO[bytes]], *args, **kwargs): Site.__init__(self, *args, logFormatter=_logFormatter, **kwargs) assert callable(make_tempfile) with make_tempfile(): pass self._make_tempfile = make_tempfile def getContentFile(self, length: Optional[int]) -> IO[bytes]: if length is None or length >= 1024 * 1024: return self._make_tempfile() return BytesIO() class WebishServer(service.MultiService): # The type in Twisted for services is wrong in 22.10... # https://github.com/twisted/twisted/issues/10135 name = "webish" # type: ignore[assignment] def __init__(self, client, webport, make_tempfile, nodeurl_path=None, staticdir=None, clock=None, now_fn=time.time): service.MultiService.__init__(self) # the 'data' argument to all render() methods default to the Client # the 'clock' argument to root.Root is, if set, a # twisted.internet.task.Clock that is provided by the unit tests # so that they can test features that involve the passage of # time in a deterministic manner. self.root = root.Root(client, clock, now_fn) self.buildServer(webport, make_tempfile, nodeurl_path, staticdir) # If set, clock is a twisted.internet.task.Clock that the tests # use to test ophandle expiration. self._operations = OphandleTable(clock) self._operations.setServiceParent(self) self.root.putChild(b"operations", self._operations) self.root.putChild(b"storage-plugins", StoragePlugins(client)) def buildServer(self, webport, make_tempfile, nodeurl_path, staticdir): self.webport = webport self.site = TahoeLAFSSite(make_tempfile, self.root) self.staticdir = staticdir # so tests can check if staticdir: self.root.putChild(b"static", static.File(staticdir)) if re.search(r'^\d', webport): webport = "tcp:"+webport # twisted warns about bare "0" or "3456" # strports must be native strings. webport = ensure_str(webport) s = strports.service(webport, self.site) s.setServiceParent(self) self._scheme = None self._portnum = None self._url = None self._listener = s # stash it so we can query for the portnum self._started = defer.Deferred() if nodeurl_path: def _write_nodeurl_file(ign): # this file will be created with default permissions line = self.getURL() + "\n" fileutil.write_atomically(nodeurl_path, line, mode="") self._started.addCallback(_write_nodeurl_file) def getURL(self): assert self._url return self._url def getPortnum(self): assert self._portnum return self._portnum def startService(self): def _got_port(lp): self._portnum = lp.getHost().port # what is our webport? assert self._scheme self._url = "%s://127.0.0.1:%d/" % (self._scheme, self._portnum) self._started.callback(None) return lp def _fail(f): self._started.errback(f) return f service.MultiService.startService(self) s = self._listener if hasattr(s, 'endpoint') and hasattr(s, '_waitingForPort'): # Twisted 10.2 gives us a StreamServerEndpointService. This is # ugly but should do for now. 
classname = s.endpoint.__class__.__name__ if classname.startswith('SSL'): self._scheme = 'https' else: self._scheme = 'http' s._waitingForPort.addCallbacks(_got_port, _fail) elif isinstance(s, internet.TCPServer): # Twisted <= 10.1 self._scheme = 'http' _got_port(s._port) elif isinstance(s, internet.SSLServer): # Twisted <= 10.1 self._scheme = 'https' _got_port(s._port) else: # who knows, probably some weirdo future version of Twisted self._started.errback(AssertionError("couldn't find out the scheme or port for the web-API server")) def get_operations(self): """ :return: a reference to our "active operations" tracker """ return self._operations class IntroducerWebishServer(WebishServer): def __init__(self, introducer, webport, nodeurl_path=None, staticdir=None): service.MultiService.__init__(self) self.root = introweb.IntroducerRoot(introducer) self.buildServer(webport, tempfile.TemporaryFile, nodeurl_path, staticdir) tahoe_lafs-1.20.0/src/allmydata/cli/__init__.py0000644000000000000000000000000013615410400016224 0ustar00tahoe_lafs-1.20.0/src/allmydata/cli/grid_manager.py0000644000000000000000000001434313615410400017123 0ustar00""" A CLI for configuring a grid manager. """ from typing import Optional from datetime import ( timedelta, ) import click from twisted.python.filepath import ( FilePath, ) from allmydata.crypto import ( ed25519, ) from allmydata.util.abbreviate import ( abbreviate_time, ) from allmydata.grid_manager import ( create_grid_manager, save_grid_manager, load_grid_manager, current_datetime_with_zone, ) from allmydata.util import jsonbytes as json @click.group() @click.option( '--config', '-c', type=click.Path(), help="Configuration directory (or - for stdin)", required=True, ) @click.pass_context def grid_manager(ctx, config): """ A Tahoe Grid Manager issues certificates to storage-servers A Tahoe client with one or more Grid Manager public keys configured will only upload to a Storage Server that presents a valid certificate signed by one of the configured Grid Manager keys. Grid Manager configuration can be in a local directory or given via stdin. It contains long-term secret information (a private signing key) and should be kept safe. 
""" class Config(object): """ Available to all sub-commands as Click's context.obj """ _grid_manager = None @property def grid_manager(self): if self._grid_manager is None: config_path = _config_path_from_option(config) try: self._grid_manager = load_grid_manager(config_path) except ValueError as e: raise click.ClickException( "Error loading Grid Manager from '{}': {}".format(config, e) ) return self._grid_manager ctx.obj = Config() @grid_manager.command() @click.pass_context def create(ctx): """ Make a new Grid Manager """ config_location = ctx.parent.params["config"] fp = None if config_location != '-': fp = FilePath(config_location) gm = create_grid_manager() try: save_grid_manager(fp, gm) except OSError as e: raise click.ClickException( "Can't create '{}': {}".format(config_location, e) ) @grid_manager.command() @click.pass_obj def public_identity(config): """ Show the public identity key of a Grid Manager This is what you give to clients to add to their configuration so they use announcements from this Grid Manager """ click.echo(config.grid_manager.public_identity()) @grid_manager.command() @click.argument("name") @click.argument("public_key", type=click.STRING) @click.pass_context def add(ctx, name, public_key): """ Add a new storage-server by name to a Grid Manager PUBLIC_KEY is the contents of a node.pubkey file from a Tahoe node-directory. NAME is an arbitrary label. """ public_key = public_key.encode("ascii") try: ctx.obj.grid_manager.add_storage_server( name, ed25519.verifying_key_from_string(public_key), ) except KeyError: raise click.ClickException( "A storage-server called '{}' already exists".format(name) ) save_grid_manager( _config_path_from_option(ctx.parent.params["config"]), ctx.obj.grid_manager, create=False, ) return 0 @grid_manager.command() @click.argument("name") @click.pass_context def remove(ctx, name): """ Remove an existing storage-server by name from a Grid Manager """ fp = _config_path_from_option(ctx.parent.params["config"]) try: ctx.obj.grid_manager.remove_storage_server(name) except KeyError: raise click.ClickException( "No storage-server called '{}' exists".format(name) ) cert_count = 0 if fp is not None: while fp.child('{}.cert.{}'.format(name, cert_count)).exists(): fp.child('{}.cert.{}'.format(name, cert_count)).remove() cert_count += 1 save_grid_manager(fp, ctx.obj.grid_manager, create=False) @grid_manager.command() # noqa: F811 @click.pass_context def list(ctx): """ List all storage-servers known to a Grid Manager """ for name in sorted(ctx.obj.grid_manager.storage_servers.keys()): blank_name = " " * len(name) click.echo("{}: {}".format( name, str(ctx.obj.grid_manager.storage_servers[name].public_key_string(), "utf-8"))) for cert in ctx.obj.grid_manager.storage_servers[name].certificates: delta = current_datetime_with_zone() - cert.expires click.echo("{} cert {}: ".format(blank_name, cert.index), nl=False) if delta.total_seconds() < 0: click.echo("valid until {} ({})".format(cert.expires, abbreviate_time(delta))) else: click.echo("expired {} ({})".format(cert.expires, abbreviate_time(delta))) @grid_manager.command() @click.argument("name") @click.argument( "expiry_days", type=click.IntRange(1, 5*365), # XXX is 5 years a good maximum? 
) @click.pass_context def sign(ctx, name, expiry_days): """ sign a new certificate """ fp = _config_path_from_option(ctx.parent.params["config"]) expiry = timedelta(days=expiry_days) try: certificate = ctx.obj.grid_manager.sign(name, expiry) except KeyError: raise click.ClickException( "No storage-server called '{}' exists".format(name) ) certificate_data = json.dumps(certificate.marshal(), indent=4) click.echo(certificate_data) if fp is not None: next_serial = 0 f = None while f is None: fname = "{}.cert.{}".format(name, next_serial) try: f = fp.child(fname).create() except FileExistsError: f = None except OSError as e: raise click.ClickException(f"{fname}: {e}") next_serial += 1 with f: f.write(certificate_data.encode("ascii")) def _config_path_from_option(config: str) -> Optional[FilePath]: """ :param str config: a path or - :returns: a FilePath instance or None """ if config == "-": return None return FilePath(config) if __name__ == '__main__': grid_manager() # type: ignore tahoe_lafs-1.20.0/src/allmydata/crypto/__init__.py0000644000000000000000000000052213615410400017006 0ustar00""" Helper functions for cryptography-related operations inside Tahoe For the most part, these functions use and return objects that are documented in the `cryptography` library -- however, code inside Tahoe should only use these functions and not rely on features of any objects that `cryptography` documents. Ported to Python 3. """ tahoe_lafs-1.20.0/src/allmydata/crypto/aes.py0000644000000000000000000001070613615410400016024 0ustar00""" Helper functions for cryptograhpy-related operations inside Tahoe using AES These functions use and return objects that are documented in the `cryptography` library -- however, code inside Tahoe should only use functions from allmydata.crypto.aes and not rely on features of any objects that `cryptography` documents. Ported to Python 3. """ from dataclasses import dataclass from typing import Optional from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives.ciphers import ( Cipher, algorithms, modes, CipherContext, ) DEFAULT_IV = b'\x00' * 16 @dataclass class Encryptor: """ An object which can encrypt data. Create one using :func:`create_encryptor` and use it with :func:`encrypt_data` """ encrypt_context: CipherContext @dataclass class Decryptor: """ An object which can decrypt data. Create one using :func:`create_decryptor` and use it with :func:`decrypt_data` """ decrypt_context: CipherContext def create_encryptor(key: bytes, iv: Optional[bytes]=None) -> Encryptor: """ Create and return a new object which can do AES encryptions with the given key and initialization vector (IV). The default IV is 16 zero-bytes. :param bytes key: the key bytes, should be 128 or 256 bits (16 or 32 bytes) :param bytes iv: the Initialization Vector consisting of 16 bytes, or None for the default (which is 16 zero bytes) :returns: an object suitable for use with :func:`encrypt_data` (an :class:`Encryptor`) """ cryptor = _create_cryptor(key, iv) return Encryptor(cryptor) def encrypt_data(encryptor: Encryptor, plaintext: bytes) -> bytes: """ AES-encrypt `plaintext` with the given `encryptor`. 
:param encryptor: an instance of :class:`Encryptor` previously returned from `create_encryptor` :param bytes plaintext: the data to encrypt :returns: bytes of ciphertext """ if not isinstance(plaintext, (bytes, memoryview)): raise ValueError(f'Plaintext must be bytes or memoryview: {type(plaintext)}') return encryptor.encrypt_context.update(plaintext) def create_decryptor(key: bytes, iv: Optional[bytes]=None) -> Decryptor: """ Create and return a new object which can do AES decryptions with the given key and initialization vector (IV). The default IV is 16 zero-bytes. :param bytes key: the key bytes, should be 128 or 256 bits (16 or 32 bytes) :param bytes iv: the Initialization Vector consisting of 16 bytes, or None for the default (which is 16 zero bytes) :returns: an object suitable for use with :func:`decrypt_data` (an :class:`Decryptor` instance) """ cryptor = _create_cryptor(key, iv) return Decryptor(cryptor) def decrypt_data(decryptor: Decryptor, plaintext: bytes) -> bytes: """ AES-decrypt `plaintext` with the given `decryptor`. :param decryptor: an instance of :class:`Decryptor` previously returned from `create_decryptor` :param bytes plaintext: the data to decrypt :returns: bytes of ciphertext """ if not isinstance(plaintext, (bytes, memoryview)): raise ValueError(f'Plaintext must be bytes or memoryview: {type(plaintext)}') return decryptor.decrypt_context.update(plaintext) def _create_cryptor(key: bytes, iv: Optional[bytes]) -> CipherContext: """ Internal helper. See :func:`create_encryptor` or :func:`create_decryptor`. """ key = _validate_key(key) iv = _validate_iv(iv) cipher = Cipher( algorithms.AES(key), modes.CTR(iv), backend=default_backend() ) return cipher.encryptor() # type: ignore[return-type] def _validate_key(key: bytes) -> bytes: """ confirm `key` is suitable for AES encryption, or raise ValueError """ if not isinstance(key, bytes): raise TypeError('Key must be bytes') if len(key) not in (16, 32): raise ValueError('Key must be 16 or 32 bytes long') return key def _validate_iv(iv: Optional[bytes]) -> bytes: """ Returns a suitable initialiation vector. If `iv` is `None`, a default is returned. If `iv` is not a suitable initialization vector an error is raised. `iv` is returned if it valid. """ if iv is None: return DEFAULT_IV if not isinstance(iv, bytes): raise TypeError('IV must be bytes') if len(iv) != 16: raise ValueError('IV must be 16 bytes long') return iv tahoe_lafs-1.20.0/src/allmydata/crypto/ed25519.py0000644000000000000000000001173313615410400016253 0ustar00''' Ed25519 keys and helpers. Key Formatting -------------- - in base32, keys are 52 chars long (both signing and verifying keys) - in base62, keys is 43 chars long - in base64, keys is 43 chars long We can't use base64 because we want to reserve punctuation and preserve cut-and-pasteability. The base62 encoding is shorter than the base32 form, but the minor usability improvement is not worth the documentation and specification confusion of using a non-standard encoding. So we stick with base32. 
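# Hedged usage sketch of the AES helpers above: CTR mode is symmetric, so
# running the ciphertext through a decryptor built with the same key (and the
# default all-zero IV) recovers the plaintext.  The key and message bytes are
# made up for illustration.
from allmydata.crypto.aes import (
    create_encryptor, encrypt_data, create_decryptor, decrypt_data,
)

_key = b"\x01" * 32                      # 256-bit key; 16-byte keys also accepted
_ciphertext = encrypt_data(create_encryptor(_key), b"hello tahoe")
assert decrypt_data(create_decryptor(_key), _ciphertext) == b"hello tahoe"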
''' from cryptography.exceptions import ( InvalidSignature, ) from cryptography.hazmat.primitives.asymmetric.ed25519 import ( Ed25519PrivateKey, Ed25519PublicKey, ) from cryptography.hazmat.primitives.serialization import ( Encoding, PrivateFormat, NoEncryption, PublicFormat, ) from allmydata.crypto.util import remove_prefix from allmydata.crypto.error import BadSignature from allmydata.util.base32 import ( a2b, b2a, ) PRIVATE_KEY_PREFIX = b'priv-v0-' PUBLIC_KEY_PREFIX = b'pub-v0-' def create_signing_keypair(): """ Creates a new ed25519 keypair. :returns: 2-tuple of (private_key, public_key) """ private_key = Ed25519PrivateKey.generate() return private_key, private_key.public_key() def verifying_key_from_signing_key(private_key): """ :returns: the public key associated to the given `private_key` """ _validate_private_key(private_key) return private_key.public_key() def sign_data(private_key, data: bytes) -> bytes: """ Sign the given data using the given private key :param private_key: the private part returned from `create_signing_keypair` or from `signing_keypair_from_string` :param bytes data: the data to sign :returns: bytes representing the signature """ _validate_private_key(private_key) if not isinstance(data, bytes): raise ValueError('data must be bytes') return private_key.sign(data) def string_from_signing_key(private_key): """ Encode a private key to a string of bytes :param private_key: the private part returned from `create_signing_keypair` or from `signing_keypair_from_string` :returns: byte-string representing this key """ _validate_private_key(private_key) raw_key_bytes = private_key.private_bytes( Encoding.Raw, PrivateFormat.Raw, NoEncryption(), ) return PRIVATE_KEY_PREFIX + b2a(raw_key_bytes) def signing_keypair_from_string(private_key_bytes: bytes): """ Load a signing keypair from a string of bytes (which includes the PRIVATE_KEY_PREFIX) :returns: a 2-tuple of (private_key, public_key) """ if not isinstance(private_key_bytes, bytes): raise ValueError('private_key_bytes must be bytes') private_key = Ed25519PrivateKey.from_private_bytes( a2b(remove_prefix(private_key_bytes, PRIVATE_KEY_PREFIX)) ) return private_key, private_key.public_key() def verify_signature(public_key, alleged_signature: bytes, data: bytes): """ :param public_key: a verifying key :param bytes alleged_signature: the bytes of the alleged signature :param bytes data: the data which was allegedly signed :raises: BadSignature if the signature is bad :returns: None (or raises an exception). 
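# Hedged usage sketch of the ed25519 helpers above: generate a keypair,
# serialize the signing key with its "priv-v0-" prefix, reload it, and sign
# some bytes.  (verify_signature, defined next, checks such signatures.)
from allmydata.crypto.ed25519 import (
    create_signing_keypair, sign_data,
    string_from_signing_key, signing_keypair_from_string,
)

_priv, _pub = create_signing_keypair()
_encoded = string_from_signing_key(_priv)            # b"priv-v0-..." (base32 body)
_reloaded_priv, _reloaded_pub = signing_keypair_from_string(_encoded)
assert string_from_signing_key(_reloaded_priv) == _encoded
_sig = sign_data(_reloaded_priv, b"hello")           # 64-byte Ed25519 signature
assert len(_sig) == 64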
""" if not isinstance(alleged_signature, bytes): raise ValueError('alleged_signature must be bytes') if not isinstance(data, bytes): raise ValueError('data must be bytes') _validate_public_key(public_key) try: public_key.verify(alleged_signature, data) except InvalidSignature: raise BadSignature() def verifying_key_from_string(public_key_bytes): """ Load a verifying key from a string of bytes (which includes the PUBLIC_KEY_PREFIX) :returns: a public_key """ if not isinstance(public_key_bytes, bytes): raise ValueError('public_key_bytes must be bytes') return Ed25519PublicKey.from_public_bytes( a2b(remove_prefix(public_key_bytes, PUBLIC_KEY_PREFIX)) ) def string_from_verifying_key(public_key) -> bytes: """ Encode a public key to a string of bytes :param public_key: the public part of a keypair :returns: byte-string representing this key """ _validate_public_key(public_key) raw_key_bytes = public_key.public_bytes( Encoding.Raw, PublicFormat.Raw, ) return PUBLIC_KEY_PREFIX + b2a(raw_key_bytes) def _validate_public_key(public_key: Ed25519PublicKey): """ Internal helper. Verify that `public_key` is an appropriate object """ if not isinstance(public_key, Ed25519PublicKey): raise ValueError('public_key must be an Ed25519PublicKey') return None def _validate_private_key(private_key: Ed25519PrivateKey): """ Internal helper. Verify that `private_key` is an appropriate object """ if not isinstance(private_key, Ed25519PrivateKey): raise ValueError('private_key must be an Ed25519PrivateKey') return None tahoe_lafs-1.20.0/src/allmydata/crypto/error.py0000644000000000000000000000040713615410400016402 0ustar00""" Exceptions raise by allmydata.crypto.* modules Ported to Python 3. """ class BadSignature(Exception): """ An alleged signature did not match """ class BadPrefixError(Exception): """ A key did not start with the required prefix """ tahoe_lafs-1.20.0/src/allmydata/crypto/rsa.py0000644000000000000000000001576413615410400016052 0ustar00""" Helper functions for cryptography-related operations inside Tahoe using RSA public-key encryption and decryption. In cases where these functions happen to use and return objects that are documented in the `cryptography` library, code outside this module should only use functions from allmydata.crypto.rsa and not rely on features of any objects that `cryptography` documents. That is, the public and private keys are opaque objects; DO NOT depend on any of their methods. """ from __future__ import annotations from typing_extensions import TypeAlias from typing import Callable from functools import partial from cryptography.exceptions import InvalidSignature from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives import hashes from cryptography.hazmat.primitives.asymmetric import rsa, padding from cryptography.hazmat.primitives.serialization import load_der_private_key, load_der_public_key, \ Encoding, PrivateFormat, PublicFormat, NoEncryption from allmydata.crypto.error import BadSignature PublicKey: TypeAlias = rsa.RSAPublicKey PrivateKey: TypeAlias = rsa.RSAPrivateKey # This is the value that was used by `pycryptopp`, and we must continue to use it for # both backwards compatibility and interoperability. # # The docs for `cryptography` suggest to use the constant defined at # `cryptography.hazmat.primitives.asymmetric.padding.PSS.MAX_LENGTH`, but this causes old # signatures to fail to validate. 
RSA_PSS_SALT_LENGTH = 32 RSA_PADDING = padding.PSS( mgf=padding.MGF1(hashes.SHA256()), salt_length=RSA_PSS_SALT_LENGTH, ) def create_signing_keypair(key_size: int) -> tuple[PrivateKey, PublicKey]: """ Create a new RSA signing (private) keypair from scratch. Can be used with `sign_data` function. :param key_size: length of key in bits :returns: 2-tuple of (private_key, public_key) """ priv_key = rsa.generate_private_key( public_exponent=65537, key_size=key_size, backend=default_backend() ) return priv_key, priv_key.public_key() def create_signing_keypair_from_string(private_key_der: bytes) -> tuple[PrivateKey, PublicKey]: """ Create an RSA signing (private) key from previously serialized private key bytes. :param private_key_der: blob as returned from `der_string_from_signing_keypair` :returns: 2-tuple of (private_key, public_key) """ _load = partial( load_der_private_key, private_key_der, password=None, backend=default_backend(), ) def load_with_validation() -> PrivateKey: k = _load() assert isinstance(k, PrivateKey) return k def load_without_validation() -> PrivateKey: k = _load(unsafe_skip_rsa_key_validation=True) assert isinstance(k, PrivateKey) return k # Load it once without the potentially expensive OpenSSL validation # checks. These have superlinear complexity. We *will* run them just # below - but first we'll apply our own constant-time checks. load: Callable[[], PrivateKey] = load_without_validation try: unsafe_priv_key = load() except TypeError: # cryptography<39 does not support this parameter, so just load the # key with validation... unsafe_priv_key = load_with_validation() # But avoid *reloading* it since that will run the expensive # validation *again*. load = lambda: unsafe_priv_key if not isinstance(unsafe_priv_key, rsa.RSAPrivateKey): raise ValueError( "Private Key did not decode to an RSA key" ) if unsafe_priv_key.key_size != 2048: raise ValueError( "Private Key must be 2048 bits" ) # Now re-load it with OpenSSL's validation applied. safe_priv_key = load() return safe_priv_key, safe_priv_key.public_key() def der_string_from_signing_key(private_key: PrivateKey) -> bytes: """ Serializes a given RSA private key to a DER string :param private_key: a private key object as returned from `create_signing_keypair` or `create_signing_keypair_from_string` :returns: bytes representing `private_key` """ _validate_private_key(private_key) return private_key.private_bytes( # type: ignore[attr-defined] encoding=Encoding.DER, format=PrivateFormat.PKCS8, encryption_algorithm=NoEncryption(), ) def der_string_from_verifying_key(public_key: PublicKey) -> bytes: """ Serializes a given RSA public key to a DER string. 
:param public_key: a public key object as returned from `create_signing_keypair` or `create_signing_keypair_from_string` :returns: bytes representing `public_key` """ _validate_public_key(public_key) return public_key.public_bytes( encoding=Encoding.DER, format=PublicFormat.SubjectPublicKeyInfo, ) def create_verifying_key_from_string(public_key_der: bytes) -> PublicKey: """ Create an RSA verifying key from a previously serialized public key :param bytes public_key_der: a blob as returned by `der_string_from_verifying_key` :returns: a public key object suitable for use with other functions in this module """ pub_key = load_der_public_key( public_key_der, backend=default_backend(), ) assert isinstance(pub_key, PublicKey) return pub_key def sign_data(private_key: PrivateKey, data: bytes) -> bytes: """ :param private_key: the private part of a keypair returned from `create_signing_keypair_from_string` or `create_signing_keypair` :param data: the bytes to sign :returns: bytes which are a signature of the bytes given as `data`. """ _validate_private_key(private_key) return private_key.sign( data, RSA_PADDING, hashes.SHA256(), ) def verify_signature(public_key: PublicKey, alleged_signature: bytes, data: bytes) -> None: """ :param public_key: a verifying key, returned from `create_verifying_key_from_string` or `create_verifying_key_from_private_key` :param bytes alleged_signature: the bytes of the alleged signature :param bytes data: the data which was allegedly signed """ _validate_public_key(public_key) try: public_key.verify( alleged_signature, data, RSA_PADDING, hashes.SHA256(), ) except InvalidSignature: raise BadSignature() def _validate_public_key(public_key: PublicKey) -> None: """ Internal helper. Checks that `public_key` is a valid cryptography object """ if not isinstance(public_key, rsa.RSAPublicKey): raise ValueError( f"public_key must be an RSAPublicKey not {type(public_key)}" ) def _validate_private_key(private_key: PrivateKey) -> None: """ Internal helper. Checks that `public_key` is a valid cryptography object """ if not isinstance(private_key, rsa.RSAPrivateKey): raise ValueError( f"private_key must be an RSAPrivateKey not {type(private_key)}" ) tahoe_lafs-1.20.0/src/allmydata/crypto/util.py0000644000000000000000000000130013615410400016217 0ustar00""" Utilities used by allmydata.crypto modules Ported to Python 3. """ from allmydata.crypto.error import BadPrefixError def remove_prefix(s_bytes, prefix): """ :param bytes s_bytes: a string of bytes whose prefix is removed :param bytes prefix: the bytes to remove from the beginning of `s_bytes` Removes `prefix` from `s_bytes` and returns the new bytes or raises `BadPrefixError` if `s_bytes` did not start with the `prefix` specified. :returns: `s_bytes` with `prefix` removed from the front. """ if s_bytes.startswith(prefix): return s_bytes[len(prefix):] raise BadPrefixError( "did not see expected '{!r}' prefix".format(prefix) ) tahoe_lafs-1.20.0/src/allmydata/frontends/__init__.py0000644000000000000000000000000013615410400017457 0ustar00tahoe_lafs-1.20.0/src/allmydata/frontends/auth.py0000644000000000000000000000711013615410400016672 0ustar00""" Authentication for frontends. 
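# Hedged usage sketch of the RSA helpers above: make a 2048-bit keypair (the
# only size create_signing_keypair_from_string will accept back), round-trip
# both halves through DER, then sign and verify.
from allmydata.crypto.rsa import (
    create_signing_keypair, create_signing_keypair_from_string,
    der_string_from_signing_key, der_string_from_verifying_key,
    create_verifying_key_from_string, sign_data, verify_signature,
)

_priv, _pub = create_signing_keypair(2048)
_priv2, _pub2 = create_signing_keypair_from_string(der_string_from_signing_key(_priv))
_sig = sign_data(_priv2, b"some signed bytes")
verify_signature(
    create_verifying_key_from_string(der_string_from_verifying_key(_pub2)),
    _sig,
    b"some signed bytes",
)                                                    # returns None on success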
""" from zope.interface import implementer from twisted.internet import defer from twisted.cred import checkers, credentials from twisted.conch.ssh import keys from twisted.conch.checkers import SSHPublicKeyChecker, InMemorySSHKeyDB from allmydata.util.dictutil import BytesKeyDict from allmydata.util.fileutil import abspath_expanduser_unicode class NeedRootcapLookupScheme(Exception): """Accountname+Password-based access schemes require some kind of mechanism to translate name+passwd pairs into a rootcap, either a file of name/passwd/rootcap tuples, or a server to do the translation.""" class FTPAvatarID(object): def __init__(self, username, rootcap): self.username = username self.rootcap = rootcap @implementer(checkers.ICredentialsChecker) class AccountFileChecker(object): credentialInterfaces = (credentials.ISSHPrivateKey,) def __init__(self, client, accountfile): self.client = client path = abspath_expanduser_unicode(accountfile) with open_account_file(path) as f: self.rootcaps, pubkeys = load_account_file(f) self._pubkeychecker = SSHPublicKeyChecker(InMemorySSHKeyDB(pubkeys)) def _avatarId(self, username): return FTPAvatarID(username, self.rootcaps[username]) def requestAvatarId(self, creds): if credentials.ISSHPrivateKey.providedBy(creds): d = defer.maybeDeferred(self._pubkeychecker.requestAvatarId, creds) d.addCallback(self._avatarId) return d raise NotImplementedError() def open_account_file(path): """ Open and return the accounts file at the given path. """ return open(path, "rt", encoding="utf-8") def load_account_file(lines): """ Load credentials from an account file. :param lines: An iterable of account lines to load. :return: See ``create_account_maps``. """ return create_account_maps( parse_accounts( content_lines( lines, ), ), ) def content_lines(lines): """ Drop empty and commented-out lines (``#``-prefixed) from an iterator of lines. :param lines: An iterator of lines to process. :return: An iterator of lines including only those from ``lines`` that include content intended to be loaded. """ for line in lines: line = line.strip() if line and not line.startswith("#"): yield line def parse_accounts(lines): """ Parse account lines into their components (name, key, rootcap). """ for line in lines: name, passwd, rest = line.split(None, 2) if not passwd.startswith("ssh-"): raise ValueError( "Password-based authentication is not supported; " "configure key-based authentication instead." ) bits = rest.split() keystring = " ".join([passwd] + bits[:-1]) key = keys.Key.fromString(keystring) rootcap = bits[-1] yield (name, key, rootcap) def create_account_maps(accounts): """ Build mappings from account names to keys and rootcaps. :param accounts: An iterator if (name, key, rootcap) tuples. :return: A tuple of two dicts. The first maps account names to rootcaps. The second maps account names to public keys. """ rootcaps = BytesKeyDict() pubkeys = BytesKeyDict() for (name, key, rootcap) in accounts: name_bytes = name.encode("utf-8") rootcaps[name_bytes] = rootcap.encode("utf-8") pubkeys[name_bytes] = [key] return rootcaps, pubkeys tahoe_lafs-1.20.0/src/allmydata/frontends/sftpd.py0000644000000000000000000026435413615410400017070 0ustar00""" Ported to Python 3. 
""" import six import heapq, traceback, stat, struct from stat import S_IFREG, S_IFDIR from time import time, strftime, localtime from zope.interface import implementer from twisted.python import components from twisted.application import service, strports from twisted.conch.ssh import factory, keys, session from twisted.conch.ssh.filetransfer import FileTransferServer, SFTPError, \ FX_NO_SUCH_FILE, FX_OP_UNSUPPORTED, FX_PERMISSION_DENIED, FX_EOF, \ FX_BAD_MESSAGE, FX_FAILURE, FX_OK from twisted.conch.ssh.filetransfer import FXF_READ, FXF_WRITE, FXF_APPEND, \ FXF_CREAT, FXF_TRUNC, FXF_EXCL from twisted.conch.interfaces import ISFTPServer, ISFTPFile, IConchUser, ISession from twisted.conch.avatar import ConchUser from twisted.conch.openssh_compat import primes from twisted.cred import portal from twisted.internet.error import ProcessDone, ProcessTerminated from twisted.python.failure import Failure from twisted.internet.interfaces import ITransport from twisted.internet import defer from twisted.internet.interfaces import IConsumer from foolscap.api import eventually from allmydata.util import deferredutil from allmydata.util.assertutil import _assert, precondition from allmydata.util.consumer import download_to_data from allmydata.util.encodingutil import get_filesystem_encoding from allmydata.interfaces import IFileNode, IDirectoryNode, ExistingChildError, \ NoSuchChildError, ChildOfWrongTypeError from allmydata.mutable.common import NotWriteableError from allmydata.mutable.publish import MutableFileHandle from allmydata.immutable.upload import FileHandle from allmydata.dirnode import update_metadata from allmydata.util.fileutil import EncryptedTemporaryFile noisy = True from allmydata.util.log import NOISY, OPERATIONAL, WEIRD, \ msg as logmsg, PrefixingLogMixin def createSFTPError(errorCode, errorMessage): """ SFTPError that can accept both Unicode and bytes. Twisted expects _native_ strings for the SFTPError message, but we often do Unicode by default even on Python 2. """ return SFTPError(errorCode, six.ensure_str(errorMessage)) def eventually_callback(d): return lambda res: eventually(d.callback, res) def eventually_errback(d): return lambda err: eventually(d.errback, err) def _utf8(x): if isinstance(x, str): return x.encode('utf-8') if isinstance(x, bytes): return x return repr(x) def _to_sftp_time(t): """SFTP times are unsigned 32-bit integers representing UTC seconds (ignoring leap seconds) since the Unix epoch, January 1 1970 00:00 UTC. A Tahoe time is the corresponding float.""" return int(t) & int(0xFFFFFFFF) def _convert_error(res, request): """If res is not a Failure, return it, otherwise reraise the appropriate SFTPError.""" if not isinstance(res, Failure): logged_res = res if isinstance(res, (bytes, str)): logged_res = "" % (len(res),) logmsg("SUCCESS %r %r" % (request, logged_res,), level=OPERATIONAL) return res err = res logmsg("RAISE %r %r" % (request, err.value), level=OPERATIONAL) try: if noisy: logmsg(traceback.format_exc(err.value), level=NOISY) except Exception: # pragma: no cover pass # The message argument to SFTPError must not reveal information that # might compromise anonymity, if we are running over an anonymous network. 
if err.check(SFTPError): # original raiser of SFTPError has responsibility to ensure anonymity raise err if err.check(NoSuchChildError): childname = _utf8(err.value.args[0]) raise createSFTPError(FX_NO_SUCH_FILE, childname) if err.check(NotWriteableError) or err.check(ChildOfWrongTypeError): msg = _utf8(err.value.args[0]) raise createSFTPError(FX_PERMISSION_DENIED, msg) if err.check(ExistingChildError): # Versions of SFTP after v3 (which is what twisted.conch implements) # define a specific error code for this case: FX_FILE_ALREADY_EXISTS. # However v3 doesn't; instead, other servers such as sshd return # FX_FAILURE. The gvfs SFTP backend, for example, depends on this # to translate the error to the equivalent of POSIX EEXIST, which is # necessary for some picky programs (such as gedit). msg = _utf8(err.value.args[0]) raise createSFTPError(FX_FAILURE, msg) if err.check(NotImplementedError): raise createSFTPError(FX_OP_UNSUPPORTED, _utf8(err.value)) if err.check(EOFError): raise createSFTPError(FX_EOF, "end of file reached") if err.check(defer.FirstError): _convert_error(err.value.subFailure, request) # We assume that the error message is not anonymity-sensitive. raise createSFTPError(FX_FAILURE, _utf8(err.value)) def _repr_flags(flags): return "|".join([f for f in [(flags & FXF_READ) and "FXF_READ" or None, (flags & FXF_WRITE) and "FXF_WRITE" or None, (flags & FXF_APPEND) and "FXF_APPEND" or None, (flags & FXF_CREAT) and "FXF_CREAT" or None, (flags & FXF_TRUNC) and "FXF_TRUNC" or None, (flags & FXF_EXCL) and "FXF_EXCL" or None, ] if f]) def _lsLine(name, attrs): st_uid = "tahoe" st_gid = "tahoe" st_mtime = attrs.get("mtime", 0) st_mode = attrs["permissions"] # Some clients won't tolerate '?' in the size field (#1337). st_size = attrs.get("size", 0) # We don't know how many links there really are to this object. st_nlink = 1 # Based on . # We previously could not call the version in Twisted because we needed the change # (released in Twisted v8.2). # Since we now depend on Twisted v10.1, consider calling Twisted's version. mode = st_mode perms = ["-"] * 10 ft = stat.S_IFMT(mode) if stat.S_ISDIR(ft): perms[0] = 'd' elif stat.S_ISREG(ft): perms[0] = '-' else: perms[0] = '?' 
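# Worked example of _repr_flags() above, using the twisted.conch flag
# constants imported at the top of this module.
assert _repr_flags(FXF_READ | FXF_WRITE | FXF_CREAT) == "FXF_READ|FXF_WRITE|FXF_CREAT"
assert _repr_flags(FXF_READ) == "FXF_READ"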
# user if mode&stat.S_IRUSR: perms[1] = 'r' if mode&stat.S_IWUSR: perms[2] = 'w' if mode&stat.S_IXUSR: perms[3] = 'x' # group if mode&stat.S_IRGRP: perms[4] = 'r' if mode&stat.S_IWGRP: perms[5] = 'w' if mode&stat.S_IXGRP: perms[6] = 'x' # other if mode&stat.S_IROTH: perms[7] = 'r' if mode&stat.S_IWOTH: perms[8] = 'w' if mode&stat.S_IXOTH: perms[9] = 'x' # suid/sgid never set l = "".join(perms) l += str(st_nlink).rjust(5) + ' ' un = str(st_uid) l += un.ljust(9) gr = str(st_gid) l += gr.ljust(9) sz = str(st_size) l += sz.rjust(8) l += ' ' day = 60 * 60 * 24 sixmo = day * 7 * 26 now = time() if st_mtime + sixmo < now or st_mtime > now + day: # mtime is more than 6 months ago, or more than one day in the future l += strftime("%b %d %Y ", localtime(st_mtime)) else: l += strftime("%b %d %H:%M ", localtime(st_mtime)) l = l.encode("utf-8") l += name return l def _no_write(parent_readonly, child, metadata=None): """Whether child should be listed as having read-only permissions in parent.""" if child.is_unknown(): return True elif child.is_mutable(): return child.is_readonly() elif parent_readonly or IDirectoryNode.providedBy(child): return True else: return metadata is not None and metadata.get('no-write', False) def _populate_attrs(childnode, metadata, size=None): attrs = {} # The permissions must have the S_IFDIR (040000) or S_IFREG (0100000) # bits, otherwise the client may refuse to open a directory. # Also, sshfs run as a non-root user requires files and directories # to be world-readable/writeable. # It is important that we never set the executable bits on files. # # Directories and unknown nodes have no size, and SFTP doesn't # require us to make one up. # # childnode might be None, meaning that the file doesn't exist yet, # but we're going to write it later. if childnode and childnode.is_unknown(): perms = 0 elif childnode and IDirectoryNode.providedBy(childnode): perms = S_IFDIR | 0o777 else: # For files, omit the size if we don't immediately know it. if childnode and size is None: size = childnode.get_size() if size is not None: _assert(isinstance(size, int) and not isinstance(size, bool), size=size) attrs['size'] = size perms = S_IFREG | 0o666 if metadata: if metadata.get('no-write', False): perms &= S_IFDIR | S_IFREG | 0o555 # clear 'w' bits # See webapi.txt for what these times mean. # We would prefer to omit atime, but SFTP version 3 can only # accept mtime if atime is also set. if 'linkmotime' in metadata.get('tahoe', {}): attrs['ctime'] = attrs['mtime'] = attrs['atime'] = _to_sftp_time(metadata['tahoe']['linkmotime']) elif 'mtime' in metadata: attrs['ctime'] = attrs['mtime'] = attrs['atime'] = _to_sftp_time(metadata['mtime']) if 'linkcrtime' in metadata.get('tahoe', {}): attrs['createtime'] = _to_sftp_time(metadata['tahoe']['linkcrtime']) attrs['permissions'] = perms # twisted.conch.ssh.filetransfer only implements SFTP version 3, # which doesn't include SSH_FILEXFER_ATTR_FLAGS. 
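# Worked example of the 'no-write' masking performed by _populate_attrs()
# above: a regular file normally advertises S_IFREG | 0o666, and clearing the
# write bits leaves S_IFREG | 0o444.
import stat as _stat
_perms = _stat.S_IFREG | 0o666
_perms &= _stat.S_IFDIR | _stat.S_IFREG | 0o555      # the 'no-write' mask
assert _perms == _stat.S_IFREG | 0o444
assert not (_perms & _stat.S_IWUSR)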
return attrs def _attrs_to_metadata(attrs): metadata = {} for key in attrs: if key == "mtime" or key == "ctime" or key == "createtime": metadata[key] = int(attrs[key]) elif key.startswith("ext_"): metadata[key] = str(attrs[key]) perms = attrs.get('permissions', stat.S_IWUSR) if not (perms & stat.S_IWUSR): metadata['no-write'] = True return metadata def _direntry_for(filenode_or_parent, childname, filenode=None): precondition(isinstance(childname, (str, type(None))), childname=childname) if childname is None: filenode_or_parent = filenode if filenode_or_parent: rw_uri = filenode_or_parent.get_write_uri() if rw_uri and childname: return rw_uri + b"/" + childname.encode('utf-8') else: return rw_uri return None @implementer(IConsumer) class OverwriteableFileConsumer(PrefixingLogMixin): """I act both as a consumer for the download of the original file contents, and as a wrapper for a temporary file that records the downloaded data and any overwrites. I use a priority queue to keep track of which regions of the file have been overwritten but not yet downloaded, so that the download does not clobber overwritten data. I use another priority queue to record milestones at which to make callbacks indicating that a given number of bytes have been downloaded. The temporary file reflects the contents of the file that I represent, except that: - regions that have neither been downloaded nor overwritten, if present, contain garbage. - the temporary file may be shorter than the represented file (it is never longer). The latter's current size is stored in self.current_size. This abstraction is mostly independent of SFTP. Consider moving it, if it is found useful for other frontends.""" def __init__(self, download_size, tempfile_maker): PrefixingLogMixin.__init__(self, facility="tahoe.sftp") if noisy: self.log(".__init__(%r, %r)" % (download_size, tempfile_maker), level=NOISY) self.download_size = download_size self.current_size = download_size self.f = tempfile_maker() self.downloaded = 0 self.milestones = [] # empty heap of (offset, d) self.overwrites = [] # empty heap of (start, end) self.is_closed = False self.done = defer.Deferred() self.done_status = None # None -> not complete, Failure -> download failed, str -> download succeeded self.producer = None def get_file(self): return self.f def get_current_size(self): return self.current_size def set_current_size(self, size): if noisy: self.log(".set_current_size(%r), current_size = %r, downloaded = %r" % (size, self.current_size, self.downloaded), level=NOISY) if size < self.current_size or size < self.downloaded: self.f.truncate(size) if size > self.current_size: self.overwrite(self.current_size, b"\x00" * (size - self.current_size)) self.current_size = size # make the invariant self.download_size <= self.current_size be true again if size < self.download_size: self.download_size = size if self.downloaded >= self.download_size: self.download_done(b"size changed") def registerProducer(self, p, streaming): if noisy: self.log(".registerProducer(%r, streaming=%r)" % (p, streaming), level=NOISY) if self.producer is not None: raise RuntimeError("producer is already registered") self.producer = p if streaming: # call resumeProducing once to start things off p.resumeProducing() else: def _iterate(): if self.done_status is None: p.resumeProducing() eventually(_iterate) _iterate() def write(self, data): if noisy: self.log(".write()" % (len(data),), level=NOISY) if self.is_closed: return if self.downloaded >= self.download_size: return next_downloaded = 
self.downloaded + len(data) if next_downloaded > self.download_size: data = data[:(self.download_size - self.downloaded)] while len(self.overwrites) > 0: (start, end) = self.overwrites[0] if start >= next_downloaded: # This and all remaining overwrites are after the data we just downloaded. break if start > self.downloaded: # The data we just downloaded has been partially overwritten. # Write the prefix of it that precedes the overwritten region. self.f.seek(self.downloaded) self.f.write(data[:(start - self.downloaded)]) # This merges consecutive overwrites if possible, which allows us to detect the # case where the download can be stopped early because the remaining region # to download has already been fully overwritten. heapq.heappop(self.overwrites) while len(self.overwrites) > 0: (start1, end1) = self.overwrites[0] if start1 > end: break end = end1 heapq.heappop(self.overwrites) if end >= next_downloaded: # This overwrite extends past the downloaded data, so there is no # more data to consider on this call. heapq.heappush(self.overwrites, (next_downloaded, end)) self._update_downloaded(next_downloaded) return elif end >= self.downloaded: data = data[(end - self.downloaded):] self._update_downloaded(end) self.f.seek(self.downloaded) self.f.write(data) self._update_downloaded(next_downloaded) def _update_downloaded(self, new_downloaded): self.downloaded = new_downloaded milestone = new_downloaded if len(self.overwrites) > 0: (start, end) = self.overwrites[0] if start <= new_downloaded and end > milestone: milestone = end while len(self.milestones) > 0: (next_, d) = self.milestones[0] if next_ > milestone: return if noisy: self.log("MILESTONE %r %r" % (next_, d), level=NOISY) heapq.heappop(self.milestones) eventually_callback(d)(b"reached") if milestone >= self.download_size: self.download_done(b"reached download size") def overwrite(self, offset, data): if noisy: self.log(".overwrite(%r, )" % (offset, len(data)), level=NOISY) if self.is_closed: self.log("overwrite called on a closed OverwriteableFileConsumer", level=WEIRD) raise createSFTPError(FX_BAD_MESSAGE, "cannot write to a closed file handle") if offset > self.current_size: # Normally writing at an offset beyond the current end-of-file # would leave a hole that appears filled with zeroes. However, an # EncryptedTemporaryFile doesn't behave like that (if there is a # hole in the file on disk, the zeroes that are read back will be # XORed with the keystream). So we must explicitly write zeroes in # the gap between the current EOF and the offset. self.f.seek(self.current_size) self.f.write(b"\x00" * (offset - self.current_size)) start = self.current_size else: self.f.seek(offset) start = offset self.f.write(data) end = offset + len(data) self.current_size = max(self.current_size, end) if end > self.downloaded: heapq.heappush(self.overwrites, (start, end)) def read(self, offset, length): """When the data has been read, callback the Deferred that we return with this data. Otherwise errback the Deferred that we return. The caller must perform no more overwrites until the Deferred has fired.""" if noisy: self.log(".read(%r, %r), current_size = %r" % (offset, length, self.current_size), level=NOISY) if self.is_closed: self.log("read called on a closed OverwriteableFileConsumer", level=WEIRD) raise createSFTPError(FX_BAD_MESSAGE, "cannot read from a closed file handle") # Note that the overwrite method is synchronous. When a write request is processed # (e.g. 
a writeChunk request on the async queue of GeneralSFTPFile), overwrite will # be called and will update self.current_size if necessary before returning. Therefore, # self.current_size will be up-to-date for a subsequent call to this read method, and # so it is correct to do the check for a read past the end-of-file here. if offset >= self.current_size: def _eof(): raise EOFError("read past end of file") return defer.execute(_eof) if offset + length > self.current_size: length = self.current_size - offset if noisy: self.log("truncating read to %r bytes" % (length,), level=NOISY) needed = min(offset + length, self.download_size) # If we fail to reach the needed number of bytes, the read request will fail. d = self.when_reached_or_failed(needed) def _reached_in_read(res): # It is not necessarily the case that self.downloaded >= needed, because # the file might have been truncated (thus truncating the download) and # then extended. _assert(self.current_size >= offset + length, current_size=self.current_size, offset=offset, length=length) if noisy: self.log("_reached_in_read(%r), self.f = %r" % (res, self.f,), level=NOISY) self.f.seek(offset) return self.f.read(length) d.addCallback(_reached_in_read) return d def when_reached_or_failed(self, index): if noisy: self.log(".when_reached_or_failed(%r)" % (index,), level=NOISY) def _reached(res): if noisy: self.log("reached %r with result %r" % (index, res), level=NOISY) return res if self.done_status is not None: return defer.execute(_reached, self.done_status) if index <= self.downloaded: # already reached successfully if noisy: self.log("already reached %r successfully" % (index,), level=NOISY) return defer.succeed("already reached successfully") d = defer.Deferred() d.addCallback(_reached) heapq.heappush(self.milestones, (index, d)) return d def when_done(self): d = defer.Deferred() self.done.addCallback(lambda ign: eventually_callback(d)(self.done_status)) return d def download_done(self, res): _assert(isinstance(res, (bytes, Failure)), res=res) # Only the first call to download_done counts, but we log subsequent calls # (multiple calls are normal). if self.done_status is not None: self.log("IGNORING extra call to download_done with result %r; previous result was %r" % (res, self.done_status), level=OPERATIONAL) return self.log("DONE with result %r" % (res,), level=OPERATIONAL) # We avoid errbacking self.done so that we are not left with an 'Unhandled error in Deferred' # in case when_done() is never called. Instead we stash the failure in self.done_status, # from where the callback added in when_done() can retrieve it. self.done_status = res eventually_callback(self.done)(None) while len(self.milestones) > 0: (next_, d) = self.milestones[0] if noisy: self.log("MILESTONE FINISH %r %r %r" % (next_, d, res), level=NOISY) heapq.heappop(self.milestones) # The callback means that the milestone has been reached if # it is ever going to be. Note that the file may have been # truncated to before the milestone. eventually_callback(d)(res) def close(self): if not self.is_closed: self.is_closed = True try: self.f.close() except Exception as e: self.log("suppressed %r from close of temporary file %r" % (e, self.f), level=WEIRD) self.download_done(b"closed") return self.done_status def unregisterProducer(self): # This will happen just before our client calls download_done, which will tell # us the outcome of the download; we don't know the outcome at this point. 
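# Self-contained sketch of the milestone bookkeeping described above: a heap
# of (offset, callback) pairs that are popped and fired once the download
# passes each offset.  The real class stores Deferreds rather than plain
# callables and also folds the "overwrites" heap into the milestone check.
import heapq as _heapq

_milestones = []                                  # heap of (offset, callback)
_fired = []
_heapq.heappush(_milestones, (100, lambda: _fired.append(100)))
_heapq.heappush(_milestones, (50, lambda: _fired.append(50)))

def _update_downloaded_sketch(downloaded):
    while _milestones and _milestones[0][0] <= downloaded:
        _offset, _fire = _heapq.heappop(_milestones)
        _fire()

_update_downloaded_sketch(75)                     # only the 50-byte milestone fires
_update_downloaded_sketch(150)                    # now the 100-byte milestone fires
assert _fired == [50, 100]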
self.producer = None self.log("producer unregistered", level=NOISY) SIZE_THRESHOLD = 1000 @implementer(ISFTPFile) class ShortReadOnlySFTPFile(PrefixingLogMixin): """I represent a file handle to a particular file on an SFTP connection. I am used only for short immutable files opened in read-only mode. When I am created, the file contents start to be downloaded to memory. self.async_ is used to delay read requests until the download has finished.""" def __init__(self, userpath, filenode, metadata): PrefixingLogMixin.__init__(self, facility="tahoe.sftp", prefix=userpath) if noisy: self.log(".__init__(%r, %r, %r)" % (userpath, filenode, metadata), level=NOISY) precondition(isinstance(userpath, bytes) and IFileNode.providedBy(filenode), userpath=userpath, filenode=filenode) self.filenode = filenode self.metadata = metadata self.async_ = download_to_data(filenode) self.closed = False def readChunk(self, offset, length): request = ".readChunk(%r, %r)" % (offset, length) self.log(request, level=OPERATIONAL) if self.closed: def _closed(): raise createSFTPError(FX_BAD_MESSAGE, "cannot read from a closed file handle") return defer.execute(_closed) d = defer.Deferred() def _read(data): if noisy: self.log("_read() in readChunk(%r, %r)" % (len(data), offset, length), level=NOISY) # "In response to this request, the server will read as many bytes as it # can from the file (up to 'len'), and return them in a SSH_FXP_DATA # message. If an error occurs or EOF is encountered before reading any # data, the server will respond with SSH_FXP_STATUS. For normal disk # files, it is guaranteed that this will read the specified number of # bytes, or up to end of file." # # i.e. we respond with an EOF error iff offset is already at EOF. if offset >= len(data): eventually_errback(d)(Failure(createSFTPError(FX_EOF, "read at or past end of file"))) else: eventually_callback(d)(data[offset:offset+length]) # truncated if offset+length > len(data) return data self.async_.addCallbacks(_read, eventually_errback(d)) d.addBoth(_convert_error, request) return d def writeChunk(self, offset, data): self.log(".writeChunk(%r, ) denied" % (offset, len(data)), level=OPERATIONAL) def _denied(): raise createSFTPError(FX_PERMISSION_DENIED, "file handle was not opened for writing") return defer.execute(_denied) def close(self): self.log(".close()", level=OPERATIONAL) self.closed = True return defer.succeed(None) def getAttrs(self): request = ".getAttrs()" self.log(request, level=OPERATIONAL) if self.closed: def _closed(): raise createSFTPError(FX_BAD_MESSAGE, "cannot get attributes for a closed file handle") return defer.execute(_closed) d = defer.execute(_populate_attrs, self.filenode, self.metadata) d.addBoth(_convert_error, request) return d def setAttrs(self, attrs): self.log(".setAttrs(%r) denied" % (attrs,), level=OPERATIONAL) def _denied(): raise createSFTPError(FX_PERMISSION_DENIED, "file handle was not opened for writing") return defer.execute(_denied) @implementer(ISFTPFile) class GeneralSFTPFile(PrefixingLogMixin): """I represent a file handle to a particular file on an SFTP connection. I wrap an instance of OverwriteableFileConsumer, which is responsible for storing the file contents. In order to allow write requests to be satisfied immediately, there is effectively a FIFO queue between requests made to this file handle, and requests to my OverwriteableFileConsumer. This queue is implemented by the callback chain of self.async_. 
When first constructed, I am in an 'unopened' state that causes most operations to be delayed until 'open' is called.""" def __init__(self, userpath, flags, close_notify, convergence): PrefixingLogMixin.__init__(self, facility="tahoe.sftp", prefix=userpath) if noisy: self.log(".__init__(%r, %r = %r, %r, )" % (userpath, flags, _repr_flags(flags), close_notify), level=NOISY) precondition(isinstance(userpath, bytes), userpath=userpath) self.userpath = userpath self.flags = flags self.close_notify = close_notify self.convergence = convergence self.async_ = defer.Deferred() # Creating or truncating the file is a change, but if FXF_EXCL is set, a zero-length file has already been created. self.has_changed = (flags & (FXF_CREAT | FXF_TRUNC)) and not (flags & FXF_EXCL) self.closed = False self.abandoned = False self.parent = None self.childname = None self.filenode = None self.metadata = None # self.consumer should only be relied on in callbacks for self.async_, since it might # not be set before then. self.consumer = None def open(self, parent=None, childname=None, filenode=None, metadata=None): # noqa: F811 self.log(".open(parent=%r, childname=%r, filenode=%r, metadata=%r)" % (parent, childname, filenode, metadata), level=OPERATIONAL) precondition(isinstance(childname, (str, type(None))), childname=childname) precondition(filenode is None or IFileNode.providedBy(filenode), filenode=filenode) precondition(not self.closed, sftpfile=self) # If the file has been renamed, the new (parent, childname) takes precedence. if self.parent is None: self.parent = parent if self.childname is None: self.childname = childname self.filenode = filenode self.metadata = metadata tempfile_maker = EncryptedTemporaryFile if (self.flags & FXF_TRUNC) or not filenode: # We're either truncating or creating the file, so we don't need the old contents. self.consumer = OverwriteableFileConsumer(0, tempfile_maker) self.consumer.download_done(b"download not needed") else: self.async_.addCallback(lambda ignored: filenode.get_best_readable_version()) def _read(version): if noisy: self.log("_read", level=NOISY) download_size = version.get_size() _assert(download_size is not None) self.consumer = OverwriteableFileConsumer(download_size, tempfile_maker) d = version.read(self.consumer, 0, None) def _finished(res): if not isinstance(res, Failure): res = b"download finished" self.consumer.download_done(res) d.addBoth(_finished) # It is correct to drop d here. self.async_.addCallback(_read) eventually_callback(self.async_)(None) if noisy: self.log("open done", level=NOISY) return self def get_userpath(self): return self.userpath def get_direntry(self): return _direntry_for(self.parent, self.childname) def rename(self, new_userpath, new_parent, new_childname): self.log(".rename(%r, %r, %r)" % (new_userpath, new_parent, new_childname), level=OPERATIONAL) precondition(isinstance(new_userpath, bytes) and isinstance(new_childname, str), new_userpath=new_userpath, new_childname=new_childname) self.userpath = new_userpath self.parent = new_parent self.childname = new_childname def abandon(self): self.log(".abandon()", level=OPERATIONAL) self.abandoned = True def sync(self, ign=None): # The ign argument allows some_file.sync to be used as a callback. 
self.log(".sync()", level=OPERATIONAL) d = defer.Deferred() self.async_.addBoth(eventually_callback(d)) def _done(res): if noisy: self.log("_done(%r) in .sync()" % (res,), level=NOISY) return res d.addBoth(_done) return d def readChunk(self, offset, length): request = ".readChunk(%r, %r)" % (offset, length) self.log(request, level=OPERATIONAL) if not (self.flags & FXF_READ): def _denied(): raise createSFTPError(FX_PERMISSION_DENIED, "file handle was not opened for reading") return defer.execute(_denied) if self.closed: def _closed(): raise createSFTPError(FX_BAD_MESSAGE, "cannot read from a closed file handle") return defer.execute(_closed) d = defer.Deferred() def _read(ign): if noisy: self.log("_read in readChunk(%r, %r)" % (offset, length), level=NOISY) d2 = self.consumer.read(offset, length) d2.addBoth(eventually_callback(d)) # It is correct to drop d2 here. return None self.async_.addCallbacks(_read, eventually_errback(d)) d.addBoth(_convert_error, request) return d def writeChunk(self, offset, data): self.log(".writeChunk(%r, )" % (offset, len(data)), level=OPERATIONAL) if not (self.flags & FXF_WRITE): def _denied(): raise createSFTPError(FX_PERMISSION_DENIED, "file handle was not opened for writing") return defer.execute(_denied) if self.closed: def _closed(): raise createSFTPError(FX_BAD_MESSAGE, "cannot write to a closed file handle") return defer.execute(_closed) self.has_changed = True # Note that we return without waiting for the write to occur. Reads and # close wait for prior writes, and will fail if any prior operation failed. # This is ok because SFTP makes no guarantee that the write completes # before the request does. In fact it explicitly allows write errors to be # delayed until close: # "One should note that on some server platforms even a close can fail. # This can happen e.g. if the server operating system caches writes, # and an error occurs while flushing cached writes during the close." def _write(ign): if noisy: self.log("_write in .writeChunk(%r, ), current_size = %r" % (offset, len(data), self.consumer.get_current_size()), level=NOISY) # FXF_APPEND means that we should always write at the current end of file. write_offset = offset if self.flags & FXF_APPEND: write_offset = self.consumer.get_current_size() self.consumer.overwrite(write_offset, data) if noisy: self.log("overwrite done", level=NOISY) return None self.async_.addCallback(_write) # don't addErrback to self.async_, just allow subsequent async ops to fail. return defer.succeed(None) def _do_close(self, res, d=None): if noisy: self.log("_do_close(%r)" % (res,), level=NOISY) status = None if self.consumer: status = self.consumer.close() # We must close_notify before re-firing self.async_. if self.close_notify: self.close_notify(self.userpath, self.parent, self.childname, self) if not isinstance(res, Failure) and isinstance(status, Failure): res = status if d: eventually_callback(d)(res) elif isinstance(res, Failure): self.log("suppressing %r" % (res,), level=OPERATIONAL) def close(self): request = ".close()" self.log(request, level=OPERATIONAL) if self.closed: return defer.succeed(None) # This means that close has been called, not that the close has succeeded. self.closed = True if not (self.flags & (FXF_WRITE | FXF_CREAT)): # We never fail a close of a handle opened only for reading, even if the file # failed to download. (We could not do so deterministically, because it would # depend on whether we reached the point of failure before abandoning the # download.) 
Any reads that depended on file content that could not be downloaded # will have failed. It is important that we don't close the consumer until # previous read operations have completed. self.async_.addBoth(self._do_close) return defer.succeed(None) # We must capture the abandoned, parent, and childname variables synchronously # at the close call. This is needed by the correctness arguments in the comments # for _abandon_any_heisenfiles and _rename_heisenfiles. # Note that the file must have been opened before it can be closed. abandoned = self.abandoned parent = self.parent childname = self.childname # has_changed is set when writeChunk is called, not when the write occurs, so # it is correct to optimize out the commit if it is False at the close call. has_changed = self.has_changed def _commit(ign): d2 = self.consumer.when_done() if self.filenode and self.filenode.is_mutable(): self.log("update mutable file %r childname=%r metadata=%r" % (self.filenode, childname, self.metadata), level=OPERATIONAL) if self.metadata.get('no-write', False) and not self.filenode.is_readonly(): _assert(parent and childname, parent=parent, childname=childname, metadata=self.metadata) d2.addCallback(lambda ign: parent.set_metadata_for(childname, self.metadata)) d2.addCallback(lambda ign: self.filenode.overwrite(MutableFileHandle(self.consumer.get_file()))) else: def _add_file(ign): self.log("_add_file childname=%r" % (childname,), level=OPERATIONAL) u = FileHandle(self.consumer.get_file(), self.convergence) return parent.add_file(childname, u, metadata=self.metadata) d2.addCallback(_add_file) return d2 # If the file has been abandoned, we don't want the close operation to get "stuck", # even if self.async_ fails to re-fire. Completing the close independently of self.async_ # in that case should ensure that dropping an ssh connection is sufficient to abandon # any heisenfiles that were not explicitly closed in that connection. if abandoned or not has_changed: d = defer.succeed(None) self.async_.addBoth(self._do_close) else: d = defer.Deferred() self.async_.addCallback(_commit) self.async_.addBoth(self._do_close, d) d.addBoth(_convert_error, request) return d def getAttrs(self): request = ".getAttrs()" self.log(request, level=OPERATIONAL) if self.closed: def _closed(): raise createSFTPError(FX_BAD_MESSAGE, "cannot get attributes for a closed file handle") return defer.execute(_closed) # Optimization for read-only handles, when we already know the metadata. if not (self.flags & (FXF_WRITE | FXF_CREAT)) and self.metadata and self.filenode and not self.filenode.is_mutable(): return defer.succeed(_populate_attrs(self.filenode, self.metadata)) d = defer.Deferred() def _get(ign): if noisy: self.log("_get(%r) in %r, filenode = %r, metadata = %r" % (ign, request, self.filenode, self.metadata), level=NOISY) # self.filenode might be None, but that's ok. 
attrs = _populate_attrs(self.filenode, self.metadata, size=self.consumer.get_current_size()) eventually_callback(d)(attrs) return None self.async_.addCallbacks(_get, eventually_errback(d)) d.addBoth(_convert_error, request) return d def setAttrs(self, attrs, only_if_at=None): request = ".setAttrs(%r, only_if_at=%r)" % (attrs, only_if_at) self.log(request, level=OPERATIONAL) if not (self.flags & FXF_WRITE): def _denied(): raise createSFTPError(FX_PERMISSION_DENIED, "file handle was not opened for writing") return defer.execute(_denied) if self.closed: def _closed(): raise createSFTPError(FX_BAD_MESSAGE, "cannot set attributes for a closed file handle") return defer.execute(_closed) size = attrs.get("size", None) if size is not None and (not isinstance(size, int) or size < 0): def _bad(): raise createSFTPError(FX_BAD_MESSAGE, "new size is not a valid nonnegative integer") return defer.execute(_bad) d = defer.Deferred() def _set(ign): if noisy: self.log("_set(%r) in %r" % (ign, request), level=NOISY) current_direntry = _direntry_for(self.parent, self.childname, self.filenode) if only_if_at and only_if_at != current_direntry: if noisy: self.log("not setting attributes: current_direntry=%r in %r" % (current_direntry, request), level=NOISY) return None now = time() self.metadata = update_metadata(self.metadata, _attrs_to_metadata(attrs), now) if size is not None: # TODO: should we refuse to truncate a file opened with FXF_APPEND? # self.consumer.set_current_size(size) eventually_callback(d)(None) return None self.async_.addCallbacks(_set, eventually_errback(d)) d.addBoth(_convert_error, request) return d class StoppableList(object): def __init__(self, items): self.items = items def __iter__(self): for i in self.items: yield i def close(self): pass class Reason(object): def __init__(self, value): self.value = value # A "heisenfile" is a file that has been opened with write flags # (FXF_WRITE and/or FXF_CREAT) and not yet close-notified. # 'all_heisenfiles' maps from a direntry string to a list of # GeneralSFTPFile. # # A direntry string is parent_write_uri + "/" + childname_utf8 for # an immutable file, or file_write_uri for a mutable file. # Updates to this dict are single-threaded. 
all_heisenfiles = {} def _reload(): global all_heisenfiles all_heisenfiles = {} @implementer(ISFTPServer) class SFTPUserHandler(ConchUser, PrefixingLogMixin): def __init__(self, client, rootnode, username): ConchUser.__init__(self) PrefixingLogMixin.__init__(self, facility="tahoe.sftp", prefix=username) if noisy: self.log(".__init__(%r, %r, %r)" % (client, rootnode, username), level=NOISY) self.channelLookup[b"session"] = session.SSHSession self.subsystemLookup[b"sftp"] = FileTransferServer self._client = client self._root = rootnode self._username = username self._convergence = client.convergence # maps from UTF-8 paths for this user, to files written and still open self._heisenfiles = {} def gotVersion(self, otherVersion, extData): self.log(".gotVersion(%r, %r)" % (otherVersion, extData), level=OPERATIONAL) # advertise the same extensions as the OpenSSH SFTP server # return {'posix-rename@openssh.com': '1', 'statvfs@openssh.com': '2', 'fstatvfs@openssh.com': '2', } def logout(self): self.log(".logout()", level=OPERATIONAL) for files in self._heisenfiles.values(): for f in files: f.abandon() def _add_heisenfile_by_path(self, file): self.log("._add_heisenfile_by_path(%r)" % (file,), level=OPERATIONAL) userpath = file.get_userpath() if userpath in self._heisenfiles: self._heisenfiles[userpath] += [file] else: self._heisenfiles[userpath] = [file] def _add_heisenfile_by_direntry(self, file): self.log("._add_heisenfile_by_direntry(%r)" % (file,), level=OPERATIONAL) direntry = file.get_direntry() if direntry: if direntry in all_heisenfiles: all_heisenfiles[direntry] += [file] else: all_heisenfiles[direntry] = [file] def _abandon_any_heisenfiles(self, userpath, direntry): request = "._abandon_any_heisenfiles(%r, %r)" % (userpath, direntry) self.log(request, level=OPERATIONAL) precondition(isinstance(userpath, bytes), userpath=userpath) # First we synchronously mark all heisenfiles matching the userpath or direntry # as abandoned, and remove them from the two heisenfile dicts. Then we .sync() # each file that we abandoned. # # For each file, the call to .abandon() occurs: # * before the file is closed, in which case it will never be committed # (uploaded+linked or published); or # * after it is closed but before it has been close_notified, in which case the # .sync() ensures that it has been committed (successfully or not) before we # return. # # This avoids a race that might otherwise cause the file to be committed after # the remove operation has completed. # # We return a Deferred that fires with True if any files were abandoned (this # does not mean that they were not committed; it is used to determine whether # a NoSuchChildError from the attempt to delete the file should be suppressed). 
files = [] if direntry in all_heisenfiles: files = all_heisenfiles[direntry] del all_heisenfiles[direntry] if userpath in self._heisenfiles: files += self._heisenfiles[userpath] del self._heisenfiles[userpath] if noisy: self.log("files = %r in %r" % (files, request), level=NOISY) for f in files: f.abandon() d = defer.succeed(None) for f in files: d.addBoth(f.sync) def _done(ign): self.log("done %r" % (request,), level=OPERATIONAL) return len(files) > 0 d.addBoth(_done) return d def _rename_heisenfiles(self, from_userpath, from_parent, from_childname, to_userpath, to_parent, to_childname, overwrite=True): request = ("._rename_heisenfiles(%r, %r, %r, %r, %r, %r, overwrite=%r)" % (from_userpath, from_parent, from_childname, to_userpath, to_parent, to_childname, overwrite)) self.log(request, level=OPERATIONAL) precondition((isinstance(from_userpath, bytes) and isinstance(from_childname, str) and isinstance(to_userpath, bytes) and isinstance(to_childname, str)), from_userpath=from_userpath, from_childname=from_childname, to_userpath=to_userpath, to_childname=to_childname) if noisy: self.log("all_heisenfiles = %r\nself._heisenfiles = %r" % (all_heisenfiles, self._heisenfiles), level=NOISY) # First we synchronously rename all heisenfiles matching the userpath or direntry. # Then we .sync() each file that we renamed. # # For each file, the call to .rename occurs: # * before the file is closed, in which case it will be committed at the # new direntry; or # * after it is closed but before it has been close_notified, in which case the # .sync() ensures that it has been committed (successfully or not) before we # return. # # This avoids a race that might otherwise cause the file to be committed at the # old name after the rename operation has completed. # # Note that if overwrite is False, the caller should already have checked # whether a real direntry exists at the destination. It is possible that another # direntry (heisen or real) comes to exist at the destination after that check, # but in that case it is correct for the rename to succeed (and for the commit # of the heisenfile at the destination to possibly clobber the other entry, since # that can happen anyway when we have concurrent write handles to the same direntry). # # We return a Deferred that fires with True if any files were renamed (this # does not mean that they were not committed; it is used to determine whether # a NoSuchChildError from the rename attempt should be suppressed). If overwrite # is False and there were already heisenfiles at the destination userpath or # direntry, we return a Deferred that fails with createSFTPError(FX_PERMISSION_DENIED). 
from_direntry = _direntry_for(from_parent, from_childname) to_direntry = _direntry_for(to_parent, to_childname) if noisy: self.log("from_direntry = %r, to_direntry = %r, len(all_heisenfiles) = %r, len(self._heisenfiles) = %r in %r" % (from_direntry, to_direntry, len(all_heisenfiles), len(self._heisenfiles), request), level=NOISY) if not overwrite and (to_userpath in self._heisenfiles or to_direntry in all_heisenfiles): def _existing(): raise createSFTPError(FX_PERMISSION_DENIED, "cannot rename to existing path " + str(to_userpath, "utf-8")) if noisy: self.log("existing", level=NOISY) return defer.execute(_existing) from_files = [] if from_direntry in all_heisenfiles: from_files = all_heisenfiles[from_direntry] del all_heisenfiles[from_direntry] if from_userpath in self._heisenfiles: from_files += self._heisenfiles[from_userpath] del self._heisenfiles[from_userpath] if noisy: self.log("from_files = %r in %r" % (from_files, request), level=NOISY) for f in from_files: f.rename(to_userpath, to_parent, to_childname) self._add_heisenfile_by_path(f) self._add_heisenfile_by_direntry(f) d = defer.succeed(None) for f in from_files: d.addBoth(f.sync) def _done(ign): if noisy: self.log("done: len(all_heisenfiles) = %r, len(self._heisenfiles) = %r in %r" % (len(all_heisenfiles), len(self._heisenfiles), request), level=NOISY) return len(from_files) > 0 d.addBoth(_done) return d def _update_attrs_for_heisenfiles(self, userpath, direntry, attrs): request = "._update_attrs_for_heisenfiles(%r, %r, %r)" % (userpath, direntry, attrs) self.log(request, level=OPERATIONAL) _assert(isinstance(userpath, bytes) and isinstance(direntry, bytes), userpath=userpath, direntry=direntry) files = [] if direntry in all_heisenfiles: files = all_heisenfiles[direntry] if userpath in self._heisenfiles: files += self._heisenfiles[userpath] if noisy: self.log("files = %r in %r" % (files, request), level=NOISY) # We set the metadata for all heisenfiles at this path or direntry. # Since a direntry includes a write URI, we must have authority to # change the metadata of heisenfiles found in the all_heisenfiles dict. # However that's not necessarily the case for heisenfiles found by # path. Therefore we tell the setAttrs method of each file to only # perform the update if the file is at the correct direntry. d = defer.succeed(None) for f in files: d.addBoth(f.setAttrs, attrs, only_if_at=direntry) def _done(ign): self.log("done %r" % (request,), level=OPERATIONAL) # TODO: this should not return True if only_if_at caused all files to be skipped. 
return len(files) > 0 d.addBoth(_done) return d def _sync_heisenfiles(self, userpath, direntry, ignore=None): request = "._sync_heisenfiles(%r, %r, ignore=%r)" % (userpath, direntry, ignore) self.log(request, level=OPERATIONAL) _assert(isinstance(userpath, bytes) and isinstance(direntry, (bytes, type(None))), userpath=userpath, direntry=direntry) files = [] if direntry in all_heisenfiles: files = all_heisenfiles[direntry] if userpath in self._heisenfiles: files += self._heisenfiles[userpath] if noisy: self.log("files = %r in %r" % (files, request), level=NOISY) d = defer.succeed(None) for f in files: if f is not ignore: d.addBoth(f.sync) def _done(ign): self.log("done %r" % (request,), level=OPERATIONAL) return None d.addBoth(_done) return d def _remove_heisenfile(self, userpath, parent, childname, file_to_remove): if noisy: self.log("._remove_heisenfile(%r, %r, %r, %r)" % (userpath, parent, childname, file_to_remove), level=NOISY) _assert(isinstance(userpath, bytes) and isinstance(childname, (str, type(None))), userpath=userpath, childname=childname) direntry = _direntry_for(parent, childname) if direntry in all_heisenfiles: all_old_files = all_heisenfiles[direntry] all_new_files = [f for f in all_old_files if f is not file_to_remove] if len(all_new_files) > 0: all_heisenfiles[direntry] = all_new_files else: del all_heisenfiles[direntry] if userpath in self._heisenfiles: old_files = self._heisenfiles[userpath] new_files = [f for f in old_files if f is not file_to_remove] if len(new_files) > 0: self._heisenfiles[userpath] = new_files else: del self._heisenfiles[userpath] if noisy: self.log("all_heisenfiles = %r\nself._heisenfiles = %r" % (all_heisenfiles, self._heisenfiles), level=NOISY) def _make_file(self, existing_file, userpath, flags, parent=None, childname=None, filenode=None, metadata=None): if noisy: self.log("._make_file(%r, %r, %r = %r, parent=%r, childname=%r, filenode=%r, metadata=%r)" % (existing_file, userpath, flags, _repr_flags(flags), parent, childname, filenode, metadata), level=NOISY) _assert((isinstance(userpath, bytes) and isinstance(childname, (str, type(None))) and (metadata is None or 'no-write' in metadata)), userpath=userpath, childname=childname, metadata=metadata) writing = (flags & (FXF_WRITE | FXF_CREAT)) != 0 direntry = _direntry_for(parent, childname, filenode) d = self._sync_heisenfiles(userpath, direntry, ignore=existing_file) if not writing and (flags & FXF_READ) and filenode and not filenode.is_mutable() and filenode.get_size() <= SIZE_THRESHOLD: d.addCallback(lambda ign: ShortReadOnlySFTPFile(userpath, filenode, metadata)) else: close_notify = None if writing: close_notify = self._remove_heisenfile d.addCallback(lambda ign: existing_file or GeneralSFTPFile(userpath, flags, close_notify, self._convergence)) def _got_file(file): file.open(parent=parent, childname=childname, filenode=filenode, metadata=metadata) if writing: self._add_heisenfile_by_direntry(file) return file d.addCallback(_got_file) return d def openFile(self, pathstring, flags, attrs, delay=None): request = ".openFile(%r, %r = %r, %r, delay=%r)" % (pathstring, flags, _repr_flags(flags), attrs, delay) self.log(request, level=OPERATIONAL) # This is used for both reading and writing. # First exclude invalid combinations of flags, and empty paths. 
if not (flags & (FXF_READ | FXF_WRITE)): def _bad_readwrite(): raise createSFTPError(FX_BAD_MESSAGE, "invalid file open flags: at least one of FXF_READ and FXF_WRITE must be set") return defer.execute(_bad_readwrite) if (flags & FXF_EXCL) and not (flags & FXF_CREAT): def _bad_exclcreat(): raise createSFTPError(FX_BAD_MESSAGE, "invalid file open flags: FXF_EXCL cannot be set without FXF_CREAT") return defer.execute(_bad_exclcreat) path = self._path_from_string(pathstring) if not path: def _emptypath(): raise createSFTPError(FX_NO_SUCH_FILE, "path cannot be empty") return defer.execute(_emptypath) # The combination of flags is potentially valid. # To work around clients that have race condition bugs, a getAttr, rename, or # remove request following an 'open' request with FXF_WRITE or FXF_CREAT flags, # should succeed even if the 'open' request has not yet completed. So we now # synchronously add a file object into the self._heisenfiles dict, indexed # by its UTF-8 userpath. (We can't yet add it to the all_heisenfiles dict, # because we don't yet have a user-independent path for the file.) The file # object does not know its filenode, parent, or childname at this point. userpath = self._path_to_utf8(path) if flags & (FXF_WRITE | FXF_CREAT): file = GeneralSFTPFile(userpath, flags, self._remove_heisenfile, self._convergence) self._add_heisenfile_by_path(file) else: # We haven't decided which file implementation to use yet. file = None desired_metadata = _attrs_to_metadata(attrs) # Now there are two major cases: # # 1. The path is specified as /uri/FILECAP, with no parent directory. # If the FILECAP is mutable and writeable, then we can open it in write-only # or read/write mode (non-exclusively), otherwise we can only open it in # read-only mode. The open should succeed immediately as long as FILECAP is # a valid known filecap that grants the required permission. # # 2. The path is specified relative to a parent. We find the parent dirnode and # get the child's URI and metadata if it exists. There are four subcases: # a. the child does not exist: FXF_CREAT must be set, and we must be able # to write to the parent directory. # b. the child exists but is not a valid known filecap: fail # c. the child is mutable: if we are trying to open it write-only or # read/write, then we must be able to write to the file. # d. the child is immutable: if we are trying to open it write-only or # read/write, then we must be able to write to the parent directory. # # To reduce latency, open normally succeeds as soon as these conditions are # met, even though there might be a failure in downloading the existing file # or uploading a new one. However, there is an exception: if a file has been # written, then closed, and is now being reopened, then we have to delay the # open until the previous upload/publish has completed. This is necessary # because sshfs does not wait for the result of an FXF_CLOSE message before # reporting to the client that a file has been closed. It applies both to # mutable files, and to directory entries linked to an immutable file. # # Note that the permission checks below are for more precise error reporting on # the open call; later operations would fail even if we did not make these checks. d = delay or defer.succeed(None) d.addCallback(lambda ign: self._get_root(path)) def _got_root(root_and_path): (root, path) = root_and_path if root.is_unknown(): raise createSFTPError(FX_PERMISSION_DENIED, "cannot open an unknown cap (or child of an unknown object). 
" "Upgrading the gateway to a later Tahoe-LAFS version may help") if not path: # case 1 if noisy: self.log("case 1: root = %r, path[:-1] = %r" % (root, path[:-1]), level=NOISY) if not IFileNode.providedBy(root): raise createSFTPError(FX_PERMISSION_DENIED, "cannot open a directory cap") if (flags & FXF_WRITE) and root.is_readonly(): raise createSFTPError(FX_PERMISSION_DENIED, "cannot write to a non-writeable filecap without a parent directory") if flags & FXF_EXCL: raise createSFTPError(FX_FAILURE, "cannot create a file exclusively when it already exists") # The file does not need to be added to all_heisenfiles, because it is not # associated with a directory entry that needs to be updated. metadata = update_metadata(None, desired_metadata, time()) # We have to decide what to pass for the 'parent_readonly' argument to _no_write, # given that we don't actually have a parent. This only affects the permissions # reported by a getAttrs on this file handle in the case of an immutable file. # We choose 'parent_readonly=True' since that will cause the permissions to be # reported as r--r--r--, which is appropriate because an immutable file can't be # written via this path. metadata['no-write'] = _no_write(True, root) return self._make_file(file, userpath, flags, filenode=root, metadata=metadata) else: # case 2 childname = path[-1] if noisy: self.log("case 2: root = %r, childname = %r, desired_metadata = %r, path[:-1] = %r" % (root, childname, desired_metadata, path[:-1]), level=NOISY) d2 = root.get_child_at_path(path[:-1]) def _got_parent(parent): if noisy: self.log("_got_parent(%r)" % (parent,), level=NOISY) if parent.is_unknown(): raise createSFTPError(FX_PERMISSION_DENIED, "cannot open a child of an unknown object. " "Upgrading the gateway to a later Tahoe-LAFS version may help") parent_readonly = parent.is_readonly() d3 = defer.succeed(None) if flags & FXF_EXCL: # FXF_EXCL means that the link to the file (not the file itself) must # be created atomically wrt updates by this storage client. # That is, we need to create the link before returning success to the # SFTP open request (and not just on close, as would normally be the # case). We make the link initially point to a zero-length LIT file, # which is consistent with what might happen on a POSIX filesystem. if parent_readonly: raise createSFTPError(FX_FAILURE, "cannot create a file exclusively when the parent directory is read-only") # 'overwrite=False' ensures failure if the link already exists. 
# FIXME: should use a single call to set_uri and return (child, metadata) (#1035) zero_length_lit = b"URI:LIT:" if noisy: self.log("%r.set_uri(%r, None, readcap=%r, overwrite=False)" % (parent, zero_length_lit, childname), level=NOISY) d3.addCallback(lambda ign: parent.set_uri(childname, None, readcap=zero_length_lit, metadata=desired_metadata, overwrite=False)) def _seturi_done(child): if noisy: self.log("%r.get_metadata_for(%r)" % (parent, childname), level=NOISY) d4 = parent.get_metadata_for(childname) d4.addCallback(lambda metadata: (child, metadata)) return d4 d3.addCallback(_seturi_done) else: if noisy: self.log("%r.get_child_and_metadata(%r)" % (parent, childname), level=NOISY) d3.addCallback(lambda ign: parent.get_child_and_metadata(childname)) def _got_child(filenode_and_current_metadata): (filenode, current_metadata) = filenode_and_current_metadata if noisy: self.log("_got_child( (%r, %r) )" % (filenode, current_metadata), level=NOISY) metadata = update_metadata(current_metadata, desired_metadata, time()) # Ignore the permissions of the desired_metadata in an open call. The permissions # can only be set by setAttrs. metadata['no-write'] = _no_write(parent_readonly, filenode, current_metadata) if filenode.is_unknown(): raise createSFTPError(FX_PERMISSION_DENIED, "cannot open an unknown cap. Upgrading the gateway " "to a later Tahoe-LAFS version may help") if not IFileNode.providedBy(filenode): raise createSFTPError(FX_PERMISSION_DENIED, "cannot open a directory as if it were a file") if (flags & FXF_WRITE) and metadata['no-write']: raise createSFTPError(FX_PERMISSION_DENIED, "cannot open a non-writeable file for writing") return self._make_file(file, userpath, flags, parent=parent, childname=childname, filenode=filenode, metadata=metadata) def _no_child(f): if noisy: self.log("_no_child(%r)" % (f,), level=NOISY) f.trap(NoSuchChildError) if not (flags & FXF_CREAT): raise createSFTPError(FX_NO_SUCH_FILE, "the file does not exist, and was not opened with the creation (CREAT) flag") if parent_readonly: raise createSFTPError(FX_PERMISSION_DENIED, "cannot create a file when the parent directory is read-only") return self._make_file(file, userpath, flags, parent=parent, childname=childname) d3.addCallbacks(_got_child, _no_child) return d3 d2.addCallback(_got_parent) return d2 d.addCallback(_got_root) def _remove_on_error(err): if file: self._remove_heisenfile(userpath, None, None, file) return err d.addErrback(_remove_on_error) d.addBoth(_convert_error, request) return d def renameFile(self, from_pathstring, to_pathstring, overwrite=False): request = ".renameFile(%r, %r)" % (from_pathstring, to_pathstring) self.log(request, level=OPERATIONAL) from_path = self._path_from_string(from_pathstring) to_path = self._path_from_string(to_pathstring) from_userpath = self._path_to_utf8(from_path) to_userpath = self._path_to_utf8(to_path) # the target directory must already exist d = deferredutil.gatherResults([self._get_parent_or_node(from_path), self._get_parent_or_node(to_path)]) def _got(from_pair_and_to_pair): (from_pair, to_pair) = from_pair_and_to_pair if noisy: self.log("_got( (%r, %r) ) in .renameFile(%r, %r, overwrite=%r)" % (from_pair, to_pair, from_pathstring, to_pathstring, overwrite), level=NOISY) (from_parent, from_childname) = from_pair (to_parent, to_childname) = to_pair if from_childname is None: raise createSFTPError(FX_NO_SUCH_FILE, "cannot rename a source object specified by URI") if to_childname is None: raise createSFTPError(FX_NO_SUCH_FILE, "cannot rename to a destination 
specified by URI") # # "It is an error if there already exists a file with the name specified # by newpath." # OpenSSH's SFTP server returns FX_PERMISSION_DENIED for this error. # # For the standard SSH_FXP_RENAME operation, overwrite=False. # We also support the posix-rename@openssh.com extension, which uses overwrite=True. d2 = defer.succeed(None) if not overwrite: d2.addCallback(lambda ign: to_parent.get(to_childname)) def _expect_fail(res): if not isinstance(res, Failure): raise createSFTPError(FX_PERMISSION_DENIED, "cannot rename to existing path " + str(to_userpath, "utf-8")) # It is OK if we fail for errors other than NoSuchChildError, since that probably # indicates some problem accessing the destination directory. res.trap(NoSuchChildError) d2.addBoth(_expect_fail) # If there are heisenfiles to be written at the 'from' direntry, then ensure # they will now be written at the 'to' direntry instead. d2.addCallback(lambda ign: self._rename_heisenfiles(from_userpath, from_parent, from_childname, to_userpath, to_parent, to_childname, overwrite=overwrite)) def _move(renamed): # FIXME: use move_child_to_path to avoid possible data loss due to #943 #d3 = from_parent.move_child_to_path(from_childname, to_root, to_path, overwrite=overwrite) d3 = from_parent.move_child_to(from_childname, to_parent, to_childname, overwrite=overwrite) def _check(err): if noisy: self.log("_check(%r) in .renameFile(%r, %r, overwrite=%r)" % (err, from_pathstring, to_pathstring, overwrite), level=NOISY) if not isinstance(err, Failure) or (renamed and err.check(NoSuchChildError)): return None if not overwrite and err.check(ExistingChildError): raise createSFTPError(FX_PERMISSION_DENIED, "cannot rename to existing path " + str(to_userpath, "utf-8")) return err d3.addBoth(_check) return d3 d2.addCallback(_move) return d2 d.addCallback(_got) d.addBoth(_convert_error, request) return d def makeDirectory(self, pathstring, attrs): request = ".makeDirectory(%r, %r)" % (pathstring, attrs) self.log(request, level=OPERATIONAL) path = self._path_from_string(pathstring) metadata = _attrs_to_metadata(attrs) if 'no-write' in metadata: def _denied(): raise createSFTPError(FX_PERMISSION_DENIED, "cannot create a directory that is initially read-only") return defer.execute(_denied) d = self._get_root(path) d.addCallback(lambda root_and_path: self._get_or_create_directories(root_and_path[0], root_and_path[1], metadata)) d.addBoth(_convert_error, request) return d def _get_or_create_directories(self, node, path, metadata): if not IDirectoryNode.providedBy(node): # TODO: provide the name of the blocking file in the error message. 
def _blocked(): raise createSFTPError(FX_FAILURE, "cannot create directory because there " "is a file in the way") # close enough return defer.execute(_blocked) if not path: return defer.succeed(node) d = node.get(path[0]) def _maybe_create(f): f.trap(NoSuchChildError) return node.create_subdirectory(path[0]) d.addErrback(_maybe_create) d.addCallback(self._get_or_create_directories, path[1:], metadata) return d def removeFile(self, pathstring): request = ".removeFile(%r)" % (pathstring,) self.log(request, level=OPERATIONAL) path = self._path_from_string(pathstring) d = self._remove_object(path, must_be_file=True) d.addBoth(_convert_error, request) return d def removeDirectory(self, pathstring): request = ".removeDirectory(%r)" % (pathstring,) self.log(request, level=OPERATIONAL) path = self._path_from_string(pathstring) d = self._remove_object(path, must_be_directory=True) d.addBoth(_convert_error, request) return d def _remove_object(self, path, must_be_directory=False, must_be_file=False): userpath = self._path_to_utf8(path) d = self._get_parent_or_node(path) def _got_parent(parent_and_childname): (parent, childname) = parent_and_childname if childname is None: raise createSFTPError(FX_NO_SUCH_FILE, "cannot remove an object specified by URI") direntry = _direntry_for(parent, childname) d2 = defer.succeed(False) if not must_be_directory: d2.addCallback(lambda ign: self._abandon_any_heisenfiles(userpath, direntry)) d2.addCallback(lambda abandoned: parent.delete(childname, must_exist=not abandoned, must_be_directory=must_be_directory, must_be_file=must_be_file)) return d2 d.addCallback(_got_parent) return d def openDirectory(self, pathstring): request = ".openDirectory(%r)" % (pathstring,) self.log(request, level=OPERATIONAL) path = self._path_from_string(pathstring) d = self._get_parent_or_node(path) def _got_parent_or_node(parent_or_node__and__childname): (parent_or_node, childname) = parent_or_node__and__childname if noisy: self.log("_got_parent_or_node( (%r, %r) ) in openDirectory(%r)" % (parent_or_node, childname, pathstring), level=NOISY) if childname is None: return parent_or_node else: return parent_or_node.get(childname) d.addCallback(_got_parent_or_node) def _list(dirnode): if dirnode.is_unknown(): raise createSFTPError(FX_PERMISSION_DENIED, "cannot list an unknown cap as a directory. Upgrading the gateway " "to a later Tahoe-LAFS version may help") if not IDirectoryNode.providedBy(dirnode): raise createSFTPError(FX_PERMISSION_DENIED, "cannot list a file as if it were a directory") d2 = dirnode.list() def _render(children): parent_readonly = dirnode.is_readonly() results = [] for filename, (child, metadata) in list(children.items()): # The file size may be cached or absent. metadata['no-write'] = _no_write(parent_readonly, child, metadata) attrs = _populate_attrs(child, metadata) filename_utf8 = filename.encode('utf-8') longname = _lsLine(filename_utf8, attrs) results.append( (filename_utf8, longname, attrs) ) return StoppableList(results) d2.addCallback(_render) return d2 d.addCallback(_list) d.addBoth(_convert_error, request) return d def getAttrs(self, pathstring, followLinks): request = ".getAttrs(%r, followLinks=%r)" % (pathstring, followLinks) self.log(request, level=OPERATIONAL) # When asked about a specific file, report its current size. # TODO: the modification time for a mutable file should be # reported as the update time of the best version. But that # information isn't currently stored in mutable shares, I think. 
path = self._path_from_string(pathstring) userpath = self._path_to_utf8(path) d = self._get_parent_or_node(path) def _got_parent_or_node(parent_or_node__and__childname): (parent_or_node, childname) = parent_or_node__and__childname if noisy: self.log("_got_parent_or_node( (%r, %r) )" % (parent_or_node, childname), level=NOISY) # Some clients will incorrectly try to get the attributes # of a file immediately after opening it, before it has been put # into the all_heisenfiles table. This is a race condition bug in # the client, but we handle it anyway by calling .sync() on all # files matching either the path or the direntry. direntry = _direntry_for(parent_or_node, childname) d2 = self._sync_heisenfiles(userpath, direntry) if childname is None: node = parent_or_node d2.addCallback(lambda ign: node.get_current_size()) d2.addCallback(lambda size: _populate_attrs(node, {'no-write': node.is_unknown() or node.is_readonly()}, size=size)) else: parent = parent_or_node d2.addCallback(lambda ign: parent.get_child_and_metadata_at_path([childname])) def _got(child_and_metadata): (child, metadata) = child_and_metadata if noisy: self.log("_got( (%r, %r) )" % (child, metadata), level=NOISY) _assert(IDirectoryNode.providedBy(parent), parent=parent) metadata['no-write'] = _no_write(parent.is_readonly(), child, metadata) d3 = child.get_current_size() d3.addCallback(lambda size: _populate_attrs(child, metadata, size=size)) return d3 def _nosuch(err): if noisy: self.log("_nosuch(%r)" % (err,), level=NOISY) err.trap(NoSuchChildError) if noisy: self.log("checking open files:\nself._heisenfiles = %r\nall_heisenfiles = %r\ndirentry=%r" % (self._heisenfiles, all_heisenfiles, direntry), level=NOISY) if direntry in all_heisenfiles: files = all_heisenfiles[direntry] if len(files) == 0: # pragma: no cover return err # use the heisenfile that was most recently opened return files[-1].getAttrs() return err d2.addCallbacks(_got, _nosuch) return d2 d.addCallback(_got_parent_or_node) d.addBoth(_convert_error, request) return d def setAttrs(self, pathstring, attrs): request = ".setAttrs(%r, %r)" % (pathstring, attrs) self.log(request, level=OPERATIONAL) if "size" in attrs: # this would require us to download and re-upload the truncated/extended # file contents def _unsupported(): raise createSFTPError(FX_OP_UNSUPPORTED, "setAttrs wth size attribute unsupported") return defer.execute(_unsupported) path = self._path_from_string(pathstring) userpath = self._path_to_utf8(path) d = self._get_parent_or_node(path) def _got_parent_or_node(parent_or_node__and__childname): (parent_or_node, childname) = parent_or_node__and__childname if noisy: self.log("_got_parent_or_node( (%r, %r) )" % (parent_or_node, childname), level=NOISY) direntry = _direntry_for(parent_or_node, childname) d2 = self._update_attrs_for_heisenfiles(userpath, direntry, attrs) def _update(updated_heisenfiles): if childname is None: if updated_heisenfiles: return None raise createSFTPError(FX_NO_SUCH_FILE, userpath) else: desired_metadata = _attrs_to_metadata(attrs) if noisy: self.log("desired_metadata = %r" % (desired_metadata,), level=NOISY) d3 = parent_or_node.set_metadata_for(childname, desired_metadata) def _nosuch(err): if updated_heisenfiles: err.trap(NoSuchChildError) else: return err d3.addErrback(_nosuch) return d3 d2.addCallback(_update) d2.addCallback(lambda ign: None) return d2 d.addCallback(_got_parent_or_node) d.addBoth(_convert_error, request) return d def readLink(self, pathstring): self.log(".readLink(%r)" % (pathstring,), level=OPERATIONAL) def 
_unsupported(): raise createSFTPError(FX_OP_UNSUPPORTED, "readLink") return defer.execute(_unsupported) def makeLink(self, linkPathstring, targetPathstring): self.log(".makeLink(%r, %r)" % (linkPathstring, targetPathstring), level=OPERATIONAL) # If this is implemented, note the reversal of arguments described in point 7 of # . def _unsupported(): raise createSFTPError(FX_OP_UNSUPPORTED, "makeLink") return defer.execute(_unsupported) def extendedRequest(self, extensionName, extensionData): self.log(".extendedRequest(%r, )" % (extensionName, len(extensionData)), level=OPERATIONAL) # We implement the three main OpenSSH SFTP extensions; see # if extensionName == b'posix-rename@openssh.com': def _bad(): raise createSFTPError(FX_BAD_MESSAGE, "could not parse posix-rename@openssh.com request") if 4 > len(extensionData): return defer.execute(_bad) (fromPathLen,) = struct.unpack('>L', extensionData[0:4]) if 8 + fromPathLen > len(extensionData): return defer.execute(_bad) (toPathLen,) = struct.unpack('>L', extensionData[(4 + fromPathLen):(8 + fromPathLen)]) if 8 + fromPathLen + toPathLen != len(extensionData): return defer.execute(_bad) fromPathstring = extensionData[4:(4 + fromPathLen)] toPathstring = extensionData[(8 + fromPathLen):] d = self.renameFile(fromPathstring, toPathstring, overwrite=True) # Twisted conch assumes that the response from an extended request is either # an error, or an FXP_EXTENDED_REPLY. But it happens to do the right thing # (respond with an FXP_STATUS message) if we return a Failure with code FX_OK. def _succeeded(ign): raise createSFTPError(FX_OK, "request succeeded") d.addCallback(_succeeded) return d if extensionName == b'statvfs@openssh.com' or extensionName == b'fstatvfs@openssh.com': # f_bsize and f_frsize should be the same to avoid a bug in 'df' return defer.succeed(struct.pack('>11Q', 1024, # uint64 f_bsize /* file system block size */ 1024, # uint64 f_frsize /* fundamental fs block size */ 628318530, # uint64 f_blocks /* number of blocks (unit f_frsize) */ 314159265, # uint64 f_bfree /* free blocks in file system */ 314159265, # uint64 f_bavail /* free blocks for non-root */ 200000000, # uint64 f_files /* total file inodes */ 100000000, # uint64 f_ffree /* free file inodes */ 100000000, # uint64 f_favail /* free file inodes for non-root */ 0x1AF5, # uint64 f_fsid /* file system id */ 2, # uint64 f_flag /* bit mask = ST_NOSUID; not ST_RDONLY */ 65535, # uint64 f_namemax /* maximum filename length */ )) def _unsupported(): raise createSFTPError(FX_OP_UNSUPPORTED, "unsupported %r request " % (extensionName, len(extensionData))) return defer.execute(_unsupported) def realPath(self, pathstring): self.log(".realPath(%r)" % (pathstring,), level=OPERATIONAL) return self._path_to_utf8(self._path_from_string(pathstring)) def _path_to_utf8(self, path): return (u"/" + u"/".join(path)).encode('utf-8') def _path_from_string(self, pathstring): if noisy: self.log("CONVERT %r" % (pathstring,), level=NOISY) _assert(isinstance(pathstring, bytes), pathstring=pathstring) # The home directory is the root directory. pathstring = pathstring.strip(b"/") if pathstring == b"" or pathstring == b".": path_utf8 = [] else: path_utf8 = pathstring.split(b"/") # # "Servers SHOULD interpret a path name component ".." as referring to # the parent directory, and "." as referring to the current directory." path = [] for p_utf8 in path_utf8: if p_utf8 == b"..": # ignore excess .. 
components at the root if len(path) > 0: path = path[:-1] elif p_utf8 != b".": try: p = p_utf8.decode('utf-8', 'strict') except UnicodeError: raise createSFTPError(FX_NO_SUCH_FILE, "path could not be decoded as UTF-8") path.append(p) if noisy: self.log(" PATH %r" % (path,), level=NOISY) return path def _get_root(self, path): # return Deferred (root, remaining_path) d = defer.succeed(None) if path and path[0] == u"uri": d.addCallback(lambda ign: self._client.create_node_from_uri(path[1].encode('utf-8'))) d.addCallback(lambda root: (root, path[2:])) else: d.addCallback(lambda ign: (self._root, path)) return d def _get_parent_or_node(self, path): # return Deferred (parent, childname) or (node, None) d = self._get_root(path) def _got_root(root_and_remaining_path): (root, remaining_path) = root_and_remaining_path if not remaining_path: return (root, None) else: d2 = root.get_child_at_path(remaining_path[:-1]) d2.addCallback(lambda parent: (parent, remaining_path[-1])) return d2 d.addCallback(_got_root) return d @implementer(ITransport) class FakeTransport(object): def write(self, data): logmsg("FakeTransport.write()" % (len(data),), level=NOISY) def writeSequence(self, data): logmsg("FakeTransport.writeSequence(...)", level=NOISY) def loseConnection(self): logmsg("FakeTransport.loseConnection()", level=NOISY) def getHost(self): raise NotImplementedError() def getPeer(self): raise NotImplementedError() @implementer(ISession) class ShellSession(PrefixingLogMixin): def __init__(self, userHandler): PrefixingLogMixin.__init__(self, facility="tahoe.sftp") if noisy: self.log(".__init__(%r)" % (userHandler), level=NOISY) def getPty(self, terminal, windowSize, attrs): self.log(".getPty(%r, %r, %r)" % (terminal, windowSize, attrs), level=OPERATIONAL) def openShell(self, protocol): self.log(".openShell(%r)" % (protocol,), level=OPERATIONAL) if hasattr(protocol, 'transport') and protocol.transport is None: protocol.transport = FakeTransport() # work around Twisted bug return self._unsupported(protocol) def execCommand(self, protocol, cmd): self.log(".execCommand(%r, %r)" % (protocol, cmd), level=OPERATIONAL) if hasattr(protocol, 'transport') and protocol.transport is None: protocol.transport = FakeTransport() # work around Twisted bug d = defer.succeed(None) if cmd == "df -P -k /": d.addCallback(lambda ign: protocol.write( "Filesystem 1024-blocks Used Available Capacity Mounted on\r\n" "tahoe 628318530 314159265 314159265 50% /\r\n")) d.addCallback(lambda ign: protocol.processEnded(Reason(ProcessDone(None)))) else: d.addCallback(lambda ign: self._unsupported(protocol)) return d def _unsupported(self, protocol): d = defer.succeed(None) d.addCallback(lambda ign: protocol.errReceived( "This server supports only the SFTP protocol. It does not support SCP,\r\n" "interactive shell sessions, or commands other than one needed by sshfs.\r\n")) d.addCallback(lambda ign: protocol.processEnded(Reason(ProcessTerminated(exitCode=1)))) return d def windowChanged(self, newWindowSize): self.log(".windowChanged(%r)" % (newWindowSize,), level=OPERATIONAL) def eofReceived(self): self.log(".eofReceived()", level=OPERATIONAL) def closed(self): self.log(".closed()", level=OPERATIONAL) # If you have an SFTPUserHandler and want something that provides ISession, you get # ShellSession(userHandler). # We use adaptation because this must be a different object to the SFTPUserHandler. 
components.registerAdapter(ShellSession, SFTPUserHandler, ISession) from allmydata.frontends.auth import AccountFileChecker, NeedRootcapLookupScheme @implementer(portal.IRealm) class Dispatcher(object): def __init__(self, client): self._client = client def requestAvatar(self, avatarId, mind, *interfaces): [interface] = interfaces _assert(interface == IConchUser, interface=interface) rootnode = self._client.create_node_from_uri(avatarId.rootcap) handler = SFTPUserHandler(self._client, rootnode, avatarId.username) return (interface, handler, handler.logout) class SFTPServer(service.MultiService): # The type in Twisted for services is wrong in 22.10... # https://github.com/twisted/twisted/issues/10135 name = "frontend:sftp" # type: ignore[assignment] def __init__(self, client, accountfile, sftp_portstr, pubkey_file, privkey_file): precondition(isinstance(accountfile, (str, type(None))), accountfile) precondition(isinstance(pubkey_file, str), pubkey_file) precondition(isinstance(privkey_file, str), privkey_file) service.MultiService.__init__(self) r = Dispatcher(client) p = portal.Portal(r) if accountfile: c = AccountFileChecker(self, accountfile) p.registerChecker(c) if not accountfile: # we could leave this anonymous, with just the /uri/CAP form raise NeedRootcapLookupScheme("must provide an account file") pubkey = keys.Key.fromFile(pubkey_file.encode(get_filesystem_encoding())) privkey = keys.Key.fromFile(privkey_file.encode(get_filesystem_encoding())) class SSHFactory(factory.SSHFactory): publicKeys = {pubkey.sshType(): pubkey} privateKeys = {privkey.sshType(): privkey} def getPrimes(self): try: # if present, this enables diffie-hellman-group-exchange return primes.parseModuliFile("/etc/ssh/moduli") except IOError: return None f = SSHFactory() f.portal = p s = strports.service(six.ensure_str(sftp_portstr), f) s.setServiceParent(self) tahoe_lafs-1.20.0/src/allmydata/immutable/__init__.py0000644000000000000000000000000013615410400017434 0ustar00tahoe_lafs-1.20.0/src/allmydata/immutable/checker.py0000644000000000000000000011357013615410400017322 0ustar00""" Ported to Python 3. 
""" from zope.interface import implementer from twisted.internet import defer from foolscap.api import DeadReferenceError, RemoteException from allmydata import hashtree, codec, uri from allmydata.interfaces import IValidatedThingProxy, IVerifierURI from allmydata.hashtree import IncompleteHashTree from allmydata.check_results import CheckResults from allmydata.uri import CHKFileVerifierURI from allmydata.util.assertutil import precondition from allmydata.util import base32, deferredutil, dictutil, log, mathutil from allmydata.util.hashutil import file_renewal_secret_hash, \ file_cancel_secret_hash, bucket_renewal_secret_hash, \ bucket_cancel_secret_hash, uri_extension_hash, CRYPTO_VAL_SIZE, \ block_hash from allmydata.util.happinessutil import servers_of_happiness from allmydata.immutable import layout class IntegrityCheckReject(Exception): pass class BadURIExtension(IntegrityCheckReject): pass class BadURIExtensionHashValue(IntegrityCheckReject): pass class BadOrMissingHash(IntegrityCheckReject): pass class UnsupportedErasureCodec(BadURIExtension): pass @implementer(IValidatedThingProxy) class ValidatedExtendedURIProxy(object): """ I am a front-end for a remote UEB (using a local ReadBucketProxy), responsible for retrieving and validating the elements from the UEB.""" def __init__(self, readbucketproxy, verifycap, fetch_failures=None): # fetch_failures is for debugging -- see test_encode.py self._fetch_failures = fetch_failures self._readbucketproxy = readbucketproxy precondition(IVerifierURI.providedBy(verifycap), verifycap) self._verifycap = verifycap # required self.segment_size = None self.crypttext_root_hash = None self.share_root_hash = None # computed self.block_size = None self.share_size = None self.num_segments = None self.tail_data_size = None self.tail_segment_size = None # optional self.crypttext_hash = None def __str__(self): return "<%s %r>" % (self.__class__.__name__, self._verifycap.to_string()) def _check_integrity(self, data): h = uri_extension_hash(data) if h != self._verifycap.uri_extension_hash: msg = ("The copy of uri_extension we received from %s was bad: wanted %r, got %r" % (self._readbucketproxy, base32.b2a(self._verifycap.uri_extension_hash), base32.b2a(h))) if self._fetch_failures is not None: self._fetch_failures["uri_extension"] += 1 raise BadURIExtensionHashValue(msg) else: return data def _parse_and_validate(self, data): self.share_size = mathutil.div_ceil(self._verifycap.size, self._verifycap.needed_shares) d = uri.unpack_extension(data) # There are several kinds of things that can be found in a UEB. # First, things that we really need to learn from the UEB in order to # do this download. Next: things which are optional but not redundant # -- if they are present in the UEB they will get used. Next, things # that are optional and redundant. These things are required to be # consistent: they don't have to be in the UEB, but if they are in # the UEB then they will be checked for consistency with the # already-known facts, and if they are inconsistent then an exception # will be raised. These things aren't actually used -- they are just # tested for consistency and ignored. Finally: things which are # deprecated -- they ought not be in the UEB at all, and if they are # present then a warning will be logged but they are otherwise # ignored. # First, things that we really need to learn from the UEB: # segment_size, crypttext_root_hash, and share_root_hash. 
self.segment_size = d['segment_size'] self.block_size = mathutil.div_ceil(self.segment_size, self._verifycap.needed_shares) self.num_segments = mathutil.div_ceil(self._verifycap.size, self.segment_size) self.tail_data_size = self._verifycap.size % self.segment_size if not self.tail_data_size: self.tail_data_size = self.segment_size # padding for erasure code self.tail_segment_size = mathutil.next_multiple(self.tail_data_size, self._verifycap.needed_shares) # Ciphertext hash tree root is mandatory, so that there is at most # one ciphertext that matches this read-cap or verify-cap. The # integrity check on the shares is not sufficient to prevent the # original encoder from creating some shares of file A and other # shares of file B. self.crypttext_root_hash = d['crypttext_root_hash'] self.share_root_hash = d['share_root_hash'] # Next: things that are optional and not redundant: crypttext_hash if 'crypttext_hash' in d: self.crypttext_hash = d['crypttext_hash'] if len(self.crypttext_hash) != CRYPTO_VAL_SIZE: raise BadURIExtension('crypttext_hash is required to be hashutil.CRYPTO_VAL_SIZE bytes, not %s bytes' % (len(self.crypttext_hash),)) # Next: things that are optional, redundant, and required to be # consistent: codec_name, codec_params, tail_codec_params, # num_segments, size, needed_shares, total_shares if 'codec_name' in d: if d['codec_name'] != b"crs": raise UnsupportedErasureCodec(d['codec_name']) if 'codec_params' in d: ucpss, ucpns, ucpts = codec.parse_params(d['codec_params']) if ucpss != self.segment_size: raise BadURIExtension("inconsistent erasure code params: " "ucpss: %s != self.segment_size: %s" % (ucpss, self.segment_size)) if ucpns != self._verifycap.needed_shares: raise BadURIExtension("inconsistent erasure code params: ucpns: %s != " "self._verifycap.needed_shares: %s" % (ucpns, self._verifycap.needed_shares)) if ucpts != self._verifycap.total_shares: raise BadURIExtension("inconsistent erasure code params: ucpts: %s != " "self._verifycap.total_shares: %s" % (ucpts, self._verifycap.total_shares)) if 'tail_codec_params' in d: utcpss, utcpns, utcpts = codec.parse_params(d['tail_codec_params']) if utcpss != self.tail_segment_size: raise BadURIExtension("inconsistent erasure code params: utcpss: %s != " "self.tail_segment_size: %s, self._verifycap.size: %s, " "self.segment_size: %s, self._verifycap.needed_shares: %s" % (utcpss, self.tail_segment_size, self._verifycap.size, self.segment_size, self._verifycap.needed_shares)) if utcpns != self._verifycap.needed_shares: raise BadURIExtension("inconsistent erasure code params: utcpns: %s != " "self._verifycap.needed_shares: %s" % (utcpns, self._verifycap.needed_shares)) if utcpts != self._verifycap.total_shares: raise BadURIExtension("inconsistent erasure code params: utcpts: %s != " "self._verifycap.total_shares: %s" % (utcpts, self._verifycap.total_shares)) if 'num_segments' in d: if d['num_segments'] != self.num_segments: raise BadURIExtension("inconsistent num_segments: size: %s, " "segment_size: %s, computed_num_segments: %s, " "ueb_num_segments: %s" % (self._verifycap.size, self.segment_size, self.num_segments, d['num_segments'])) if 'size' in d: if d['size'] != self._verifycap.size: raise BadURIExtension("inconsistent size: URI size: %s, UEB size: %s" % (self._verifycap.size, d['size'])) if 'needed_shares' in d: if d['needed_shares'] != self._verifycap.needed_shares: raise BadURIExtension("inconsistent needed shares: URI needed shares: %s, UEB " "needed shares: %s" % (self._verifycap.total_shares, d['needed_shares'])) if 
'total_shares' in d: if d['total_shares'] != self._verifycap.total_shares: raise BadURIExtension("inconsistent total shares: URI total shares: %s, UEB " "total shares: %s" % (self._verifycap.total_shares, d['total_shares'])) # Finally, things that are deprecated and ignored: plaintext_hash, # plaintext_root_hash if d.get('plaintext_hash'): log.msg("Found plaintext_hash in UEB. This field is deprecated for security reasons " "and is no longer used. Ignoring. %s" % (self,)) if d.get('plaintext_root_hash'): log.msg("Found plaintext_root_hash in UEB. This field is deprecated for security " "reasons and is no longer used. Ignoring. %s" % (self,)) return self def start(self): """Fetch the UEB from bucket, compare its hash to the hash from verifycap, then parse it. Returns a deferred which is called back with self once the fetch is successful, or is erred back if it fails.""" d = self._readbucketproxy.get_uri_extension() d.addCallback(self._check_integrity) d.addCallback(self._parse_and_validate) return d class ValidatedReadBucketProxy(log.PrefixingLogMixin): """I am a front-end for a remote storage bucket, responsible for retrieving and validating data from that bucket. My get_block() method is used by BlockDownloaders. """ def __init__(self, sharenum, bucket, share_hash_tree, num_blocks, block_size, share_size): """ share_hash_tree is required to have already been initialized with the root hash (the number-0 hash), using the share_root_hash from the UEB""" precondition(share_hash_tree[0] is not None, share_hash_tree) prefix = "%d-%s-%s" % (sharenum, bucket, str(base32.b2a(share_hash_tree[0][:8])[:12], "ascii")) log.PrefixingLogMixin.__init__(self, facility="tahoe.immutable.download", prefix=prefix) self.sharenum = sharenum self.bucket = bucket self.share_hash_tree = share_hash_tree self.num_blocks = num_blocks self.block_size = block_size self.share_size = share_size self.block_hash_tree = hashtree.IncompleteHashTree(self.num_blocks) def get_all_sharehashes(self): """Retrieve and validate all the share-hash-tree nodes that are included in this share, regardless of whether we need them to validate the share or not. Each share contains a minimal Merkle tree chain, but there is lots of overlap, so usually we'll be using hashes from other shares and not reading every single hash from this share. The Verifier uses this function to read and validate every single hash from this share. Call this (and wait for the Deferred it returns to fire) before calling get_block() for the first time: this lets us check that the share share contains enough hashes to validate its own data, and avoids downloading any share hash twice. I return a Deferred which errbacks upon failure, probably with BadOrMissingHash.""" d = self.bucket.get_share_hashes() def _got_share_hashes(sh): sharehashes = dict(sh) try: self.share_hash_tree.set_hashes(sharehashes) except IndexError as le: raise BadOrMissingHash(le) except (hashtree.BadHashError, hashtree.NotEnoughHashesError) as le: raise BadOrMissingHash(le) d.addCallback(_got_share_hashes) return d def get_all_blockhashes(self): """Retrieve and validate all the block-hash-tree nodes that are included in this share. Each share contains a full Merkle tree, but we usually only fetch the minimal subset necessary for any particular block. This function fetches everything at once. The Verifier uses this function to validate the block hash tree. 
Call this (and wait for the Deferred it returns to fire) after calling get_all_sharehashes() and before calling get_block() for the first time: this lets us check that the share contains all block hashes and avoids downloading them multiple times. I return a Deferred which errbacks upon failure, probably with BadOrMissingHash. """ # get_block_hashes(anything) currently always returns everything needed = list(range(len(self.block_hash_tree))) d = self.bucket.get_block_hashes(needed) def _got_block_hashes(blockhashes): if len(blockhashes) < len(self.block_hash_tree): raise BadOrMissingHash() bh = dict(enumerate(blockhashes)) try: self.block_hash_tree.set_hashes(bh) except IndexError as le: raise BadOrMissingHash(le) except (hashtree.BadHashError, hashtree.NotEnoughHashesError) as le: raise BadOrMissingHash(le) d.addCallback(_got_block_hashes) return d def get_all_crypttext_hashes(self, crypttext_hash_tree): """Retrieve and validate all the crypttext-hash-tree nodes that are in this share. Normally we don't look at these at all: the download process fetches them incrementally as needed to validate each segment of ciphertext. But this is a convenient place to give the Verifier a function to validate all of these at once. Call this with a new hashtree object for each share, initialized with the crypttext hash tree root. I return a Deferred which errbacks upon failure, probably with BadOrMissingHash. """ # get_crypttext_hashes() always returns everything d = self.bucket.get_crypttext_hashes() def _got_crypttext_hashes(hashes): if len(hashes) < len(crypttext_hash_tree): raise BadOrMissingHash() ct_hashes = dict(enumerate(hashes)) try: crypttext_hash_tree.set_hashes(ct_hashes) except IndexError as le: raise BadOrMissingHash(le) except (hashtree.BadHashError, hashtree.NotEnoughHashesError) as le: raise BadOrMissingHash(le) d.addCallback(_got_crypttext_hashes) return d def get_block(self, blocknum): # the first time we use this bucket, we need to fetch enough elements # of the share hash tree to validate it from our share hash up to the # hashroot. if self.share_hash_tree.needed_hashes(self.sharenum): d1 = self.bucket.get_share_hashes() else: d1 = defer.succeed([]) # We might need to grab some elements of our block hash tree, to # validate the requested block up to the share hash. blockhashesneeded = self.block_hash_tree.needed_hashes(blocknum, include_leaf=True) # We don't need the root of the block hash tree, as that comes in the # share tree. blockhashesneeded.discard(0) d2 = self.bucket.get_block_hashes(blockhashesneeded) if blocknum < self.num_blocks-1: thisblocksize = self.block_size else: thisblocksize = self.share_size % self.block_size if thisblocksize == 0: thisblocksize = self.block_size d3 = self.bucket.get_block_data(blocknum, self.block_size, thisblocksize) dl = deferredutil.gatherResults([d1, d2, d3]) dl.addCallback(self._got_data, blocknum) return dl def _got_data(self, results, blocknum): precondition(blocknum < self.num_blocks, self, blocknum, self.num_blocks) sharehashes, blockhashes, blockdata = results try: sharehashes = dict(sharehashes) except ValueError as le: le.args = tuple(le.args + (sharehashes,)) raise blockhashes = dict(enumerate(blockhashes)) candidate_share_hash = None # in case we log it in the except block below blockhash = None # in case we log it in the except block below try: if self.share_hash_tree.needed_hashes(self.sharenum): # This will raise exception if the values being passed do not # match the root node of self.share_hash_tree. 
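# Hedged illustration of the hash-tree bookkeeping in this method (the
# four-share tree is an assumption for exposition, and the numbering
# assumes the usual complete-binary-tree layout where node i's parent is
# (i-1)//2): with total_shares = 4 the share hash tree has 7 nodes; node 0
# is the root seeded from the UEB's share_root_hash, and the leaves for
# shares 0..3 sit at nodes 3..6. Validating share #2 means hashing up from
# its leaf (node 5) through the sibling/uncle nodes 6 and 1 to the known
# root, and needed_hashes() reports whichever of those nodes we have not
# yet received. The block_hash_tree below works the same way, with one
# leaf per block.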
try: self.share_hash_tree.set_hashes(sharehashes) except IndexError as le: # Weird -- sharehashes contained index numbers outside of # the range that fit into this hash tree. raise BadOrMissingHash(le) # To validate a block we need the root of the block hash tree, # which is also one of the leafs of the share hash tree, and is # called "the share hash". if not self.block_hash_tree[0]: # empty -- no root node yet # Get the share hash from the share hash tree. share_hash = self.share_hash_tree.get_leaf(self.sharenum) if not share_hash: # No root node in block_hash_tree and also the share hash # wasn't sent by the server. raise hashtree.NotEnoughHashesError self.block_hash_tree.set_hashes({0: share_hash}) if self.block_hash_tree.needed_hashes(blocknum): self.block_hash_tree.set_hashes(blockhashes) blockhash = block_hash(blockdata) self.block_hash_tree.set_hashes(leaves={blocknum: blockhash}) #self.log("checking block_hash(shareid=%d, blocknum=%d) len=%d " # "%r .. %r: %s" % # (self.sharenum, blocknum, len(blockdata), # blockdata[:50], blockdata[-50:], base32.b2a(blockhash))) except (hashtree.BadHashError, hashtree.NotEnoughHashesError) as le: # log.WEIRD: indicates undetected disk/network error, or more # likely a programming error self.log("hash failure in block=%d, shnum=%d on %s" % (blocknum, self.sharenum, self.bucket)) if self.block_hash_tree.needed_hashes(blocknum): self.log(""" failure occurred when checking the block_hash_tree. This suggests that either the block data was bad, or that the block hashes we received along with it were bad.""") else: self.log(""" the failure probably occurred when checking the share_hash_tree, which suggests that the share hashes we received from the remote peer were bad.""") self.log(" have candidate_share_hash: %s" % bool(candidate_share_hash)) self.log(" block length: %d" % len(blockdata)) self.log(" block hash: %r" % base32.b2a_or_none(blockhash)) if len(blockdata) < 100: self.log(" block data: %r" % (blockdata,)) else: self.log(" block data start/end: %r .. %r" % (blockdata[:50], blockdata[-50:])) self.log(" share hash tree:\n" + self.share_hash_tree.dump()) self.log(" block hash tree:\n" + self.block_hash_tree.dump()) lines = [] for i,h in sorted(sharehashes.items()): lines.append("%3d: %s" % (i, base32.b2a_or_none(h))) self.log(" sharehashes:\n" + "\n".join(lines) + "\n") lines = [] for i,h in list(blockhashes.items()): lines.append("%3d: %s" % (i, base32.b2a_or_none(h))) log.msg(" blockhashes:\n" + "\n".join(lines) + "\n") raise BadOrMissingHash(le) # If we made it here, the block is good. If the hash trees didn't # like what they saw, they would have raised a BadHashError, causing # our caller to see a Failure and thus ignore this block (as well as # dropping this bucket). return blockdata class Checker(log.PrefixingLogMixin): """I query all servers to see if M uniquely-numbered shares are available. If the verify flag was passed to my constructor, then for each share I download every data block and all metadata from each server and perform a cryptographic integrity check on all of it. If not, I just ask each server 'Which shares do you have?' and believe its answer. In either case, I wait until I have gotten responses from all servers. This fact -- that I wait -- means that an ill-behaved server which fails to answer my questions will make me wait indefinitely. 
If it is ill-behaved in a way that triggers the underlying foolscap timeouts, then I will wait only as long as those foolscap timeouts, but if it is ill-behaved in a way which placates the foolscap timeouts but still doesn't answer my question then I will wait indefinitely. Before I send any new request to a server, I always ask the 'monitor' object that was passed into my constructor whether this task has been cancelled (by invoking its raise_if_cancelled() method). """ def __init__(self, verifycap, servers, verify, add_lease, secret_holder, monitor): assert precondition(isinstance(verifycap, CHKFileVerifierURI), verifycap, type(verifycap)) prefix = str(base32.b2a(verifycap.get_storage_index()[:8])[:12], "utf-8") log.PrefixingLogMixin.__init__(self, facility="tahoe.immutable.checker", prefix=prefix) self._verifycap = verifycap self._monitor = monitor self._servers = servers self._verify = verify # bool: verify what the servers claim, or not? self._add_lease = add_lease frs = file_renewal_secret_hash(secret_holder.get_renewal_secret(), self._verifycap.get_storage_index()) self.file_renewal_secret = frs fcs = file_cancel_secret_hash(secret_holder.get_cancel_secret(), self._verifycap.get_storage_index()) self.file_cancel_secret = fcs def _get_renewal_secret(self, seed): return bucket_renewal_secret_hash(self.file_renewal_secret, seed) def _get_cancel_secret(self, seed): return bucket_cancel_secret_hash(self.file_cancel_secret, seed) def _get_buckets(self, s, storageindex): """Return a deferred that eventually fires with ({sharenum: bucket}, serverid, success). In case the server is disconnected or returns a Failure then it fires with ({}, serverid, False) (A server disconnecting or returning a Failure when we ask it for buckets is the same, for our purposes, as a server that says it has none, except that we want to track and report whether or not each server responded.)""" storage_server = s.get_storage_server() lease_seed = s.get_lease_seed() if self._add_lease: renew_secret = self._get_renewal_secret(lease_seed) cancel_secret = self._get_cancel_secret(lease_seed) d2 = storage_server.add_lease( storageindex, renew_secret, cancel_secret, ) d2.addErrback(self._add_lease_failed, s.get_name(), storageindex) d = storage_server.get_buckets(storageindex) def _wrap_results(res): return (res, True) def _trap_errs(f): level = log.WEIRD if f.check(DeadReferenceError): level = log.UNUSUAL self.log("failure from server on 'get_buckets' the REMOTE failure was:", facility="tahoe.immutable.checker", failure=f, level=level, umid="AX7wZQ") return ({}, False) d.addCallbacks(_wrap_results, _trap_errs) return d def _add_lease_failed(self, f, server_name, storage_index): # Older versions of Tahoe didn't handle the add-lease message very # well: <=1.1.0 throws a NameError because it doesn't implement # remote_add_lease(), 1.2.0/1.3.0 throw IndexError on unknown buckets # (which is most of them, since we send add-lease to everybody, # before we know whether or not they have any shares for us), and # 1.2.0 throws KeyError even on known buckets due to an internal bug # in the latency-measuring code. # we want to ignore the known-harmless errors and log the others. In # particular we want to log any local errors caused by coding # problems. 
if f.check(DeadReferenceError): return if f.check(RemoteException): if f.value.failure.check(KeyError, IndexError, NameError): # this may ignore a bit too much, but that only hurts us # during debugging return self.log(format="error in add_lease from [%(name)s]: %(f_value)s", name=server_name, f_value=str(f.value), failure=f, level=log.WEIRD, umid="atbAxw") return # local errors are cause for alarm log.err(f, format="local error in add_lease to [%(name)s]: %(f_value)s", name=server_name, f_value=str(f.value), level=log.WEIRD, umid="hEGuQg") def _download_and_verify(self, server, sharenum, bucket): """Start an attempt to download and verify every block in this bucket and return a deferred that will eventually fire once the attempt completes. If you download and verify every block then fire with (True, sharenum, None), else if the share data couldn't be parsed because it was of an unknown version number fire with (False, sharenum, 'incompatible'), else if any of the blocks were invalid, fire with (False, sharenum, 'corrupt'), else if the server disconnected (False, sharenum, 'disconnect'), else if the server returned a Failure during the process fire with (False, sharenum, 'failure'). If there is an internal error such as an uncaught exception in this code, then the deferred will errback, but if there is a remote error such as the server failing or the returned data being incorrect then it will not errback -- it will fire normally with the indicated results.""" vcap = self._verifycap b = layout.ReadBucketProxy(bucket, server, vcap.get_storage_index()) veup = ValidatedExtendedURIProxy(b, vcap) d = veup.start() def _got_ueb(vup): share_hash_tree = IncompleteHashTree(vcap.total_shares) share_hash_tree.set_hashes({0: vup.share_root_hash}) vrbp = ValidatedReadBucketProxy(sharenum, b, share_hash_tree, vup.num_segments, vup.block_size, vup.share_size) # note: normal download doesn't use get_all_sharehashes(), # because it gets more data than necessary. We've discussed the # security properties of having verification and download look # identical (so the server couldn't, say, provide good responses # for one and not the other), but I think that full verification # is more important than defending against inconsistent server # behavior. Besides, they can't pass the verifier without storing # all the data, so there's not so much to be gained by behaving # inconsistently. d = vrbp.get_all_sharehashes() # we fill share_hash_tree before fetching any blocks, so the # block fetches won't send redundant share-hash-tree requests, to # speed things up. Then we fetch+validate all the blockhashes. d.addCallback(lambda ign: vrbp.get_all_blockhashes()) cht = IncompleteHashTree(vup.num_segments) cht.set_hashes({0: vup.crypttext_root_hash}) d.addCallback(lambda ign: vrbp.get_all_crypttext_hashes(cht)) d.addCallback(lambda ign: vrbp) return d d.addCallback(_got_ueb) def _discard_result(r): assert isinstance(r, bytes), r # to free up the RAM return None def _get_blocks(vrbp): def _get_block(ign, blocknum): db = vrbp.get_block(blocknum) db.addCallback(_discard_result) return db dbs = defer.succeed(None) for blocknum in range(veup.num_segments): dbs.addCallback(_get_block, blocknum) # The Deferred we return will fire after every block of this # share has been downloaded and verified successfully, or else it # will errback as soon as the first error is observed. 
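# The loop above chains the block fetches sequentially rather than issuing
# them in parallel; a minimal hedged sketch of the same Twisted idiom
# (block_count and fetch_block are illustrative names) is:
#
#     d = defer.succeed(None)
#     for i in range(block_count):
#         d.addCallback(lambda ign, i=i: fetch_block(i))
#
# so only one block is in flight (and in memory) at a time, and the first
# failure short-circuits the rest of the chain.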
return dbs d.addCallback(_get_blocks) # if none of those errbacked, the blocks (and the hashes above them) # are good def _all_good(ign): return (True, sharenum, None) d.addCallback(_all_good) # but if anything fails, we'll land here def _errb(f): # We didn't succeed at fetching and verifying all the blocks of # this share. Handle each reason for failure differently. if f.check(DeadReferenceError): return (False, sharenum, 'disconnect') elif f.check(RemoteException): return (False, sharenum, 'failure') elif f.check(layout.ShareVersionIncompatible): return (False, sharenum, 'incompatible') elif f.check(layout.LayoutInvalid, layout.RidiculouslyLargeURIExtensionBlock, BadOrMissingHash, BadURIExtensionHashValue): return (False, sharenum, 'corrupt') # if it wasn't one of those reasons, re-raise the error return f d.addErrback(_errb) return d def _verify_server_shares(self, s): """ Return a deferred which eventually fires with a tuple of (set(sharenum), server, set(corruptsharenum), set(incompatiblesharenum), success) showing all the shares verified to be served by this server, and all the corrupt shares served by the server, and all the incompatible shares served by the server. In case the server is disconnected or returns a Failure then it fires with the last element False. A server disconnecting or returning a failure when we ask it for shares is the same, for our purposes, as a server that says it has none or offers invalid ones, except that we want to track and report the server's behavior. Similarly, the presence of corrupt shares is mainly of use for diagnostics -- you can typically treat it as just like being no share at all by just observing its absence from the verified shares dict and ignoring its presence in the corrupt shares dict. The 'success' argument means whether the server responded to *any* queries during this process, so if it responded to some queries and then disconnected and ceased responding, or returned a failure, it is still marked with the True flag for 'success'. """ d = self._get_buckets(s, self._verifycap.get_storage_index()) def _got_buckets(result): bucketdict, success = result shareverds = [] for (sharenum, bucket) in list(bucketdict.items()): d = self._download_and_verify(s, sharenum, bucket) shareverds.append(d) dl = deferredutil.gatherResults(shareverds) def collect(results): verified = set() corrupt = set() incompatible = set() for succ, sharenum, whynot in results: if succ: verified.add(sharenum) else: if whynot == 'corrupt': corrupt.add(sharenum) elif whynot == 'incompatible': incompatible.add(sharenum) return (verified, s, corrupt, incompatible, success) dl.addCallback(collect) return dl def _err(f): f.trap(RemoteException, DeadReferenceError) return (set(), s, set(), set(), False) d.addCallbacks(_got_buckets, _err) return d def _check_server_shares(self, s): """Return a deferred which eventually fires with a tuple of (set(sharenum), server, set(corrupt), set(incompatible), responded) showing all the shares claimed to be served by this server. In case the server is disconnected then it fires with (set(), server, set(), set(), False) (a server disconnecting when we ask it for buckets is the same, for our purposes, as a server that says it has none, except that we want to track and report whether or not each server responded.) 
see also _verify_server_shares() """ def _curry_empty_corrupted(res): buckets, responded = res return (set(buckets), s, set(), set(), responded) d = self._get_buckets(s, self._verifycap.get_storage_index()) d.addCallback(_curry_empty_corrupted) return d def _format_results(self, results): SI = self._verifycap.get_storage_index() verifiedshares = dictutil.DictOfSets() # {sharenum: set(server)} servers = {} # {server: set(sharenums)} corruptshare_locators = [] # (server, storageindex, sharenum) incompatibleshare_locators = [] # (server, storageindex, sharenum) servers_responding = set() # server for verified, server, corrupt, incompatible, responded in results: servers.setdefault(server, set()).update(verified) for sharenum in verified: verifiedshares.setdefault(sharenum, set()).add(server) for sharenum in corrupt: corruptshare_locators.append((server, SI, sharenum)) for sharenum in incompatible: incompatibleshare_locators.append((server, SI, sharenum)) if responded: servers_responding.add(server) good_share_hosts = len([s for s in servers.keys() if servers[s]]) assert len(verifiedshares) <= self._verifycap.total_shares, (verifiedshares.keys(), self._verifycap.total_shares) if len(verifiedshares) == self._verifycap.total_shares: healthy = True summary = "Healthy" else: healthy = False summary = ("Not Healthy: %d shares (enc %d-of-%d)" % (len(verifiedshares), self._verifycap.needed_shares, self._verifycap.total_shares)) if len(verifiedshares) >= self._verifycap.needed_shares: recoverable = 1 unrecoverable = 0 else: recoverable = 0 unrecoverable = 1 count_happiness = servers_of_happiness(verifiedshares) cr = CheckResults(self._verifycap, SI, healthy=healthy, recoverable=bool(recoverable), count_happiness=count_happiness, count_shares_needed=self._verifycap.needed_shares, count_shares_expected=self._verifycap.total_shares, count_shares_good=len(verifiedshares), count_good_share_hosts=good_share_hosts, count_recoverable_versions=recoverable, count_unrecoverable_versions=unrecoverable, servers_responding=list(servers_responding), sharemap=verifiedshares, count_wrong_shares=0, # no such thing, for immutable list_corrupt_shares=corruptshare_locators, count_corrupt_shares=len(corruptshare_locators), list_incompatible_shares=incompatibleshare_locators, count_incompatible_shares=len(incompatibleshare_locators), summary=summary, report=[], share_problems=[], servermap=None) return cr def start(self): ds = [] if self._verify: for s in self._servers: ds.append(self._verify_server_shares(s)) else: for s in self._servers: ds.append(self._check_server_shares(s)) return deferredutil.gatherResults(ds).addCallback(self._format_results) tahoe_lafs-1.20.0/src/allmydata/immutable/encode.py0000644000000000000000000007534613615410400017163 0ustar00# -*- test-case-name: allmydata.test.test_encode -*- """ Ported to Python 3. """ import time from zope.interface import implementer from twisted.internet import defer from foolscap.api import fireEventually from allmydata import uri from allmydata.storage.server import si_b2a from allmydata.hashtree import HashTree from allmydata.util import mathutil, hashutil, base32, log, happinessutil from allmydata.util.assertutil import _assert, precondition from allmydata.codec import CRSEncoder from allmydata.interfaces import IEncoder, IStorageBucketWriter, \ IEncryptedUploadable, IUploadStatus, UploadUnhappinessError from ..util.eliotutil import ( log_call_deferred, ) """ The goal of the encoder is to turn the original file into a series of 'shares'. 
Each share is going to a 'shareholder' (nominally each shareholder is a different host, but for small grids there may be overlap). The number of shares is chosen to hit our reliability goals (more shares on more machines means more reliability), and is limited by overhead (proportional to numshares or log(numshares)) and the encoding technology in use (zfec permits only 256 shares total). It is also constrained by the amount of data we want to send to each host. For estimating purposes, think of 10 shares out of which we need 3 to reconstruct the file. The encoder starts by cutting the original file into segments. All segments except the last are of equal size. The segment size is chosen to constrain the memory footprint (which will probably vary between 1x and 4x segment size) and to constrain the overhead (which will be proportional to log(number of segments)). Each segment (A,B,C) is read into memory, encrypted, and encoded into blocks. The 'share' (say, share #1) that makes it out to a host is a collection of these blocks (block A1, B1, C1), plus some hash-tree information necessary to validate the data upon retrieval. Only one segment is handled at a time: all blocks for segment A are delivered before any work is begun on segment B. As blocks are created, we retain the hash of each one. The list of block hashes for a single share (say, hash(A1), hash(B1), hash(C1)) is used to form the base of a Merkle hash tree for that share, called the block hash tree. This hash tree has one terminal leaf per block. The complete block hash tree is sent to the shareholder after all the data has been sent. At retrieval time, the decoder will ask for specific pieces of this tree before asking for blocks, whichever it needs to validate those blocks. (Note: we don't really need to generate this whole block hash tree ourselves. It would be sufficient to have the shareholder generate it and just tell us the root. This gives us an extra level of validation on the transfer, though, and it is relatively cheap to compute.) Each of these block hash trees has a root hash. The collection of these root hashes for all shares are collected into the 'share hash tree', which has one terminal leaf per share. After sending the blocks and the complete block hash tree to each shareholder, we send them the portion of the share hash tree that is necessary to validate their share. The root of the share hash tree is put into the URI. 
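As a worked illustration (the parameter values here are chosen for round
numbers, not taken from any default configuration): with 3-of-10 encoding
and an illustrative segment size of 3 MiB, a 30 MiB file is cut into 10
segments; each segment is split into 3 primary pieces of 1 MiB and expanded
into 10 blocks of 1 MiB, so every share ends up holding 10 blocks (10 MiB)
plus its block hash tree and a slice of the share hash tree, and the grid
as a whole stores roughly 100 MiB for the 30 MiB file (a 10/3 expansion).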
""" class UploadAborted(Exception): pass KiB=1024 MiB=1024*KiB GiB=1024*MiB TiB=1024*GiB PiB=1024*TiB @implementer(IEncoder) class Encoder(object): def __init__(self, log_parent=None, upload_status=None): object.__init__(self) self.uri_extension_data = {} self._codec = None self._status = None if upload_status: self._status = IUploadStatus(upload_status) precondition(log_parent is None or isinstance(log_parent, int), log_parent) self._log_number = log.msg("creating Encoder %s" % self, facility="tahoe.encoder", parent=log_parent) self._aborted = False def __repr__(self): if hasattr(self, "_storage_index"): return "" % si_b2a(self._storage_index)[:5] return "" def log(self, *args, **kwargs): if "parent" not in kwargs: kwargs["parent"] = self._log_number if "facility" not in kwargs: kwargs["facility"] = "tahoe.encoder" return log.msg(*args, **kwargs) @log_call_deferred(action_type=u"immutable:encode:set-encrypted-uploadable") def set_encrypted_uploadable(self, uploadable): eu = self._uploadable = IEncryptedUploadable(uploadable) d = eu.get_size() def _got_size(size): self.log(format="file size: %(size)d", size=size) self.file_size = size d.addCallback(_got_size) d.addCallback(lambda res: eu.get_all_encoding_parameters()) d.addCallback(self._got_all_encoding_parameters) d.addCallback(lambda res: eu.get_storage_index()) def _done(storage_index): self._storage_index = storage_index return self d.addCallback(_done) return d def _got_all_encoding_parameters(self, params): assert not self._codec k, happy, n, segsize = params self.required_shares = k self.min_happiness = happy self.num_shares = n self.segment_size = segsize self.log("got encoding parameters: %d/%d/%d %d" % (k,happy,n, segsize)) self.log("now setting up codec") assert self.segment_size % self.required_shares == 0 self.num_segments = mathutil.div_ceil(self.file_size, self.segment_size) self._codec = CRSEncoder() self._codec.set_params(self.segment_size, self.required_shares, self.num_shares) data = self.uri_extension_data data['codec_name'] = self._codec.get_encoder_type() data['codec_params'] = self._codec.get_serialized_params() data['size'] = self.file_size data['segment_size'] = self.segment_size self.share_size = mathutil.div_ceil(self.file_size, self.required_shares) data['num_segments'] = self.num_segments data['needed_shares'] = self.required_shares data['total_shares'] = self.num_shares # the "tail" is the last segment. This segment may or may not be # shorter than all other segments. We use the "tail codec" to handle # it. If the tail is short, we use a different codec instance. In # addition, the tail codec must be fed data which has been padded out # to the right size. 
tail_size = self.file_size % self.segment_size if not tail_size: tail_size = self.segment_size # the tail codec is responsible for encoding tail_size bytes padded_tail_size = mathutil.next_multiple(tail_size, self.required_shares) self._tail_codec = CRSEncoder() self._tail_codec.set_params(padded_tail_size, self.required_shares, self.num_shares) data['tail_codec_params'] = self._tail_codec.get_serialized_params() def _get_share_size(self): share_size = mathutil.div_ceil(self.file_size, self.required_shares) overhead = self._compute_overhead() return share_size + overhead def _compute_overhead(self): return 0 def get_param(self, name): assert self._codec if name == "storage_index": return self._storage_index elif name == "share_counts": return (self.required_shares, self.min_happiness, self.num_shares) elif name == "num_segments": return self.num_segments elif name == "segment_size": return self.segment_size elif name == "block_size": return self._codec.get_block_size() elif name == "share_size": return self._get_share_size() elif name == "serialized_params": return self._codec.get_serialized_params() else: raise KeyError("unknown parameter name '%s'" % name) def set_shareholders(self, landlords, servermap): assert isinstance(landlords, dict) for k in landlords: assert IStorageBucketWriter.providedBy(landlords[k]) self.landlords = landlords.copy() assert isinstance(servermap, dict) for v in servermap.values(): assert isinstance(v, set) self.servermap = servermap.copy() @log_call_deferred(action_type=u"immutable:encode:start") def start(self): """ Returns a Deferred that will fire with the verify cap (an instance of uri.CHKFileVerifierURI).""" self.log("%s starting" % (self,)) #paddedsize = self._size + mathutil.pad_size(self._size, self.needed_shares) assert self._codec self._crypttext_hasher = hashutil.crypttext_hasher() self._crypttext_hashes = [] self.segment_num = 0 self.block_hashes = [[] for x in range(self.num_shares)] # block_hashes[i] is a list that will be accumulated and then send # to landlord[i]. This list contains a hash of each segment_share # that we sent to that landlord. self.share_root_hashes = [None] * self.num_shares self._times = { "cumulative_encoding": 0.0, "cumulative_sending": 0.0, "hashes_and_close": 0.0, "total_encode_and_push": 0.0, } self._start_total_timestamp = time.time() d = fireEventually() d.addCallback(lambda res: self.start_all_shareholders()) for i in range(self.num_segments-1): # note to self: this form doesn't work, because lambda only # captures the slot, not the value #d.addCallback(lambda res: self.do_segment(i)) # use this form instead: d.addCallback(lambda res, i=i: self._encode_segment(i, is_tail=False)) d.addCallback(self._send_segment, i) d.addCallback(self._turn_barrier) last_segnum = self.num_segments - 1 d.addCallback(lambda res: self._encode_segment(last_segnum, is_tail=True)) d.addCallback(self._send_segment, last_segnum) d.addCallback(self._turn_barrier) d.addCallback(lambda res: self.finish_hashing()) # These calls have to happen in order; layout.py now requires writes to # be appended to the data written so far. 
d.addCallback(lambda res: self.send_crypttext_hash_tree_to_all_shareholders()) d.addCallback(lambda res: self.send_all_block_hash_trees()) d.addCallback(lambda res: self.send_all_share_hash_trees()) d.addCallback(lambda res: self.send_uri_extension_to_all_shareholders()) d.addCallback(lambda res: self.close_all_shareholders()) d.addCallbacks(self.done, self.err) return d def set_status(self, status): if self._status: self._status.set_status(status) def set_encode_and_push_progress(self, sent_segments=None, extra=0.0): if self._status: # we treat the final hash+close as an extra segment if sent_segments is None: sent_segments = self.num_segments progress = float(sent_segments + extra) / (self.num_segments + 1) self._status.set_progress(2, progress) def abort(self): self.log("aborting upload", level=log.UNUSUAL) assert self._codec, "don't call abort before start" self._aborted = True # the next segment read (in _gather_data inside _encode_segment) will # raise UploadAborted(), which will bypass the rest of the upload # chain. If we've sent the final segment's shares, it's too late to # abort. TODO: allow abort any time up to close_all_shareholders. def _turn_barrier(self, res): # putting this method in a Deferred chain imposes a guaranteed # reactor turn between the pre- and post- portions of that chain. # This can be useful to limit memory consumption: since Deferreds do # not do tail recursion, code which uses defer.succeed(result) for # consistency will cause objects to live for longer than you might # normally expect. return fireEventually(res) def start_all_shareholders(self): self.log("starting shareholders", level=log.NOISY) self.set_status("Starting shareholders") dl = [] for shareid in list(self.landlords): d = self.landlords[shareid].put_header() d.addErrback(self._remove_shareholder, shareid, "start") dl.append(d) return self._gather_responses(dl) def _encode_segment(self, segnum, is_tail): """ Encode one segment of input into the configured number of shares. :param segnum: Ostensibly, the number of the segment to encode. In reality, this parameter is ignored and the *next* segment is encoded and returned. :param bool is_tail: ``True`` if this is the last segment, ``False`` otherwise. :return: A ``Deferred`` which fires with a two-tuple. The first element is a list of string-y objects representing the encoded segment data for one of the shares. The second element is a list of integers giving the share numbers of the shares in the first element. """ codec = self._tail_codec if is_tail else self._codec start = time.time() # the ICodecEncoder API wants to receive a total of self.segment_size # bytes on each encode() call, broken up into a number of # identically-sized pieces. Due to the way the codec algorithm works, # these pieces need to be the same size as the share which the codec # will generate. Therefore we must feed it with input_piece_size that # equals the output share size. input_piece_size = codec.get_block_size() # as a result, the number of input pieces per encode() call will be # equal to the number of required shares with which the codec was # constructed. 
You can think of the codec as chopping up a # 'segment_size' of data into 'required_shares' shares (not doing any # fancy math at all, just doing a split), then creating some number # of additional shares which can be substituted if the primary ones # are unavailable # we read data from the source one segment at a time, and then chop # it into 'input_piece_size' pieces before handing it to the codec crypttext_segment_hasher = hashutil.crypttext_segment_hasher() # memory footprint: we only hold a tiny piece of the plaintext at any # given time. We build up a segment's worth of cryptttext, then hand # it to the encoder. Assuming 3-of-10 encoding (3.3x expansion) and # 1MiB max_segment_size, we get a peak memory footprint of 4.3*1MiB = # 4.3MiB. Lowering max_segment_size to, say, 100KiB would drop the # footprint to 430KiB at the expense of more hash-tree overhead. d = self._gather_data(self.required_shares, input_piece_size, crypttext_segment_hasher, allow_short=is_tail) def _done_gathering(chunks): for c in chunks: # If is_tail then a short trailing chunk will have been padded # by _gather_data assert len(c) == input_piece_size self._crypttext_hashes.append(crypttext_segment_hasher.digest()) # during this call, we hit 5*segsize memory return codec.encode(chunks) d.addCallback(_done_gathering) def _done(res): elapsed = time.time() - start self._times["cumulative_encoding"] += elapsed return res d.addCallback(_done) return d def _gather_data(self, num_chunks, input_chunk_size, crypttext_segment_hasher, allow_short=False): """Return a Deferred that will fire when the required number of chunks have been read (and hashed and encrypted). The Deferred fires with a list of chunks, each of size input_chunk_size.""" # I originally built this to allow read_encrypted() to behave badly: # to let it return more or less data than you asked for. It would # stash the leftovers until later, and then recurse until it got # enough. I don't think that was actually useful. # # who defines read_encrypted? # offloaded.LocalCiphertextReader: real disk file: exact # upload.EncryptAnUploadable: Uploadable, but a wrapper that makes # it exact. The return value is a list of 50KiB chunks, to reduce # the memory footprint of the encryption process. # repairer.Repairer: immutable.filenode.CiphertextFileNode: exact # # This has been redefined to require read_encrypted() to behave like # a local file: return exactly the amount requested unless it hits # EOF. # -warner if self._aborted: raise UploadAborted() read_size = num_chunks * input_chunk_size d = self._uploadable.read_encrypted(read_size, hash_only=False) def _got(data): assert isinstance(data, (list,tuple)) if self._aborted: raise UploadAborted() data = b"".join(data) precondition(len(data) <= read_size, len(data), read_size) if not allow_short: precondition(len(data) == read_size, len(data), read_size) crypttext_segment_hasher.update(data) self._crypttext_hasher.update(data) if allow_short and len(data) < read_size: # padding data += b"\x00" * (read_size - len(data)) encrypted_pieces = [data[i:i+input_chunk_size] for i in range(0, len(data), input_chunk_size)] return encrypted_pieces d.addCallback(_got) return d def _send_segment(self, shares_and_shareids, segnum): # To generate the URI, we must generate the roothash, so we must # generate all shares, even if we aren't actually giving them to # anybody. This means that the set of shares we create will be equal # to or larger than the set of landlords. 
If we have any landlord who # *doesn't* have a share, that's an error. (shares, shareids) = shares_and_shareids _assert(set(self.landlords.keys()).issubset(set(shareids)), shareids=shareids, landlords=self.landlords) start = time.time() dl = [] self.set_status("Sending segment %d of %d" % (segnum+1, self.num_segments)) self.set_encode_and_push_progress(segnum) lognum = self.log("send_segment(%d)" % segnum, level=log.NOISY) for i in range(len(shares)): block = shares[i] shareid = shareids[i] d = self.send_block(shareid, segnum, block, lognum) dl.append(d) block_hash = hashutil.block_hash(block) #from allmydata.util import base32 #log.msg("creating block (shareid=%d, blocknum=%d) " # "len=%d %r .. %r: %s" % # (shareid, segnum, len(block), # block[:50], block[-50:], base32.b2a(block_hash))) self.block_hashes[shareid].append(block_hash) dl = self._gather_responses(dl) def _logit(res): self.log("%s uploaded %s / %s bytes (%d%%) of your file." % (self, self.segment_size*(segnum+1), self.segment_size*self.num_segments, 100 * (segnum+1) // self.num_segments, ), level=log.OPERATIONAL) elapsed = time.time() - start self._times["cumulative_sending"] += elapsed return res dl.addCallback(_logit) return dl def send_block(self, shareid, segment_num, block, lognum): if shareid not in self.landlords: return defer.succeed(None) sh = self.landlords[shareid] lognum2 = self.log("put_block to %s" % self.landlords[shareid], parent=lognum, level=log.NOISY) d = sh.put_block(segment_num, block) def _done(res): self.log("put_block done", parent=lognum2, level=log.NOISY) return res d.addCallback(_done) d.addErrback(self._remove_shareholder, shareid, "segnum=%d" % segment_num) return d def _remove_shareholder(self, why, shareid, where): ln = self.log(format="error while sending %(method)s to shareholder=%(shnum)d", method=where, shnum=shareid, level=log.UNUSUAL, failure=why) if shareid in self.landlords: self.landlords[shareid].abort() peerid = self.landlords[shareid].get_peerid() assert peerid del self.landlords[shareid] self.servermap[shareid].remove(peerid) if not self.servermap[shareid]: del self.servermap[shareid] else: # even more UNUSUAL self.log("they weren't in our list of landlords", parent=ln, level=log.WEIRD, umid="TQGFRw") happiness = happinessutil.servers_of_happiness(self.servermap) if happiness < self.min_happiness: peerids = set(happinessutil.shares_by_server(self.servermap).keys()) msg = happinessutil.failure_message(len(peerids), self.required_shares, self.min_happiness, happiness) msg = "%s: %s" % (msg, why) raise UploadUnhappinessError(msg) self.log("but we can still continue with %s shares, we'll be happy " "with at least %s" % (happiness, self.min_happiness), parent=ln) def _gather_responses(self, dl): d = defer.DeferredList(dl, fireOnOneErrback=True) def _eatUploadUnhappinessError(f): # all exceptions that occur while talking to a peer are handled # in _remove_shareholder. That might raise UploadUnhappinessError, # which will cause the DeferredList to errback but which should # otherwise be consumed. Allow non-UploadUnhappinessError exceptions # to pass through as an unhandled errback. We use this in lieu of # consumeErrors=True to allow coding errors to be logged. 
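# Hedged reminder of the Twisted idiom used below: Failure.trap() returns
# quietly when the wrapped exception matches one of the listed types and
# re-raises it otherwise, so this errback swallows UploadUnhappinessError
# (already handled by _remove_shareholder) while letting any other
# exception -- e.g. a coding error -- propagate and get logged. A
# stripped-down sketch of the same shape:
#
#     def swallow_known(f):
#         f.trap(UploadUnhappinessError)   # re-raises anything else
#         return None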
f.trap(UploadUnhappinessError) return None for d0 in dl: d0.addErrback(_eatUploadUnhappinessError) return d def finish_hashing(self): self._start_hashing_and_close_timestamp = time.time() self.set_status("Finishing hashes") self.set_encode_and_push_progress(extra=0.0) crypttext_hash = self._crypttext_hasher.digest() self.uri_extension_data["crypttext_hash"] = crypttext_hash self._uploadable.close() def send_crypttext_hash_tree_to_all_shareholders(self): self.log("sending crypttext hash tree", level=log.NOISY) self.set_status("Sending Crypttext Hash Tree") self.set_encode_and_push_progress(extra=0.3) t = HashTree(self._crypttext_hashes) all_hashes = list(t) self.uri_extension_data["crypttext_root_hash"] = t[0] dl = [] for shareid in list(self.landlords): dl.append(self.send_crypttext_hash_tree(shareid, all_hashes)) return self._gather_responses(dl) def send_crypttext_hash_tree(self, shareid, all_hashes): if shareid not in self.landlords: return defer.succeed(None) sh = self.landlords[shareid] d = sh.put_crypttext_hashes(all_hashes) d.addErrback(self._remove_shareholder, shareid, "put_crypttext_hashes") return d def send_all_block_hash_trees(self): self.log("sending block hash trees", level=log.NOISY) self.set_status("Sending Subshare Hash Trees") self.set_encode_and_push_progress(extra=0.4) dl = [] for shareid,hashes in enumerate(self.block_hashes): # hashes is a list of the hashes of all blocks that were sent # to shareholder[shareid]. dl.append(self.send_one_block_hash_tree(shareid, hashes)) return self._gather_responses(dl) def send_one_block_hash_tree(self, shareid, block_hashes): t = HashTree(block_hashes) all_hashes = list(t) # all_hashes[0] is the root hash, == hash(ah[1]+ah[2]) # all_hashes[1] is the left child, == hash(ah[3]+ah[4]) # all_hashes[n] == hash(all_hashes[2*n+1] + all_hashes[2*n+2]) self.share_root_hashes[shareid] = t[0] if shareid not in self.landlords: return defer.succeed(None) sh = self.landlords[shareid] d = sh.put_block_hashes(all_hashes) d.addErrback(self._remove_shareholder, shareid, "put_block_hashes") return d def send_all_share_hash_trees(self): # Each bucket gets a set of share hash tree nodes that are needed to validate their # share. This includes the share hash itself, but does not include the top-level hash # root (which is stored securely in the URI instead). self.log("sending all share hash trees", level=log.NOISY) self.set_status("Sending Share Hash Trees") self.set_encode_and_push_progress(extra=0.6) dl = [] for h in self.share_root_hashes: assert h # create the share hash tree t = HashTree(self.share_root_hashes) # the root of this hash tree goes into our URI self.uri_extension_data['share_root_hash'] = t[0] # now send just the necessary pieces out to each shareholder for i in range(self.num_shares): # the HashTree is given a list of leaves: 0,1,2,3..n . # These become nodes A+0,A+1,A+2.. 
of the tree, where A=n-1 needed_hash_indices = t.needed_hashes(i, include_leaf=True) hashes = [(hi, t[hi]) for hi in needed_hash_indices] dl.append(self.send_one_share_hash_tree(i, hashes)) return self._gather_responses(dl) def send_one_share_hash_tree(self, shareid, needed_hashes): if shareid not in self.landlords: return defer.succeed(None) sh = self.landlords[shareid] d = sh.put_share_hashes(needed_hashes) d.addErrback(self._remove_shareholder, shareid, "put_share_hashes") return d def send_uri_extension_to_all_shareholders(self): lp = self.log("sending uri_extension", level=log.NOISY) self.set_status("Sending URI Extensions") self.set_encode_and_push_progress(extra=0.8) for k in ('crypttext_root_hash', 'crypttext_hash', ): assert k in self.uri_extension_data uri_extension = uri.pack_extension(self.uri_extension_data) ed = {} for k,v in self.uri_extension_data.items(): if k.endswith("hash"): ed[k] = base32.b2a(v) else: ed[k] = v self.log("uri_extension_data is %s" % (ed,), level=log.NOISY, parent=lp) self.uri_extension_hash = hashutil.uri_extension_hash(uri_extension) dl = [] for shareid in list(self.landlords): dl.append(self.send_uri_extension(shareid, uri_extension)) return self._gather_responses(dl) def send_uri_extension(self, shareid, uri_extension): sh = self.landlords[shareid] d = sh.put_uri_extension(uri_extension) d.addErrback(self._remove_shareholder, shareid, "put_uri_extension") return d def close_all_shareholders(self): self.log("closing shareholders", level=log.NOISY) self.set_status("Closing Shareholders") self.set_encode_and_push_progress(extra=0.9) dl = [] for shareid in list(self.landlords): d = self.landlords[shareid].close() d.addErrback(self._remove_shareholder, shareid, "close") dl.append(d) return self._gather_responses(dl) def done(self, res): self.log("upload done", level=log.OPERATIONAL) self.set_status("Finished") self.set_encode_and_push_progress(extra=1.0) # done now = time.time() h_and_c_elapsed = now - self._start_hashing_and_close_timestamp self._times["hashes_and_close"] = h_and_c_elapsed total_elapsed = now - self._start_total_timestamp self._times["total_encode_and_push"] = total_elapsed # update our sharemap self._shares_placed = set(self.landlords.keys()) return uri.CHKFileVerifierURI(self._storage_index, self.uri_extension_hash, self.required_shares, self.num_shares, self.file_size) def err(self, f): self.log("upload failed", failure=f, level=log.UNUSUAL) self.set_status("Failed") # we need to abort any remaining shareholders, so they'll delete the # partial share, allowing someone else to upload it again. self.log("aborting shareholders", level=log.UNUSUAL) for shareid in list(self.landlords): self.landlords[shareid].abort() if f.check(defer.FirstError): return f.value.subFailure return f def get_shares_placed(self): # return a set of share numbers that were successfully placed. return self._shares_placed def get_times(self): # return a dictionary of encode+push timings return self._times def get_uri_extension_data(self): return self.uri_extension_data def get_uri_extension_hash(self): return self.uri_extension_hash def get_uri_extension_size(self): """ Calculate the size of the URI extension that gets written at the end of immutables. This may be done earlier than actual encoding, so e.g. we might not know the crypttext hashes, but that's fine for our purposes since we only care about the length. 
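(Hedged note: placeholder hash values of the correct byte length --
CRYPTO_VAL_SIZE zero bytes, as set up below -- are assumed to be
sufficient because the packed size depends only on the key names and the
lengths of the values, not on their contents.)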
""" params = self.uri_extension_data.copy() params["crypttext_hash"] = b"\x00" * hashutil.CRYPTO_VAL_SIZE params["crypttext_root_hash"] = b"\x00" * hashutil.CRYPTO_VAL_SIZE params["share_root_hash"] = b"\x00" * hashutil.CRYPTO_VAL_SIZE assert params.keys() == { "codec_name", "codec_params", "size", "segment_size", "num_segments", "needed_shares", "total_shares", "tail_codec_params", "crypttext_hash", "crypttext_root_hash", "share_root_hash" }, params.keys() uri_extension = uri.pack_extension(params) return len(uri_extension) tahoe_lafs-1.20.0/src/allmydata/immutable/filenode.py0000644000000000000000000003236513615410400017505 0ustar00""" Ported to Python 3. """ from functools import reduce import binascii from time import time as now from zope.interface import implementer from twisted.internet import defer from allmydata import uri from twisted.internet.interfaces import IConsumer from allmydata.crypto import aes from allmydata.interfaces import IImmutableFileNode, IUploadResults from allmydata.util import consumer from allmydata.check_results import CheckResults, CheckAndRepairResults from allmydata.util.dictutil import DictOfSets from allmydata.util.happinessutil import servers_of_happiness # local imports from allmydata.immutable.checker import Checker from allmydata.immutable.repairer import Repairer from allmydata.immutable.downloader.node import DownloadNode, \ IDownloadStatusHandlingConsumer from allmydata.immutable.downloader.status import DownloadStatus class CiphertextFileNode(object): def __init__(self, verifycap, storage_broker, secret_holder, terminator, history): assert isinstance(verifycap, uri.CHKFileVerifierURI) self._verifycap = verifycap self._storage_broker = storage_broker self._secret_holder = secret_holder self._terminator = terminator self._history = history self._download_status = None self._node = None # created lazily, on read() def _maybe_create_download_node(self): if not self._download_status: ds = DownloadStatus(self._verifycap.storage_index, self._verifycap.size) if self._history: self._history.add_download(ds) self._download_status = ds if self._node is None: self._node = DownloadNode(self._verifycap, self._storage_broker, self._secret_holder, self._terminator, self._history, self._download_status) def read(self, consumer, offset=0, size=None): """I am the main entry point, from which FileNode.read() can get data. I feed the consumer with the desired range of ciphertext. I return a Deferred that fires (with the consumer) when the read is finished.""" self._maybe_create_download_node() return self._node.read(consumer, offset, size) def get_segment(self, segnum): """Begin downloading a segment. I return a tuple (d, c): 'd' is a Deferred that fires with (offset,data) when the desired segment is available, and c is an object on which c.cancel() can be called to disavow interest in the segment (after which 'd' will never fire). You probably need to know the segment size before calling this, unless you want the first few bytes of the file. If you ask for a segment number which turns out to be too large, the Deferred will errback with BadSegmentNumberError. The Deferred fires with the offset of the first byte of the data segment, so that you can call get_segment() before knowing the segment size, and still know which data you received. 
""" self._maybe_create_download_node() return self._node.get_segment(segnum) def get_segment_size(self): # return a Deferred that fires with the file's real segment size self._maybe_create_download_node() return self._node.get_segsize() def get_storage_index(self): return self._verifycap.storage_index def get_verify_cap(self): return self._verifycap def get_size(self): return self._verifycap.size def raise_error(self): pass def is_mutable(self): return False def check_and_repair(self, monitor, verify=False, add_lease=False): c = Checker(verifycap=self._verifycap, servers=self._storage_broker.get_connected_servers(), verify=verify, add_lease=add_lease, secret_holder=self._secret_holder, monitor=monitor) d = c.start() d.addCallback(self._maybe_repair, monitor) return d def _maybe_repair(self, cr, monitor): crr = CheckAndRepairResults(self._verifycap.storage_index) crr.pre_repair_results = cr if cr.is_healthy(): crr.post_repair_results = cr return defer.succeed(crr) crr.repair_attempted = True crr.repair_successful = False # until proven successful def _repair_error(f): # as with mutable repair, I'm not sure if I want to pass # through a failure or not. TODO crr.repair_successful = False crr.repair_failure = f return f r = Repairer(self, storage_broker=self._storage_broker, secret_holder=self._secret_holder, monitor=monitor) d = r.start() d.addCallbacks(self._gather_repair_results, _repair_error, callbackArgs=(cr, crr,)) return d def _gather_repair_results(self, ur, cr, crr): assert IUploadResults.providedBy(ur), ur # clone the cr (check results) to form the basis of the # prr (post-repair results) verifycap = self._verifycap servers_responding = set(cr.get_servers_responding()) sm = DictOfSets() assert isinstance(cr.get_sharemap(), DictOfSets) for shnum, servers in cr.get_sharemap().items(): for server in servers: sm.add(shnum, server) for shnum, servers in ur.get_sharemap().items(): for server in servers: sm.add(shnum, server) servers_responding.add(server) good_hosts = len(reduce(set.union, sm.values(), set())) is_healthy = bool(len(sm) >= verifycap.total_shares) is_recoverable = bool(len(sm) >= verifycap.needed_shares) count_happiness = servers_of_happiness(sm) prr = CheckResults(cr.get_uri(), cr.get_storage_index(), healthy=is_healthy, recoverable=is_recoverable, count_happiness=count_happiness, count_shares_needed=verifycap.needed_shares, count_shares_expected=verifycap.total_shares, count_shares_good=len(sm), count_good_share_hosts=good_hosts, count_recoverable_versions=int(is_recoverable), count_unrecoverable_versions=int(not is_recoverable), servers_responding=list(servers_responding), sharemap=sm, count_wrong_shares=0, # no such thing as wrong, for immutable list_corrupt_shares=cr.get_corrupt_shares(), count_corrupt_shares=len(cr.get_corrupt_shares()), list_incompatible_shares=cr.get_incompatible_shares(), count_incompatible_shares=len(cr.get_incompatible_shares()), summary="", report=[], share_problems=[], servermap=None) crr.repair_successful = is_healthy crr.post_repair_results = prr return crr def check(self, monitor, verify=False, add_lease=False): verifycap = self._verifycap sb = self._storage_broker servers = sb.get_connected_servers() sh = self._secret_holder v = Checker(verifycap=verifycap, servers=servers, verify=verify, add_lease=add_lease, secret_holder=sh, monitor=monitor) return v.start() @implementer(IConsumer, IDownloadStatusHandlingConsumer) class DecryptingConsumer(object): """I sit between a CiphertextDownloader (which acts as a Producer) and the real Consumer, 
decrypting everything that passes by. The real Consumer sees the real Producer, but the Producer sees us instead of the real consumer.""" def __init__(self, consumer, readkey, offset): self._consumer = consumer self._read_ev = None self._download_status = None # TODO: pycryptopp CTR-mode needs random-access operations: I want # either a=AES(readkey, offset) or better yet both of: # a=AES(readkey, offset=0) # a.process(ciphertext, offset=xyz) # For now, we fake it with the existing iv= argument. offset_big = offset // 16 offset_small = offset % 16 iv = binascii.unhexlify("%032x" % offset_big) self._decryptor = aes.create_decryptor(readkey, iv) # this is just to advance the counter aes.decrypt_data(self._decryptor, b"\x00" * offset_small) def set_download_status_read_event(self, read_ev): self._read_ev = read_ev def set_download_status(self, ds): self._download_status = ds def registerProducer(self, producer, streaming): # this passes through, so the real consumer can flow-control the real # producer. Therefore we don't need to provide any IPushProducer # methods. We implement all the IConsumer methods as pass-throughs, # and only intercept write() to perform decryption. self._consumer.registerProducer(producer, streaming) def unregisterProducer(self): self._consumer.unregisterProducer() def write(self, ciphertext): started = now() plaintext = aes.decrypt_data(self._decryptor, ciphertext) if self._read_ev: elapsed = now() - started self._read_ev.update(0, elapsed, 0) if self._download_status: self._download_status.add_misc_event("AES", started, now()) self._consumer.write(plaintext) @implementer(IImmutableFileNode) class ImmutableFileNode(object): # I wrap a CiphertextFileNode with a decryption key def __init__(self, filecap, storage_broker, secret_holder, terminator, history): assert isinstance(filecap, uri.CHKFileURI) verifycap = filecap.get_verify_cap() self._cnode = CiphertextFileNode(verifycap, storage_broker, secret_holder, terminator, history) assert isinstance(filecap, uri.CHKFileURI) self.u = filecap self._readkey = filecap.key # TODO: I'm not sure about this.. what's the use case for node==node? 
If # we keep it here, we should also put this on CiphertextFileNode def __hash__(self): return self.u.__hash__() def __eq__(self, other): if isinstance(other, ImmutableFileNode): return self.u.__eq__(other.u) else: return False def __ne__(self, other): if isinstance(other, ImmutableFileNode): return self.u.__eq__(other.u) else: return True def read(self, consumer, offset=0, size=None): decryptor = DecryptingConsumer(consumer, self._readkey, offset) d = self._cnode.read(decryptor, offset, size) d.addCallback(lambda dc: consumer) return d def raise_error(self): pass def get_write_uri(self): return None def get_readonly_uri(self): return self.get_uri() def get_uri(self): return self.u.to_string() def get_cap(self): return self.u def get_readcap(self): return self.u.get_readonly() def get_verify_cap(self): return self.u.get_verify_cap() def get_repair_cap(self): # CHK files can be repaired with just the verifycap return self.u.get_verify_cap() def get_storage_index(self): return self.u.get_storage_index() def get_size(self): return self.u.get_size() def get_current_size(self): return defer.succeed(self.get_size()) def is_mutable(self): return False def is_readonly(self): return True def is_unknown(self): return False def is_allowed_in_immutable_directory(self): return True def check_and_repair(self, monitor, verify=False, add_lease=False): return self._cnode.check_and_repair(monitor, verify, add_lease) def check(self, monitor, verify=False, add_lease=False): return self._cnode.check(monitor, verify, add_lease) def get_best_readable_version(self): """ Return an IReadable of the best version of this file. Since immutable files can have only one version, we just return the current filenode. """ return defer.succeed(self) def download_best_version(self): """ Download the best version of this file, returning its contents as a bytestring. Since there is only one version of an immutable file, we download and return the contents of this file. """ d = consumer.download_to_data(self) return d # for an immutable file, download_to_data (specified in IReadable) # is the same as download_best_version (specified in IFileNode). For # mutable files, the difference is more meaningful, since they can # have multiple versions. download_to_data = download_best_version # get_size() (IReadable), get_current_size() (IFilesystemNode), and # get_size_of_best_version(IFileNode) are all the same for immutable # files. get_size_of_best_version = get_current_size tahoe_lafs-1.20.0/src/allmydata/immutable/happiness_upload.py0000644000000000000000000003631713615410400021257 0ustar00""" Algorithms for figuring out happiness, the number of unique nodes the data is on. Ported to Python 3. """ from queue import PriorityQueue def augmenting_path_for(graph): """ I return an augmenting path, if there is one, from the source node to the sink node in the flow network represented by my graph argument. If there is no augmenting path, I return False. I assume that the source node is at index 0 of graph, and the sink node is at the last index. I also assume that graph is a flow network in adjacency list form. """ bfs_tree = bfs(graph, 0) if bfs_tree[len(graph) - 1]: n = len(graph) - 1 path = [] # [(u, v)], where u and v are vertices in the graph while n != 0: path.insert(0, (bfs_tree[n], n)) n = bfs_tree[n] return path return False def bfs(graph, s): """ Perform a BFS on graph starting at s, where graph is a graph in adjacency list form, and s is a node in graph. I return the predecessor table that the BFS generates. 
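    A small illustrative example (a made-up graph, not one produced by the
    upload code): for the adjacency-list graph [[1, 2], [3], [3], []], where
    node 0 is the source and node 3 is the sink,

        bfs([[1, 2], [3], [3], []], 0)  =>  [None, 0, 0, 1]

    i.e. node 0 has no predecessor, nodes 1 and 2 were first reached from
    node 0, and node 3 was first reached from node 1. Feeding that same
    graph to augmenting_path_for() walks this predecessor table backwards
    from the sink and yields the edge list [(0, 1), (1, 3)].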
""" # This is an adaptation of the BFS described in "Introduction to # Algorithms", Cormen et al, 2nd ed., p. 532. # WHITE vertices are those that we haven't seen or explored yet. WHITE = 0 # GRAY vertices are those we have seen, but haven't explored yet GRAY = 1 # BLACK vertices are those we have seen and explored BLACK = 2 color = [WHITE for i in range(len(graph))] predecessor = [None for i in range(len(graph))] distance = [-1 for i in range(len(graph))] queue = [s] # vertices that we haven't explored yet. color[s] = GRAY distance[s] = 0 while queue: n = queue.pop(0) for v in graph[n]: if color[v] == WHITE: color[v] = GRAY distance[v] = distance[n] + 1 predecessor[v] = n queue.append(v) color[n] = BLACK return predecessor def residual_network(graph, f): """ I return the residual network and residual capacity function of the flow network represented by my graph and f arguments. graph is a flow network in adjacency-list form, and f is a flow in graph. """ new_graph = [[] for i in range(len(graph))] cf = [[0 for s in range(len(graph))] for sh in range(len(graph))] for i in range(len(graph)): for v in graph[i]: if f[i][v] == 1: # We add an edge (v, i) with cf[v,i] = 1. This means # that we can remove 1 unit of flow from the edge (i, v) new_graph[v].append(i) cf[v][i] = 1 cf[i][v] = -1 else: # We add the edge (i, v), since we're not using it right # now. new_graph[i].append(v) cf[i][v] = 1 cf[v][i] = -1 return (new_graph, cf) def calculate_happiness(mappings): """ :param mappings: a dict mapping 'share' -> 'peer' :returns: the happiness, which is the number of unique peers we've placed shares on. """ unique_peers = set(mappings.values()) assert None not in unique_peers return len(unique_peers) def _calculate_mappings(peers, shares, servermap=None): """ Given a set of peers, a set of shares, and a dictionary of server -> set(shares), determine how the uploader should allocate shares. If a servermap is supplied, determine which existing allocations should be preserved. If servermap is None, calculate the maximum matching of the bipartite graph (U, V, E) such that: U = peers V = shares E = peers x shares Returns a dictionary {share -> set(peer)}, indicating that the share should be placed on each peer in the set. If a share's corresponding value is None, the share can be placed on any server. Note that the set of peers should only be one peer when returned, but it is possible to duplicate shares by adding additional servers to the set. """ peer_to_index, index_to_peer = _reindex(peers, 1) share_to_index, index_to_share = _reindex(shares, len(peers) + 1) shareIndices = [share_to_index[s] for s in shares] if servermap: graph = _servermap_flow_graph(peers, shares, servermap) else: peerIndices = [peer_to_index[peer] for peer in peers] graph = _flow_network(peerIndices, shareIndices) max_graph = _compute_maximum_graph(graph, shareIndices) return _convert_mappings(index_to_peer, index_to_share, max_graph) def _compute_maximum_graph(graph, shareIndices): """ This is an implementation of the Ford-Fulkerson method for finding a maximum flow in a flow network applied to a bipartite graph. Specifically, it is the Edmonds-Karp algorithm, since it uses a BFS to find the shortest augmenting path at each iteration, if one exists. The implementation here is an adapation of an algorithm described in "Introduction to Algorithms", Cormen et al, 2nd ed., pp 658-662. 
""" if graph == []: return {} dim = len(graph) flow_function = [[0 for sh in range(dim)] for s in range(dim)] residual_graph, residual_function = residual_network(graph, flow_function) while augmenting_path_for(residual_graph): path = augmenting_path_for(residual_graph) # Delta is the largest amount that we can increase flow across # all of the edges in path. Because of the way that the residual # function is constructed, f[u][v] for a particular edge (u, v) # is the amount of unused capacity on that edge. Taking the # minimum of a list of those values for each edge in the # augmenting path gives us our delta. delta = min(residual_function[u][v] for (u, v) in path) for (u, v) in path: flow_function[u][v] += delta flow_function[v][u] -= delta residual_graph, residual_function = residual_network(graph,flow_function) new_mappings = {} for shareIndex in shareIndices: peer = residual_graph[shareIndex] if peer == [dim - 1]: new_mappings.setdefault(shareIndex, None) else: new_mappings.setdefault(shareIndex, peer[0]) return new_mappings def _extract_ids(mappings): shares = set() peers = set() for share in mappings: if mappings[share] == None: pass else: shares.add(share) for item in mappings[share]: peers.add(item) return (peers, shares) def _distribute_homeless_shares(mappings, homeless_shares, peers_to_shares): """ Shares which are not mapped to a peer in the maximum spanning graph still need to be placed on a server. This function attempts to distribute those homeless shares as evenly as possible over the available peers. If possible a share will be placed on the server it was originally on, signifying the lease should be renewed instead. """ #print("mappings, homeless_shares, peers_to_shares %s %s %s" % (mappings, homeless_shares, peers_to_shares)) servermap_peerids = set([key for key in peers_to_shares]) servermap_shareids = set() for key in sorted(peers_to_shares.keys()): # XXX maybe sort? for share in peers_to_shares[key]: servermap_shareids.add(share) # First check to see if the leases can be renewed. to_distribute = set() for share in homeless_shares: if share in servermap_shareids: for peerid in peers_to_shares: if share in peers_to_shares[peerid]: mappings[share] = set([peerid]) break else: to_distribute.add(share) # This builds a priority queue of peers with the number of shares # each peer holds as the priority. priority = {} pQueue = PriorityQueue() for peerid in servermap_peerids: priority.setdefault(peerid, 0) for share in mappings: if mappings[share] is not None: for peer in mappings[share]: if peer in servermap_peerids: priority[peer] += 1 if priority == {}: return for peerid in priority: pQueue.put((priority[peerid], peerid)) # Distribute the shares to peers with the lowest priority. for share in to_distribute: peer = pQueue.get() mappings[share] = set([peer[1]]) pQueue.put((peer[0]+1, peer[1])) def _convert_mappings(index_to_peer, index_to_share, maximum_graph): """ Now that a maximum spanning graph has been found, convert the indexes back to their original ids so that the client can pass them to the uploader. """ converted_mappings = {} for share in maximum_graph: peer = maximum_graph[share] if peer == None: converted_mappings.setdefault(index_to_share[share], None) else: converted_mappings.setdefault(index_to_share[share], set([index_to_peer[peer]])) return converted_mappings def _servermap_flow_graph(peers, shares, servermap): """ Generates a flow network of peerIndices to shareIndices from a server map of 'peer' -> ['shares']. 
According to Wikipedia, "a flow network is a directed graph where each edge has a capacity and each edge receives a flow. The amount of flow on an edge cannot exceed the capacity of the edge." This is necessary because in order to find the maximum spanning, the Edmonds-Karp algorithm converts the problem into a maximum flow problem. """ if servermap == {}: return [] peer_to_index, index_to_peer = _reindex(peers, 1) share_to_index, index_to_share = _reindex(shares, len(peers) + 1) graph = [] indexedShares = [] sink_num = len(peers) + len(shares) + 1 graph.append([peer_to_index[peer] for peer in peers]) #print("share_to_index %s" % share_to_index) #print("servermap %s" % servermap) for peer in peers: if peer in servermap: for s in servermap[peer]: if s in share_to_index: indexedShares.append(share_to_index[s]) graph.insert(peer_to_index[peer], indexedShares) for share in shares: graph.insert(share_to_index[share], [sink_num]) graph.append([]) return graph def _reindex(items, base): """ I take an iteratble of items and give each item an index to be used in the construction of a flow network. Indices for these items start at base and continue to base + len(items) - 1. I return two dictionaries: ({item: index}, {index: item}) """ item_to_index = {} index_to_item = {} for item in items: item_to_index.setdefault(item, base) index_to_item.setdefault(base, item) base += 1 return (item_to_index, index_to_item) def _flow_network(peerIndices, shareIndices): """ Given set of peerIndices and a set of shareIndices, I create a flow network to be used by _compute_maximum_graph. The return value is a two dimensional list in the form of a flow network, where each index represents a node, and the corresponding list represents all of the nodes it is connected to. This function is similar to allmydata.util.happinessutil.flow_network_for, but we connect every peer with all shares instead of reflecting a supplied servermap. """ graph = [] # The first entry in our flow network is the source. # Connect the source to every server. graph.append(peerIndices) sink_num = len(peerIndices + shareIndices) + 1 # Connect every server with every share it can possibly store. for peerIndex in peerIndices: graph.insert(peerIndex, shareIndices) # Connect every share with the sink. for shareIndex in shareIndices: graph.insert(shareIndex, [sink_num]) # Add an empty entry for the sink. graph.append([]) return graph def share_placement(peers, readonly_peers, shares, peers_to_shares): """ Generates the allocations the upload should based on the given information. We construct a dictionary of 'share_num' -> 'server_id' and return it to the caller. Existing allocations appear as placements because attempting to place an existing allocation will renew the share. For more information on the algorithm this class implements, refer to docs/specifications/servers-of-happiness.rst """ if not peers: return dict() # First calculate share placement for the readonly servers. 
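    # (After this readonly pass, the matching below is re-run for the
    #  remaining read/write servers against the existing-share servermap so
    #  that current placements are preserved, and then once more with no
    #  servermap for any shares that are still unplaced.
    #  _distribute_homeless_shares() then spreads leftover shares over the
    #  read/write servers, and a final round-robin over the read/write
    #  peers catches shares that still have no home.)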
readonly_shares = set() readonly_map = {} for peer in sorted(peers_to_shares.keys()): if peer in readonly_peers: readonly_map.setdefault(peer, peers_to_shares[peer]) for share in peers_to_shares[peer]: readonly_shares.add(share) readonly_mappings = _calculate_mappings(readonly_peers, readonly_shares, readonly_map) used_peers, used_shares = _extract_ids(readonly_mappings) # Calculate share placement for the remaining existing allocations new_peers = set(peers) - used_peers # Squash a list of sets into one set new_shares = shares - used_shares servermap = peers_to_shares.copy() for peer in sorted(peers_to_shares.keys()): if peer in used_peers: servermap.pop(peer, None) else: servermap[peer] = set(servermap[peer]) - used_shares if servermap[peer] == set(): servermap.pop(peer, None) # allmydata.test.test_upload.EncodingParameters.test_exception_messages_during_server_selection # allmydata.test.test_upload.EncodingParameters.test_problem_layout_comment_52 # both ^^ trigger a "keyerror" here .. just ignoring is right? (fixes the tests, but ...) try: new_peers.remove(peer) except KeyError: pass existing_mappings = _calculate_mappings(new_peers, new_shares, servermap) existing_peers, existing_shares = _extract_ids(existing_mappings) # Calculate share placement for the remaining peers and shares which # won't be preserved by existing allocations. new_peers = new_peers - existing_peers - used_peers new_shares = new_shares - existing_shares - used_shares new_mappings = _calculate_mappings(new_peers, new_shares) #print("new_peers %s" % new_peers) #print("new_mappings %s" % new_mappings) mappings = dict(list(readonly_mappings.items()) + list(existing_mappings.items()) + list(new_mappings.items())) homeless_shares = set() for share in mappings: if mappings[share] is None: homeless_shares.add(share) if len(homeless_shares) != 0: # 'servermap' should contain only read/write peers _distribute_homeless_shares( mappings, homeless_shares, { k: v for k, v in list(peers_to_shares.items()) if k not in readonly_peers } ) # now, if any share is *still* mapped to None that means "don't # care which server it goes on", so we place it on a round-robin # of read-write servers def round_robin(peers): while True: for peer in peers: yield peer peer_iter = round_robin(peers - readonly_peers) return { k: v.pop() if v else next(peer_iter) for k, v in list(mappings.items()) } tahoe_lafs-1.20.0/src/allmydata/immutable/layout.py0000644000000000000000000005151113615410400017227 0ustar00""" Ported to Python 3. """ from __future__ import annotations import struct from io import BytesIO from attrs import define, field from zope.interface import implementer from twisted.internet import defer from allmydata.interfaces import IStorageBucketWriter, IStorageBucketReader, \ FileTooLargeError, HASH_SIZE from allmydata.util import mathutil, observer, log from allmydata.util.assertutil import precondition from allmydata.storage.server import si_b2a class LayoutInvalid(Exception): """ There is something wrong with these bytes so they can't be interpreted as the kind of immutable file that I know how to download.""" pass class RidiculouslyLargeURIExtensionBlock(LayoutInvalid): """ When downloading a file, the length of the URI Extension Block was given as >= 2**32. 
This means the share data must have been corrupted, or else the original uploader of the file wrote a ridiculous value into the URI Extension Block length.""" pass class ShareVersionIncompatible(LayoutInvalid): """ When downloading a share, its format was not one of the formats we know how to parse.""" pass """ Share data is written in a file. At the start of the file, there is a series of four-byte big-endian offset values, which indicate where each section starts. Each offset is measured from the beginning of the share data. 0x00: version number (=00 00 00 01) 0x04: block size # See Footnote 1 below. 0x08: share data size # See Footnote 1 below. 0x0c: offset of data (=00 00 00 24) 0x10: offset of plaintext_hash_tree UNUSED 0x14: offset of crypttext_hash_tree 0x18: offset of block_hashes 0x1c: offset of share_hashes 0x20: offset of uri_extension_length + uri_extension 0x24: start of data ? : start of plaintext_hash_tree UNUSED ? : start of crypttext_hash_tree ? : start of block_hashes ? : start of share_hashes each share_hash is written as a two-byte (big-endian) hashnum followed by the 32-byte SHA-256 hash. We store only the hashes necessary to validate the share hash root ? : start of uri_extension_length (four-byte big-endian value) ? : start of uri_extension """ """ v2 shares: these use 8-byte offsets to remove two of the three ~12GiB size limitations described in #346. 0x00: version number (=00 00 00 02) 0x04: block size # See Footnote 1 below. 0x0c: share data size # See Footnote 1 below. 0x14: offset of data (=00 00 00 00 00 00 00 44) 0x1c: offset of plaintext_hash_tree UNUSED 0x24: offset of crypttext_hash_tree 0x2c: offset of block_hashes 0x34: offset of share_hashes 0x3c: offset of uri_extension_length + uri_extension 0x44: start of data : rest of share is the same as v1, above ... ... ? : start of uri_extension_length (eight-byte big-endian value) ? : start of uri_extension """ # Footnote 1: as of Tahoe v1.3.0 these fields are not used when reading, but # they are still provided when writing so that older versions of Tahoe can # read them. FORCE_V2 = False # set briefly by unit tests to make small-sized V2 shares def make_write_bucket_proxy(rref, server, data_size, block_size, num_segments, num_share_hashes, uri_extension_size): # Use layout v1 for small files, so they'll be readable by older versions # ( bool: """ Queue a write. If the result is ``False``, no further action is needed for now. If the result is some ``True``, it's time to call ``flush()`` and do a real write. """ self._to_write.write(data) return self.get_queued_bytes() >= self._batch_size def flush(self) -> tuple[int, bytes]: """Return offset and data to be written.""" offset = self._written_bytes data = self._to_write.getvalue() self._written_bytes += len(data) self._to_write = BytesIO() return (offset, data) def get_queued_bytes(self) -> int: """Return number of queued, unwritten bytes.""" return self._to_write.tell() def get_total_bytes(self) -> int: """Return how many bytes were written or queued in total.""" return self._written_bytes + self.get_queued_bytes() @implementer(IStorageBucketWriter) class WriteBucketProxy(object): """ Note: The various ``put_`` methods need to be called in the order in which the bytes will get written. 
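    Writes issued through the put_ methods are coalesced by the _WriteBuffer
    helper defined above. A minimal, self-contained sketch of that batching
    behaviour (the 10-byte batch size is a toy value for illustration, not
    one the real uploader uses):

        buf = _WriteBuffer(batch_size=10)
        assert buf.queue_write(b"abc") is False      # only 3 bytes queued
        assert buf.queue_write(b"defghij") is True   # 10 bytes queued: flush now
        offset, data = buf.flush()
        assert (offset, data) == (0, b"abcdefghij")
        assert buf.get_total_bytes() == 10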
""" fieldsize = 4 fieldstruct = ">L" def __init__(self, rref, server, data_size, block_size, num_segments, num_share_hashes, uri_extension_size, batch_size=1_000_000): self._rref = rref self._server = server self._data_size = data_size self._block_size = block_size self._num_segments = num_segments effective_segments = mathutil.next_power_of_k(num_segments,2) self._segment_hash_size = (2*effective_segments - 1) * HASH_SIZE # how many share hashes are included in each share? This will be # about ln2(num_shares). self._share_hashtree_size = num_share_hashes * (2+HASH_SIZE) self._uri_extension_size = uri_extension_size self._create_offsets(block_size, data_size) # With a ~1MB batch size, max upload speed is 1MB/(round-trip latency) # assuming the writing code waits for writes to finish, so 20MB/sec if # latency is 50ms. In the US many people only have 1MB/sec upload speed # as of 2022 (standard Comcast). For further discussion of how one # might set batch sizes see # https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3787#comment:1. self._write_buffer = _WriteBuffer(batch_size) def get_allocated_size(self): return (self._offsets['uri_extension'] + self.fieldsize + self._uri_extension_size) def _create_offsets(self, block_size, data_size): if block_size >= 2**32 or data_size >= 2**32: raise FileTooLargeError("This file is too large to be uploaded (data_size).") offsets = self._offsets = {} x = 0x24 offsets['data'] = x x += data_size offsets['plaintext_hash_tree'] = x # UNUSED x += self._segment_hash_size offsets['crypttext_hash_tree'] = x x += self._segment_hash_size offsets['block_hashes'] = x x += self._segment_hash_size offsets['share_hashes'] = x x += self._share_hashtree_size offsets['uri_extension'] = x if x >= 2**32: raise FileTooLargeError("This file is too large to be uploaded (offsets).") offset_data = struct.pack(">LLLLLLLLL", 1, # version number block_size, data_size, offsets['data'], offsets['plaintext_hash_tree'], # UNUSED offsets['crypttext_hash_tree'], offsets['block_hashes'], offsets['share_hashes'], offsets['uri_extension'], ) assert len(offset_data) == 0x24 self._offset_data = offset_data def __repr__(self): return "" % self._server.get_name() def put_header(self): return self._queue_write(0, self._offset_data) def put_block(self, segmentnum, data): offset = self._offsets['data'] + segmentnum * self._block_size assert offset + len(data) <= self._offsets['uri_extension'] assert isinstance(data, bytes) if segmentnum < self._num_segments-1: precondition(len(data) == self._block_size, len(data), self._block_size) else: precondition(len(data) == (self._data_size - (self._block_size * (self._num_segments - 1))), len(data), self._block_size) return self._queue_write(offset, data) def put_crypttext_hashes(self, hashes): # plaintext_hash_tree precedes crypttext_hash_tree. It is not used, and # so is not explicitly written, but we need to write everything, so # fill it in with nulls. 
d = self._queue_write(self._offsets['plaintext_hash_tree'], b"\x00" * self._segment_hash_size) d.addCallback(lambda _: self._really_put_crypttext_hashes(hashes)) return d def _really_put_crypttext_hashes(self, hashes): offset = self._offsets['crypttext_hash_tree'] assert isinstance(hashes, list) data = b"".join(hashes) precondition(len(data) == self._segment_hash_size, len(data), self._segment_hash_size) precondition(offset + len(data) <= self._offsets['block_hashes'], offset, len(data), offset+len(data), self._offsets['block_hashes']) return self._queue_write(offset, data) def put_block_hashes(self, blockhashes): offset = self._offsets['block_hashes'] assert isinstance(blockhashes, list) data = b"".join(blockhashes) precondition(len(data) == self._segment_hash_size, len(data), self._segment_hash_size) precondition(offset + len(data) <= self._offsets['share_hashes'], offset, len(data), offset+len(data), self._offsets['share_hashes']) return self._queue_write(offset, data) def put_share_hashes(self, sharehashes): # sharehashes is a list of (index, hash) tuples, so they get stored # as 2+32=34 bytes each offset = self._offsets['share_hashes'] assert isinstance(sharehashes, list) data = b"".join([struct.pack(">H", hashnum) + hashvalue for hashnum,hashvalue in sharehashes]) precondition(len(data) == self._share_hashtree_size, len(data), self._share_hashtree_size) precondition(offset + len(data) <= self._offsets['uri_extension'], offset, len(data), offset+len(data), self._offsets['uri_extension']) return self._queue_write(offset, data) def put_uri_extension(self, data): offset = self._offsets['uri_extension'] assert isinstance(data, bytes) precondition(len(data) == self._uri_extension_size) length = struct.pack(self.fieldstruct, len(data)) return self._queue_write(offset, length+data) def _queue_write(self, offset, data): """ This queues up small writes to be written in a single batched larger write. Callers of this function are expected to queue the data in order, with no holes. As such, the offset is technically unnecessary, but is used to check the inputs. Possibly we should get rid of it. """ assert offset == self._write_buffer.get_total_bytes() if self._write_buffer.queue_write(data): return self._actually_write() else: return defer.succeed(False) def _actually_write(self): """Write data to the server.""" offset, data = self._write_buffer.flush() return self._rref.callRemote("write", offset, data) def close(self): assert self._write_buffer.get_total_bytes() == self.get_allocated_size(), ( f"{self._written_buffer.get_total_bytes_queued()} != {self.get_allocated_size()}" ) if self._write_buffer.get_queued_bytes() > 0: d = self._actually_write() else: # No data queued, don't send empty string write. 
d = defer.succeed(True) d.addCallback(lambda _: self._rref.callRemote("close")) return d def abort(self): return self._rref.callRemote("abort").addErrback(log.err, "Error from remote call to abort an immutable write bucket") def get_servername(self): return self._server.get_name() def get_peerid(self): return self._server.get_serverid() class WriteBucketProxy_v2(WriteBucketProxy): fieldsize = 8 fieldstruct = ">Q" def _create_offsets(self, block_size, data_size): if block_size >= 2**64 or data_size >= 2**64: raise FileTooLargeError("This file is too large to be uploaded (data_size).") offsets = self._offsets = {} x = 0x44 offsets['data'] = x x += data_size offsets['plaintext_hash_tree'] = x # UNUSED x += self._segment_hash_size offsets['crypttext_hash_tree'] = x x += self._segment_hash_size offsets['block_hashes'] = x x += self._segment_hash_size offsets['share_hashes'] = x x += self._share_hashtree_size offsets['uri_extension'] = x if x >= 2**64: raise FileTooLargeError("This file is too large to be uploaded (offsets).") offset_data = struct.pack(">LQQQQQQQQ", 2, # version number block_size, data_size, offsets['data'], offsets['plaintext_hash_tree'], # UNUSED offsets['crypttext_hash_tree'], offsets['block_hashes'], offsets['share_hashes'], offsets['uri_extension'], ) assert len(offset_data) == 0x44, len(offset_data) self._offset_data = offset_data @implementer(IStorageBucketReader) class ReadBucketProxy(object): def __init__(self, rref, server, storage_index): self._rref = rref self._server = server self._storage_index = storage_index self._started = False # sent request to server self._ready = observer.OneShotObserverList() # got response from server def get_peerid(self): return self._server.get_serverid() def __repr__(self): return "" % \ (id(self), self._server.get_name(), si_b2a(self._storage_index)) def _start_if_needed(self): """ Returns a deferred that will be fired when I'm ready to return data, or errbacks if the starting (header reading and parsing) process fails.""" if not self._started: self._start() return self._ready.when_fired() def _start(self): self._started = True # TODO: for small shares, read the whole bucket in _start() d = self._fetch_header() d.addCallback(self._parse_offsets) def _fail_waiters(f): self._ready.fire(f) def _notify_waiters(result): self._ready.fire(result) d.addCallbacks(_notify_waiters, _fail_waiters) return d def _fetch_header(self): return self._read(0, 0x44) def _parse_offsets(self, data): precondition(len(data) >= 0x4) self._offsets = {} (version,) = struct.unpack(">L", data[0:4]) if version != 1 and version != 2: raise ShareVersionIncompatible(version) if version == 1: precondition(len(data) >= 0x24) x = 0x0c fieldsize = 0x4 fieldstruct = ">L" else: precondition(len(data) >= 0x44) x = 0x14 fieldsize = 0x8 fieldstruct = ">Q" self._version = version self._fieldsize = fieldsize self._fieldstruct = fieldstruct for field_name in ( 'data', 'plaintext_hash_tree', # UNUSED 'crypttext_hash_tree', 'block_hashes', 'share_hashes', 'uri_extension', ): offset = struct.unpack(fieldstruct, data[x:x+fieldsize])[0] x += fieldsize self._offsets[field_name] = offset return self._offsets def _get_block_data(self, unused, blocknum, blocksize, thisblocksize): offset = self._offsets['data'] + blocknum * blocksize return self._read(offset, thisblocksize) def get_block_data(self, blocknum, blocksize, thisblocksize): d = self._start_if_needed() d.addCallback(self._get_block_data, blocknum, blocksize, thisblocksize) return d def _str2l(self, s): """ split string (pulled 
from storage) into a list of blockids """ return [ s[i:i+HASH_SIZE] for i in range(0, len(s), HASH_SIZE) ] def _get_crypttext_hashes(self, unused=None): offset = self._offsets['crypttext_hash_tree'] size = self._offsets['block_hashes'] - offset d = self._read(offset, size) d.addCallback(self._str2l) return d def get_crypttext_hashes(self): d = self._start_if_needed() d.addCallback(self._get_crypttext_hashes) return d def _get_block_hashes(self, unused=None, at_least_these=()): # TODO: fetch only at_least_these instead of all of them. offset = self._offsets['block_hashes'] size = self._offsets['share_hashes'] - offset d = self._read(offset, size) d.addCallback(self._str2l) return d def get_block_hashes(self, at_least_these=()): if at_least_these: d = self._start_if_needed() d.addCallback(self._get_block_hashes, at_least_these) return d else: return defer.succeed([]) def get_share_hashes(self): d = self._start_if_needed() d.addCallback(self._get_share_hashes) return d def _get_share_hashes(self, _ignore): """ Tahoe storage servers < v1.3.0 would return an error if you tried to read past the end of the share, so we need to use the offset and read just that much. HTTP-based storage protocol also doesn't like reading past the end. """ offset = self._offsets['share_hashes'] size = self._offsets['uri_extension'] - offset if size % (2+HASH_SIZE) != 0: raise LayoutInvalid("share hash tree corrupted -- should occupy a multiple of %d bytes, not %d bytes" % ((2+HASH_SIZE), size)) d = self._read(offset, size) def _unpack_share_hashes(data): if len(data) != size: raise LayoutInvalid("share hash tree corrupted -- got a short read of the share data -- should have gotten %d, not %d bytes" % (size, len(data))) hashes = [] for i in range(0, size, 2+HASH_SIZE): hashnum = struct.unpack(">H", data[i:i+2])[0] hashvalue = data[i+2:i+2+HASH_SIZE] hashes.append( (hashnum, hashvalue) ) return hashes d.addCallback(_unpack_share_hashes) return d def _get_uri_extension(self, unused=None): """ Tahoe storage servers < v1.3.0 would return an error if you tried to read past the end of the share, so we need to fetch the UEB size and then read just that much. HTTP-based storage protocol also doesn't like reading past the end. """ offset = self._offsets['uri_extension'] d = self._read(offset, self._fieldsize) def _got_length(data): if len(data) != self._fieldsize: raise LayoutInvalid("not enough bytes to encode URI length -- should be %d bytes long, not %d " % (self._fieldsize, len(data),)) length = struct.unpack(self._fieldstruct, data)[0] if length >= 2000: # URI extension blocks are around 419 bytes long; in previous # versions of the code 1000 was used as a default catchall. So # 2000 or more must be corrupted. raise RidiculouslyLargeURIExtensionBlock(length) return self._read(offset+self._fieldsize, length) d.addCallback(_got_length) return d def get_uri_extension(self): d = self._start_if_needed() d.addCallback(self._get_uri_extension) return d def _read(self, offset, length): return self._rref.callRemote("read", offset, length) tahoe_lafs-1.20.0/src/allmydata/immutable/literal.py0000644000000000000000000000562713615410400017355 0ustar00""" Ported to Python 3. 
""" from io import BytesIO from zope.interface import implementer from twisted.internet import defer from twisted.protocols import basic from allmydata.interfaces import IImmutableFileNode, ICheckable from allmydata.uri import LiteralFileURI class _ImmutableFileNodeBase(object): def get_write_uri(self): return None def get_readonly_uri(self): return self.get_uri() def is_mutable(self): return False def is_readonly(self): return True def is_unknown(self): return False def is_allowed_in_immutable_directory(self): return True def raise_error(self): pass def __hash__(self): return self.u.__hash__() def __eq__(self, other): if isinstance(other, _ImmutableFileNodeBase): return self.u == other.u else: return False def __ne__(self, other): return not self == other @implementer(IImmutableFileNode, ICheckable) class LiteralFileNode(_ImmutableFileNodeBase): def __init__(self, filecap): assert isinstance(filecap, LiteralFileURI) self.u = filecap def get_size(self): return len(self.u.data) def get_current_size(self): return defer.succeed(self.get_size()) def get_cap(self): return self.u def get_readcap(self): return self.u def get_verify_cap(self): return None def get_repair_cap(self): return None def get_uri(self): return self.u.to_string() def get_storage_index(self): return None def check(self, monitor, verify=False, add_lease=False): return defer.succeed(None) def check_and_repair(self, monitor, verify=False, add_lease=False): return defer.succeed(None) def read(self, consumer, offset=0, size=None): if size is None: data = self.u.data[offset:] else: data = self.u.data[offset:offset+size] # We use twisted.protocols.basic.FileSender, which only does # non-streaming, i.e. PullProducer, where the receiver/consumer must # ask explicitly for each chunk of data. There are only two places in # the Twisted codebase that can't handle streaming=False, both of # which are in the upload path for an FTP/SFTP server # (protocols.ftp.FileConsumer and # vfs.adapters.ftp._FileToConsumerAdapter), neither of which is # likely to be used as the target for a Tahoe download. d = basic.FileSender().beginFileTransfer(BytesIO(data), consumer) d.addCallback(lambda lastSent: consumer) return d # IReadable, IFileNode, IFilesystemNode def get_best_readable_version(self): return defer.succeed(self) def download_best_version(self): return defer.succeed(self.u.data) download_to_data = download_best_version get_size_of_best_version = get_current_size tahoe_lafs-1.20.0/src/allmydata/immutable/offloaded.py0000644000000000000000000006657313615410400017653 0ustar00""" Ported to Python 3. """ import os, stat, time, weakref from zope.interface import implementer from twisted.internet import defer from foolscap.api import Referenceable, DeadReferenceError, eventually import allmydata # for __full_version__ from allmydata import interfaces, uri from allmydata.storage.server import si_b2a from allmydata.immutable import upload from allmydata.immutable.layout import ReadBucketProxy from allmydata.util.assertutil import precondition from allmydata.util import log, observer, fileutil, hashutil, dictutil class NotEnoughWritersError(Exception): pass class CHKCheckerAndUEBFetcher(object): """I check to see if a file is already present in the grid. I also fetch the URI Extension Block, which is useful for an uploading client who wants to avoid the work of encryption and encoding. I return False if the file is not completely healthy: i.e. if there are less than 'N' shares present. 
If the file is completely healthy, I return a tuple of (sharemap, UEB_data, UEB_hash). A sharemap is a dict with share numbers as keys and sets of server ids (which hold that share) as values. """ def __init__(self, peer_getter, storage_index, logparent): self._peer_getter = peer_getter self._found_shares = set() self._storage_index = storage_index self._sharemap = dictutil.DictOfSets() self._readers = set() self._ueb_hash = None self._ueb_data = None self._logparent = logparent def log(self, *args, **kwargs): if 'facility' not in kwargs: kwargs['facility'] = "tahoe.helper.chk.checkandUEBfetch" if 'parent' not in kwargs: kwargs['parent'] = self._logparent return log.msg(*args, **kwargs) def check(self): """ :return Deferred[bool|(DictOfSets, dict, bytes)]: If no share can be found with a usable UEB block or fewer than N shares can be found then the Deferred fires with ``False``. Otherwise, it fires with a tuple of the sharemap, the UEB data, and the UEB hash. """ d = self._get_all_shareholders(self._storage_index) d.addCallback(self._get_uri_extension) d.addCallback(self._done) return d def _get_all_shareholders(self, storage_index): dl = [] for s in self._peer_getter(storage_index): d = s.get_storage_server().get_buckets(storage_index) d.addCallbacks(self._got_response, self._got_error, callbackArgs=(s,)) dl.append(d) return defer.DeferredList(dl) def _got_response(self, buckets, server): # buckets is a dict: maps shum to an rref of the server who holds it shnums_s = ",".join([str(shnum) for shnum in buckets]) self.log("got_response: [%r] has %d shares (%s)" % (server.get_name(), len(buckets), shnums_s), level=log.NOISY) self._found_shares.update(buckets.keys()) for k in buckets: self._sharemap.add(k, server.get_serverid()) self._readers.update( [ (bucket, server) for bucket in buckets.values() ] ) def _got_error(self, f): if f.check(DeadReferenceError): return log.err(f, parent=self._logparent) def _get_uri_extension(self, res): # assume that we can pull the UEB from any share. If we get an error, # declare the whole file unavailable. if not self._readers: self.log("no readers, so no UEB", level=log.NOISY) return b,server = self._readers.pop() rbp = ReadBucketProxy(b, server, si_b2a(self._storage_index)) d = rbp.get_uri_extension() d.addCallback(self._got_uri_extension) d.addErrback(self._ueb_error) return d def _got_uri_extension(self, ueb): self.log("_got_uri_extension", level=log.NOISY) self._ueb_hash = hashutil.uri_extension_hash(ueb) self._ueb_data = uri.unpack_extension(ueb) def _ueb_error(self, f): # an error means the file is unavailable, but the overall check # shouldn't fail. self.log("UEB fetch failed", failure=f, level=log.WEIRD, umid="sJLKVg") return None def _done(self, res): if self._ueb_data: found = len(self._found_shares) total = self._ueb_data['total_shares'] self.log(format="got %(found)d shares of %(total)d", found=found, total=total, level=log.NOISY) if found < total: # not all shares are present in the grid self.log("not enough to qualify, file not found in grid", level=log.NOISY) return False # all shares are present self.log("all shares present, file is found in grid", level=log.NOISY) return (self._sharemap, self._ueb_data, self._ueb_hash) # no shares are present self.log("unable to find UEB data, file not found in grid", level=log.NOISY) return False @implementer(interfaces.RICHKUploadHelper) class CHKUploadHelper(Referenceable, upload.CHKUploader): # type: ignore # warner/foolscap#78 """I am the helper-server -side counterpart to AssistedUploader. 
I handle peer selection, encoding, and share pushing. I read ciphertext from the remote AssistedUploader. """ VERSION = { b"http://allmydata.org/tahoe/protocols/helper/chk-upload/v1" : { }, b"application-version": allmydata.__full_version__.encode("utf-8"), } def __init__(self, storage_index, helper, storage_broker, secret_holder, incoming_file, encoding_file, log_number): upload.CHKUploader.__init__(self, storage_broker, secret_holder) self._storage_index = storage_index self._helper = helper self._incoming_file = incoming_file self._encoding_file = encoding_file self._upload_id = si_b2a(storage_index)[:5] self._log_number = log_number self._upload_status = upload.UploadStatus() self._upload_status.set_helper(False) self._upload_status.set_storage_index(storage_index) self._upload_status.set_status("fetching ciphertext") self._upload_status.set_progress(0, 1.0) self._helper.log("CHKUploadHelper starting for SI %r" % self._upload_id, parent=log_number) self._storage_broker = storage_broker self._secret_holder = secret_holder self._fetcher = CHKCiphertextFetcher(self, incoming_file, encoding_file, self._log_number) self._reader = LocalCiphertextReader(self, storage_index, encoding_file) self._finished_observers = observer.OneShotObserverList() self._started = time.time() d = self._fetcher.when_done() d.addCallback(lambda res: self._reader.start()) d.addCallback(lambda res: self.start_encrypted(self._reader)) d.addCallback(self._finished) d.addErrback(self._failed) def log(self, *args, **kwargs): if 'facility' not in kwargs: kwargs['facility'] = "tahoe.helper.chk" return upload.CHKUploader.log(self, *args, **kwargs) def remote_get_version(self): return self.VERSION def remote_upload(self, reader): # reader is an RIEncryptedUploadable. I am specified to return an # UploadResults dictionary. # Log how much ciphertext we need to get. self.log("deciding whether to upload the file or not", level=log.NOISY) if os.path.exists(self._encoding_file): # we have the whole file, and we might be encoding it (or the # encode/upload might have failed, and we need to restart it). self.log("ciphertext already in place", level=log.UNUSUAL) elif os.path.exists(self._incoming_file): # we have some of the file, but not all of it (otherwise we'd be # encoding). The caller might be useful. self.log("partial ciphertext already present", level=log.UNUSUAL) else: # we don't remember uploading this file self.log("no ciphertext yet", level=log.NOISY) # let our fetcher pull ciphertext from the reader. 
self._fetcher.add_reader(reader) # and also hashes self._reader.add_reader(reader) # and inform the client when the upload has finished return self._finished_observers.when_fired() def _finished(self, ur): assert interfaces.IUploadResults.providedBy(ur), ur vcapstr = ur.get_verifycapstr() precondition(isinstance(vcapstr, bytes), vcapstr) v = uri.from_string(vcapstr) f_times = self._fetcher.get_times() hur = upload.HelperUploadResults() hur.timings = {"cumulative_fetch": f_times["cumulative_fetch"], "total_fetch": f_times["total"], } for key,val in ur.get_timings().items(): hur.timings[key] = val hur.uri_extension_hash = v.uri_extension_hash hur.ciphertext_fetched = self._fetcher.get_ciphertext_fetched() hur.preexisting_shares = ur.get_preexisting_shares() # hur.sharemap needs to be {shnum: set(serverid)} hur.sharemap = {} for shnum, servers in ur.get_sharemap().items(): hur.sharemap[shnum] = set([s.get_serverid() for s in servers]) # and hur.servermap needs to be {serverid: set(shnum)} hur.servermap = {} for server, shnums in ur.get_servermap().items(): hur.servermap[server.get_serverid()] = set(shnums) hur.pushed_shares = ur.get_pushed_shares() hur.file_size = ur.get_file_size() hur.uri_extension_data = ur.get_uri_extension_data() hur.verifycapstr = vcapstr self._reader.close() os.unlink(self._encoding_file) self._finished_observers.fire(hur) self._helper.upload_finished(self._storage_index, v.size) del self._reader def _failed(self, f): self.log(format="CHKUploadHelper(%(si)s) failed", si=si_b2a(self._storage_index)[:5], failure=f, level=log.UNUSUAL) self._finished_observers.fire(f) self._helper.upload_finished(self._storage_index, 0) del self._reader class AskUntilSuccessMixin(object): # create me with a _reader array _last_failure = None def add_reader(self, reader): self._readers.append(reader) def call(self, *args, **kwargs): if not self._readers: raise NotEnoughWritersError("ran out of assisted uploaders, last failure was %s" % self._last_failure) rr = self._readers[0] d = rr.callRemote(*args, **kwargs) def _err(f): self._last_failure = f if rr in self._readers: self._readers.remove(rr) self._upload_helper.log("call to assisted uploader %s failed" % rr, failure=f, level=log.UNUSUAL) # we can try again with someone else who's left return self.call(*args, **kwargs) d.addErrback(_err) return d class CHKCiphertextFetcher(AskUntilSuccessMixin): """I use one or more remote RIEncryptedUploadable instances to gather ciphertext on disk. When I'm done, the file I create can be used by a LocalCiphertextReader to satisfy the ciphertext needs of a CHK upload process. I begin pulling ciphertext as soon as a reader is added. I remove readers when they have any sort of error. If the last reader is removed, I fire my when_done() Deferred with a failure. I fire my when_done() Deferred (with None) immediately after I have moved the ciphertext to 'encoded_file'. 
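    A hypothetical lifecycle sketch, mirroring the way CHKUploadHelper above
    wires me up (the variable names here are illustrative, not taken from a
    test):

        fetcher = CHKCiphertextFetcher(upload_helper, incoming_file,
                                       encoding_file, log_parent)
        fetcher.add_reader(remote_reader)  # starts pulling ciphertext
        d = fetcher.when_done()
        # when d fires (with None), encoding_file holds the complete
        # ciphertext and a LocalCiphertextReader can serve it to the encoder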
""" def __init__(self, helper, incoming_file, encoded_file, logparent): self._upload_helper = helper self._incoming_file = incoming_file self._encoding_file = encoded_file self._upload_id = helper._upload_id self._log_parent = logparent self._done_observers = observer.OneShotObserverList() self._readers = [] self._started = False self._f = None self._times = { "cumulative_fetch": 0.0, "total": 0.0, } self._ciphertext_fetched = 0 def log(self, *args, **kwargs): if "facility" not in kwargs: kwargs["facility"] = "tahoe.helper.chkupload.fetch" if "parent" not in kwargs: kwargs["parent"] = self._log_parent return log.msg(*args, **kwargs) def add_reader(self, reader): AskUntilSuccessMixin.add_reader(self, reader) eventually(self._start) def _start(self): if self._started: return self._started = True started = time.time() if os.path.exists(self._encoding_file): self.log("ciphertext already present, bypassing fetch", level=log.UNUSUAL) d = defer.succeed(None) else: # first, find out how large the file is going to be d = self.call("get_size") d.addCallback(self._got_size) d.addCallback(self._start_reading) d.addCallback(self._done) d.addCallback(self._done2, started) d.addErrback(self._failed) def _got_size(self, size): self.log("total size is %d bytes" % size, level=log.NOISY) self._upload_helper._upload_status.set_size(size) self._expected_size = size def _start_reading(self, res): # then find out how much crypttext we have on disk if os.path.exists(self._incoming_file): self._have = os.stat(self._incoming_file)[stat.ST_SIZE] self._upload_helper._helper.count("chk_upload_helper.resumes") self.log("we already have %d bytes" % self._have, level=log.NOISY) else: self._have = 0 self.log("we do not have any ciphertext yet", level=log.NOISY) self.log("starting ciphertext fetch", level=log.NOISY) self._f = open(self._incoming_file, "ab") # now loop to pull the data from the readers d = defer.Deferred() self._loop(d) # this Deferred will be fired once the last byte has been written to # self._f return d # read data in 50kB chunks. We should choose a more considered number # here, possibly letting the client specify it. The goal should be to # keep the RTT*bandwidth to be less than 10% of the chunk size, to reduce # the upload bandwidth lost because this protocol is non-windowing. Too # large, however, means more memory consumption for both ends. Something # that can be transferred in, say, 10 seconds sounds about right. On my # home DSL line (50kBps upstream), that suggests 500kB. Most lines are # slower, maybe 10kBps, which suggests 100kB, and that's a bit more # memory than I want to hang on to, so I'm going to go with 50kB and see # how that works. CHUNK_SIZE = 50*1024 def _loop(self, fire_when_done): # this slightly weird structure is needed because Deferreds don't do # tail-recursion, so it is important to let each one retire promptly. # Simply chaining them will cause a stack overflow at the end of a # transfer that involves more than a few hundred chunks. # 'fire_when_done' lives a long time, but the Deferreds returned by # the inner _fetch() call do not. 
start = time.time() d = defer.maybeDeferred(self._fetch) def _done(finished): elapsed = time.time() - start self._times["cumulative_fetch"] += elapsed if finished: self.log("finished reading ciphertext", level=log.NOISY) fire_when_done.callback(None) else: self._loop(fire_when_done) def _err(f): self.log(format="[%(si)s] ciphertext read failed", si=self._upload_id, failure=f, level=log.UNUSUAL) fire_when_done.errback(f) d.addCallbacks(_done, _err) return None def _fetch(self): needed = self._expected_size - self._have fetch_size = min(needed, self.CHUNK_SIZE) if fetch_size == 0: self._upload_helper._upload_status.set_progress(1, 1.0) return True # all done percent = 0.0 if self._expected_size: percent = 1.0 * (self._have+fetch_size) / self._expected_size self.log(format="fetching [%(si)s] %(start)d-%(end)d of %(total)d (%(percent)d%%)", si=self._upload_id, start=self._have, end=self._have+fetch_size, total=self._expected_size, percent=int(100.0*percent), level=log.NOISY) d = self.call("read_encrypted", self._have, fetch_size) def _got_data(ciphertext_v): for data in ciphertext_v: self._f.write(data) self._have += len(data) self._ciphertext_fetched += len(data) self._upload_helper._helper.count("chk_upload_helper.fetched_bytes", len(data)) self._upload_helper._upload_status.set_progress(1, percent) return False # not done d.addCallback(_got_data) return d def _done(self, res): self._f.close() self._f = None self.log(format="done fetching ciphertext, size=%(size)d", size=os.stat(self._incoming_file)[stat.ST_SIZE], level=log.NOISY) os.rename(self._incoming_file, self._encoding_file) def _done2(self, _ignored, started): self.log("done2", level=log.NOISY) elapsed = time.time() - started self._times["total"] = elapsed self._readers = [] self._done_observers.fire(None) def _failed(self, f): if self._f: self._f.close() self._readers = [] self._done_observers.fire(f) def when_done(self): return self._done_observers.when_fired() def get_times(self): return self._times def get_ciphertext_fetched(self): return self._ciphertext_fetched @implementer(interfaces.IEncryptedUploadable) class LocalCiphertextReader(AskUntilSuccessMixin): def __init__(self, upload_helper, storage_index, encoding_file): self._readers = [] self._upload_helper = upload_helper self._storage_index = storage_index self._encoding_file = encoding_file self._status = None def start(self): self._upload_helper._upload_status.set_status("pushing") self._size = os.stat(self._encoding_file)[stat.ST_SIZE] self.f = open(self._encoding_file, "rb") def get_size(self): return defer.succeed(self._size) def get_all_encoding_parameters(self): return self.call("get_all_encoding_parameters") def get_storage_index(self): return defer.succeed(self._storage_index) def read_encrypted(self, length, hash_only): assert hash_only is False d = defer.maybeDeferred(self.f.read, length) d.addCallback(lambda data: [data]) return d def close(self): self.f.close() # ??. I'm not sure if it makes sense to forward the close message. return self.call("close") # https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3561 def set_upload_status(self, upload_status): raise NotImplementedError @implementer(interfaces.RIHelper, interfaces.IStatsProducer) class Helper(Referenceable): # type: ignore # warner/foolscap#78 """ :ivar dict[bytes, CHKUploadHelper] _active_uploads: For any uploads which have been started but not finished, a mapping from storage index to the upload helper. 
:ivar chk_checker: A callable which returns an object like a CHKCheckerAndUEBFetcher instance which can check CHK shares. Primarily for the convenience of tests to override. :ivar chk_upload: A callable which returns an object like a CHKUploadHelper instance which can upload CHK shares. Primarily for the convenience of tests to override. """ # this is the non-distributed version. When we need to have multiple # helpers, this object will become the HelperCoordinator, and will query # the farm of Helpers to see if anyone has the storage_index of interest, # and send the request off to them. If nobody has it, we'll choose a # helper at random. name = "helper" VERSION = { b"http://allmydata.org/tahoe/protocols/helper/v1" : { }, b"application-version": allmydata.__full_version__.encode("utf-8"), } MAX_UPLOAD_STATUSES = 10 chk_checker = CHKCheckerAndUEBFetcher chk_upload = CHKUploadHelper def __init__(self, basedir, storage_broker, secret_holder, stats_provider, history): self._basedir = basedir self._storage_broker = storage_broker self._secret_holder = secret_holder self._chk_incoming = os.path.join(basedir, "CHK_incoming") self._chk_encoding = os.path.join(basedir, "CHK_encoding") fileutil.make_dirs(self._chk_incoming) fileutil.make_dirs(self._chk_encoding) self._active_uploads = {} self._all_uploads = weakref.WeakKeyDictionary() # for debugging self.stats_provider = stats_provider if stats_provider: stats_provider.register_producer(self) self._counters = {"chk_upload_helper.upload_requests": 0, "chk_upload_helper.upload_already_present": 0, "chk_upload_helper.upload_need_upload": 0, "chk_upload_helper.resumes": 0, "chk_upload_helper.fetched_bytes": 0, "chk_upload_helper.encoded_bytes": 0, } self._history = history def log(self, *args, **kwargs): if 'facility' not in kwargs: kwargs['facility'] = "tahoe.helper" return log.msg(*args, **kwargs) def count(self, key, value=1): if self.stats_provider: self.stats_provider.count(key, value) self._counters[key] += value def get_stats(self): OLD = 86400*2 # 48hours now = time.time() inc_count = inc_size = inc_size_old = 0 enc_count = enc_size = enc_size_old = 0 inc = os.listdir(self._chk_incoming) enc = os.listdir(self._chk_encoding) for f in inc: s = os.stat(os.path.join(self._chk_incoming, f)) size = s[stat.ST_SIZE] mtime = s[stat.ST_MTIME] inc_count += 1 inc_size += size if now - mtime > OLD: inc_size_old += size for f in enc: s = os.stat(os.path.join(self._chk_encoding, f)) size = s[stat.ST_SIZE] mtime = s[stat.ST_MTIME] enc_count += 1 enc_size += size if now - mtime > OLD: enc_size_old += size stats = { 'chk_upload_helper.active_uploads': len(self._active_uploads), 'chk_upload_helper.incoming_count': inc_count, 'chk_upload_helper.incoming_size': inc_size, 'chk_upload_helper.incoming_size_old': inc_size_old, 'chk_upload_helper.encoding_count': enc_count, 'chk_upload_helper.encoding_size': enc_size, 'chk_upload_helper.encoding_size_old': enc_size_old, } stats.update(self._counters) return stats def remote_get_version(self): return self.VERSION def remote_upload_chk(self, storage_index): """ See ``RIHelper.upload_chk`` """ self.count("chk_upload_helper.upload_requests") lp = self.log(format="helper: upload_chk query for SI %(si)s", si=si_b2a(storage_index)) if storage_index in self._active_uploads: self.log("upload is currently active", parent=lp) uh = self._active_uploads[storage_index] return (None, uh) d = self._check_chk(storage_index, lp) d.addCallback(self._did_chk_check, storage_index, lp) def _err(f): self.log("error while checking for 
chk-already-in-grid", failure=f, level=log.WEIRD, parent=lp, umid="jDtxZg") return f d.addErrback(_err) return d def _check_chk(self, storage_index, lp): # see if this file is already in the grid lp2 = self.log("doing a quick check+UEBfetch", parent=lp, level=log.NOISY) sb = self._storage_broker c = self.chk_checker(sb.get_servers_for_psi, storage_index, lp2) d = c.check() def _checked(res): if res: (sharemap, ueb_data, ueb_hash) = res self.log("found file in grid", level=log.NOISY, parent=lp) hur = upload.HelperUploadResults() hur.uri_extension_hash = ueb_hash hur.sharemap = sharemap hur.uri_extension_data = ueb_data hur.preexisting_shares = len(sharemap) hur.pushed_shares = 0 return hur return None d.addCallback(_checked) return d def _did_chk_check(self, already_present, storage_index, lp): if already_present: # the necessary results are placed in the UploadResults self.count("chk_upload_helper.upload_already_present") self.log("file already found in grid", parent=lp) return (already_present, None) self.count("chk_upload_helper.upload_need_upload") # the file is not present in the grid, by which we mean there are # less than 'N' shares available. self.log("unable to find file in the grid", parent=lp, level=log.NOISY) # We need an upload helper. Check our active uploads again in # case there was a race. if storage_index in self._active_uploads: self.log("upload is currently active", parent=lp) uh = self._active_uploads[storage_index] else: self.log("creating new upload helper", parent=lp) uh = self._make_chk_upload_helper(storage_index, lp) self._active_uploads[storage_index] = uh self._add_upload(uh) return (None, uh) def _make_chk_upload_helper(self, storage_index, lp): si_s = si_b2a(storage_index).decode('ascii') incoming_file = os.path.join(self._chk_incoming, si_s) encoding_file = os.path.join(self._chk_encoding, si_s) uh = self.chk_upload( storage_index, self, self._storage_broker, self._secret_holder, incoming_file, encoding_file, lp, ) return uh def _add_upload(self, uh): self._all_uploads[uh] = None if self._history: s = uh.get_upload_status() self._history.notify_helper_upload(s) def upload_finished(self, storage_index, size): # this is called with size=0 if the upload failed self.count("chk_upload_helper.encoded_bytes", size) uh = self._active_uploads[storage_index] del self._active_uploads[storage_index] s = uh.get_upload_status() s.set_active(False) tahoe_lafs-1.20.0/src/allmydata/immutable/repairer.py0000644000000000000000000001022213615410400017515 0ustar00""" Ported to Python 3. """ from zope.interface import implementer from twisted.internet import defer from allmydata.storage.server import si_b2a from allmydata.util import log, consumer from allmydata.util.assertutil import precondition from allmydata.interfaces import IEncryptedUploadable from allmydata.immutable import upload @implementer(IEncryptedUploadable) class Repairer(log.PrefixingLogMixin): """I generate any shares which were not available and upload them to servers. Which servers? Well, I just use the normal upload process, so any servers that will take shares. In fact, I even believe servers if they say that they already have shares even if attempts to download those shares would fail because the shares are corrupted. 
My process of uploading replacement shares proceeds in a segment-wise fashion -- first I ask servers if they can hold the new shares, and wait until enough have agreed then I download the first segment of the file and upload the first block of each replacement share, and only after all those blocks have been uploaded do I download the second segment of the file and upload the second block of each replacement share to its respective server. (I do it this way in order to minimize the amount of downloading I have to do and the amount of memory I have to use at any one time.) If any of the servers to which I am uploading replacement shares fails to accept the blocks during this process, then I just stop using that server, abandon any share-uploads that were going to that server, and proceed to finish uploading the remaining shares to their respective servers. At the end of my work, I produce an object which satisfies the ICheckAndRepairResults interface (by firing the deferred that I returned from start() and passing that check-and-repair-results object). Before I send any new request to a server, I always ask the 'monitor' object that was passed into my constructor whether this task has been cancelled (by invoking its raise_if_cancelled() method). """ def __init__(self, filenode, storage_broker, secret_holder, monitor): logprefix = si_b2a(filenode.get_storage_index())[:5] log.PrefixingLogMixin.__init__(self, "allmydata.immutable.repairer", prefix=logprefix) self._filenode = filenode self._storage_broker = storage_broker self._secret_holder = secret_holder self._monitor = monitor self._offset = 0 def start(self): self.log("starting repair") d = self._filenode.get_segment_size() def _got_segsize(segsize): vcap = self._filenode.get_verify_cap() k = vcap.needed_shares N = vcap.total_shares # Per ticket #1212 # (http://tahoe-lafs.org/trac/tahoe-lafs/ticket/1212) happy = 0 self._encodingparams = (k, happy, N, segsize) # XXX should pass a reactor to this ul = upload.CHKUploader(self._storage_broker, self._secret_holder) return ul.start(self) # I am the IEncryptedUploadable d.addCallback(_got_segsize) return d # methods to satisfy the IEncryptedUploader interface # (From the perspective of an uploader I am an IEncryptedUploadable.) def set_upload_status(self, upload_status): self.upload_status = upload_status def get_size(self): size = self._filenode.get_size() assert size is not None return defer.succeed(size) def get_all_encoding_parameters(self): return defer.succeed(self._encodingparams) def read_encrypted(self, length, hash_only): """Returns a deferred which eventually fires with the requested ciphertext, as a list of strings.""" precondition(length) # please don't ask to read 0 bytes mc = consumer.MemoryConsumer() d = self._filenode.read(mc, self._offset, length) self._offset += length d.addCallback(lambda ign: mc.chunks) return d def get_storage_index(self): return self._filenode.get_storage_index() def close(self): pass tahoe_lafs-1.20.0/src/allmydata/immutable/upload.py0000644000000000000000000023103313615410400017175 0ustar00""" Ported to Python 3. 
""" from __future__ import annotations from six import ensure_str import os, time, weakref, itertools import attr from zope.interface import implementer from twisted.python import failure from twisted.internet import defer from twisted.application import service from foolscap.api import Referenceable, Copyable, RemoteCopy from allmydata.crypto import aes from allmydata.util.hashutil import file_renewal_secret_hash, \ file_cancel_secret_hash, bucket_renewal_secret_hash, \ bucket_cancel_secret_hash, plaintext_hasher, \ storage_index_hash, plaintext_segment_hasher, convergence_hasher from allmydata.util.deferredutil import ( timeout_call, until, ) from allmydata import hashtree, uri from allmydata.storage.server import si_b2a from allmydata.immutable import encode from allmydata.util import base32, dictutil, idlib, log, mathutil from allmydata.util.happinessutil import servers_of_happiness, \ merge_servers, failure_message from allmydata.util.assertutil import precondition, _assert from allmydata.util.rrefutil import add_version_to_remote_reference from allmydata.interfaces import IUploadable, IUploader, IUploadResults, \ IEncryptedUploadable, RIEncryptedUploadable, IUploadStatus, \ NoServersError, InsufficientVersionError, UploadUnhappinessError, \ DEFAULT_IMMUTABLE_MAX_SEGMENT_SIZE, IPeerSelector from allmydata.immutable import layout from io import BytesIO from .happiness_upload import share_placement, calculate_happiness from ..util.eliotutil import ( log_call_deferred, inline_callbacks, ) from eliot import ( ActionType, MessageType, Field, ) _TOTAL_SHARES = Field.for_types( u"total_shares", [int], u"The total number of shares desired.", ) def _serialize_peers(peers): return sorted(base32.b2a(p) for p in peers) _PEERS = Field( u"peers", _serialize_peers, u"The read/write peers being considered.", ) _READONLY_PEERS = Field( u"readonly_peers", _serialize_peers, u"The read-only peers being considered.", ) def _serialize_existing_shares(existing_shares): return { ensure_str(server): list(shares) for (server, shares) in existing_shares.items() } _EXISTING_SHARES = Field( u"existing_shares", _serialize_existing_shares, u"The shares that are believed to already have been placed.", ) def _serialize_happiness_mappings(happiness_mappings): return { str(sharenum): ensure_str(base32.b2a(serverid)) for (sharenum, serverid) in happiness_mappings.items() } _HAPPINESS_MAPPINGS = Field( u"happiness_mappings", _serialize_happiness_mappings, u"The computed happiness mapping for a particular upload.", ) _HAPPINESS = Field.for_types( u"happiness", [int], u"The computed happiness of a certain placement.", ) _UPLOAD_TRACKERS = Field( u"upload_trackers", lambda trackers: list( dict( server=ensure_str(tracker.get_name()), shareids=sorted(tracker.buckets.keys()), ) for tracker in trackers ), u"Some servers which have agreed to hold some shares for us.", ) _ALREADY_SERVERIDS = Field( u"already_serverids", lambda d: {str(k): v for k, v in d.items()}, u"Some servers which are already holding some shares that we were interested in uploading.", ) LOCATE_ALL_SHAREHOLDERS = ActionType( u"immutable:upload:locate-all-shareholders", [], [_UPLOAD_TRACKERS, _ALREADY_SERVERIDS], u"Existing shareholders are being identified to plan upload actions.", ) GET_SHARE_PLACEMENTS = MessageType( u"immutable:upload:get-share-placements", [_TOTAL_SHARES, _PEERS, _READONLY_PEERS, _EXISTING_SHARES, _HAPPINESS_MAPPINGS, _HAPPINESS], u"Share placement is being computed for an upload.", ) _EFFECTIVE_HAPPINESS = Field.for_types( 
u"effective_happiness", [int], u"The computed happiness value of a share placement map.", ) CONVERGED_HAPPINESS = MessageType( u"immutable:upload:get-shareholders:converged-happiness", [_EFFECTIVE_HAPPINESS], u"The share placement algorithm has converged and placements efforts are complete.", ) # this wants to live in storage, not here class TooFullError(Exception): pass # HelperUploadResults are what we get from the Helper, and to retain # backwards compatibility with old Helpers we can't change the format. We # convert them into a local UploadResults upon receipt. class HelperUploadResults(Copyable, RemoteCopy): # note: don't change this string, it needs to match the value used on the # helper, and it does *not* need to match the fully-qualified # package/module/class name # # Needs to be native string to make Foolscap happy. typeToCopy = "allmydata.upload.UploadResults.tahoe.allmydata.com" copytype = typeToCopy # also, think twice about changing the shape of any existing attribute, # because instances of this class are sent from the helper to its client, # so changing this may break compatibility. Consider adding new fields # instead of modifying existing ones. def __init__(self): self.timings = {} # dict of name to number of seconds self.sharemap = dictutil.DictOfSets() # {shnum: set(serverid)} self.servermap = dictutil.DictOfSets() # {serverid: set(shnum)} self.file_size = None self.ciphertext_fetched = None # how much the helper fetched self.uri = None self.preexisting_shares = None # count of shares already present self.pushed_shares = None # count of shares we pushed @implementer(IUploadResults) class UploadResults(object): def __init__(self, file_size, ciphertext_fetched, # how much the helper fetched preexisting_shares, # count of shares already present pushed_shares, # count of shares we pushed sharemap, # {shnum: set(server)} servermap, # {server: set(shnum)} timings, # dict of name to number of seconds uri_extension_data, uri_extension_hash, verifycapstr): self._file_size = file_size self._ciphertext_fetched = ciphertext_fetched self._preexisting_shares = preexisting_shares self._pushed_shares = pushed_shares self._sharemap = sharemap self._servermap = servermap self._timings = timings self._uri_extension_data = uri_extension_data self._uri_extension_hash = uri_extension_hash self._verifycapstr = verifycapstr def set_uri(self, uri): self._uri = uri def get_file_size(self): return self._file_size def get_uri(self): return self._uri def get_ciphertext_fetched(self): return self._ciphertext_fetched def get_preexisting_shares(self): return self._preexisting_shares def get_pushed_shares(self): return self._pushed_shares def get_sharemap(self): return self._sharemap def get_servermap(self): return self._servermap def get_timings(self): return self._timings def get_uri_extension_data(self): return self._uri_extension_data def get_verifycapstr(self): return self._verifycapstr def pretty_print_shnum_to_servers(s): return ', '.join([ "sh%s: %s" % (k, '+'.join([idlib.shortnodeid_b2a(x) for x in v])) for k, v in s.items() ]) class ServerTracker(object): def __init__(self, server, sharesize, blocksize, num_segments, num_share_hashes, storage_index, bucket_renewal_secret, bucket_cancel_secret, uri_extension_size): self._server = server self.buckets = {} # k: shareid, v: IRemoteBucketWriter self.sharesize = sharesize self.uri_extension_size = uri_extension_size wbp = layout.make_write_bucket_proxy(None, None, sharesize, blocksize, num_segments, num_share_hashes, uri_extension_size) 
self.wbp_class = wbp.__class__ # to create more of them self.allocated_size = wbp.get_allocated_size() self.blocksize = blocksize self.num_segments = num_segments self.num_share_hashes = num_share_hashes self.storage_index = storage_index self.renew_secret = bucket_renewal_secret self.cancel_secret = bucket_cancel_secret def __repr__(self): return ("" % (self._server.get_name(), si_b2a(self.storage_index)[:5])) def get_server(self): return self._server def get_serverid(self): return self._server.get_serverid() def get_name(self): return self._server.get_name() def query(self, sharenums): storage_server = self._server.get_storage_server() d = storage_server.allocate_buckets( self.storage_index, self.renew_secret, self.cancel_secret, sharenums, self.allocated_size, canary=Referenceable(), ) d.addCallback(self._buckets_allocated) return d def ask_about_existing_shares(self): storage_server = self._server.get_storage_server() return storage_server.get_buckets(self.storage_index) def _buckets_allocated(self, alreadygot_and_buckets): #log.msg("%s._got_reply(%s)" % (self, (alreadygot, buckets))) (alreadygot, buckets) = alreadygot_and_buckets b = {} for sharenum, rref in list(buckets.items()): bp = self.wbp_class(rref, self._server, self.sharesize, self.blocksize, self.num_segments, self.num_share_hashes, self.uri_extension_size) b[sharenum] = bp self.buckets.update(b) return (alreadygot, set(b.keys())) def abort(self): """ I abort the remote bucket writers for all shares. This is a good idea to conserve space on the storage server. """ self.abort_some_buckets(list(self.buckets.keys())) def abort_some_buckets(self, sharenums): """ I abort the remote bucket writers for the share numbers in sharenums. """ for sharenum in sharenums: if sharenum in self.buckets: self.buckets[sharenum].abort() del self.buckets[sharenum] def str_shareloc(shnum, bucketwriter): return "%s: %s" % (shnum, ensure_str(bucketwriter.get_servername()),) @implementer(IPeerSelector) class PeerSelector(object): def __init__(self, num_segments, total_shares, needed_shares, min_happiness): self.num_segments = num_segments self.total_shares = total_shares self.needed_shares = needed_shares self.min_happiness = min_happiness self.existing_shares = {} self.peers = set() self.readonly_peers = set() self.bad_peers = set() def add_peer_with_share(self, peerid, shnum): try: self.existing_shares[peerid].add(shnum) except KeyError: self.existing_shares[peerid] = set([shnum]) def add_peer(self, peerid): self.peers.add(peerid) def mark_readonly_peer(self, peerid): self.readonly_peers.add(peerid) self.peers.remove(peerid) def mark_bad_peer(self, peerid): if peerid in self.peers: self.peers.remove(peerid) self.bad_peers.add(peerid) elif peerid in self.readonly_peers: self.readonly_peers.remove(peerid) self.bad_peers.add(peerid) def get_sharemap_of_preexisting_shares(self): preexisting = dictutil.DictOfSets() for server, shares in self.existing_shares.items(): for share in shares: preexisting.add(share, server) return preexisting def get_share_placements(self): shares = set(range(self.total_shares)) self.happiness_mappings = share_placement(self.peers, self.readonly_peers, shares, self.existing_shares) self.happiness = calculate_happiness(self.happiness_mappings) GET_SHARE_PLACEMENTS.log( total_shares=self.total_shares, peers=self.peers, readonly_peers=self.readonly_peers, existing_shares=self.existing_shares, happiness_mappings=self.happiness_mappings, happiness=self.happiness, ) return self.happiness_mappings def add_peers(self, peerids=None): 
raise NotImplementedError class _QueryStatistics(object): def __init__(self): self.total = 0 self.good = 0 self.bad = 0 self.full = 0 self.error = 0 self.contacted = 0 def __str__(self): return "QueryStatistics(total={} good={} bad={} full={} " \ "error={} contacted={})".format( self.total, self.good, self.bad, self.full, self.error, self.contacted, ) class Tahoe2ServerSelector(log.PrefixingLogMixin): def __init__(self, upload_id, logparent=None, upload_status=None, reactor=None): self.upload_id = upload_id self._query_stats = _QueryStatistics() self.last_failure_msg = None self._status = IUploadStatus(upload_status) log.PrefixingLogMixin.__init__(self, 'tahoe.immutable.upload', logparent, prefix=upload_id) self.log("starting", level=log.OPERATIONAL) if reactor is None: from twisted.internet import reactor self._reactor = reactor def __repr__(self): return "" % self.upload_id def _create_trackers(self, candidate_servers, allocated_size, file_renewal_secret, file_cancel_secret, create_server_tracker): # filter the list of servers according to which ones can accomodate # this request. This excludes older servers (which used a 4-byte size # field) from getting large shares (for files larger than about # 12GiB). See #439 for details. def _get_maxsize(server): v0 = server.get_version() v1 = v0[b"http://allmydata.org/tahoe/protocols/storage/v1"] return v1[b"maximum-immutable-share-size"] for server in candidate_servers: self.peer_selector.add_peer(server.get_serverid()) writeable_servers = [ server for server in candidate_servers if _get_maxsize(server) >= allocated_size ] readonly_servers = set(candidate_servers) - set(writeable_servers) for server in readonly_servers: self.peer_selector.mark_readonly_peer(server.get_serverid()) def _make_trackers(servers): trackers = [] for s in servers: seed = s.get_lease_seed() renew = bucket_renewal_secret_hash(file_renewal_secret, seed) cancel = bucket_cancel_secret_hash(file_cancel_secret, seed) st = create_server_tracker(s, renew, cancel) trackers.append(st) return trackers write_trackers = _make_trackers(writeable_servers) # We don't try to allocate shares to these servers, since they've # said that they're incapable of storing shares of the size that we'd # want to store. We ask them about existing shares for this storage # index, which we want to know about for accurate # servers_of_happiness accounting, then we forget about them. readonly_trackers = _make_trackers(readonly_servers) return readonly_trackers, write_trackers @inline_callbacks def get_shareholders(self, storage_broker, secret_holder, storage_index, share_size, block_size, num_segments, total_shares, needed_shares, min_happiness, uri_extension_size): """ @return: (upload_trackers, already_serverids), where upload_trackers is a set of ServerTracker instances that have agreed to hold some shares for us (the shareids are stashed inside the ServerTracker), and already_serverids is a dict mapping shnum to a set of serverids for servers which claim to already have the share. 
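        The overall flow follows docs/specifications/servers-of-happiness.rst:
        take up to 2*N candidate servers, ask them which shares they already
        hold, compute a placement map with the peer selector, issue
        allocate_buckets requests, and repeat until the happiness target is
        met, placement stops improving, or no writable servers remain.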
""" # re-initialize statistics self._query_status = _QueryStatistics() if self._status: self._status.set_status("Contacting Servers..") self.peer_selector = PeerSelector(num_segments, total_shares, needed_shares, min_happiness) self.total_shares = total_shares self.min_happiness = min_happiness self.needed_shares = needed_shares self.homeless_shares = set(range(total_shares)) self.use_trackers = set() # ServerTrackers that have shares assigned # to them self.preexisting_shares = {} # shareid => set(serverids) holding shareid # These servers have shares -- any shares -- for our SI. We keep # track of these to write an error message with them later. self.serverids_with_shares = set() # this needed_hashes computation should mirror # Encoder.send_all_share_hash_trees. We use an IncompleteHashTree # (instead of a HashTree) because we don't require actual hashing # just to count the levels. ht = hashtree.IncompleteHashTree(total_shares) num_share_hashes = len(ht.needed_hashes(0, include_leaf=True)) # figure out how much space to ask for wbp = layout.make_write_bucket_proxy(None, None, share_size, 0, num_segments, num_share_hashes, uri_extension_size) allocated_size = wbp.get_allocated_size() # decide upon the renewal/cancel secrets, to include them in the # allocate_buckets query. file_renewal_secret = file_renewal_secret_hash( secret_holder.get_renewal_secret(), storage_index, ) file_cancel_secret = file_cancel_secret_hash( secret_holder.get_cancel_secret(), storage_index, ) # see docs/specifications/servers-of-happiness.rst # 0. Start with an ordered list of servers. Maybe *2N* of them. # all_servers = storage_broker.get_servers_for_psi(storage_index, for_upload=True) if not all_servers: raise NoServersError("client gave us zero servers") def _create_server_tracker(server, renew, cancel): return ServerTracker( server, share_size, block_size, num_segments, num_share_hashes, storage_index, renew, cancel, uri_extension_size ) readonly_trackers, write_trackers = self._create_trackers( all_servers[:(2 * total_shares)], allocated_size, file_renewal_secret, file_cancel_secret, _create_server_tracker, ) # see docs/specifications/servers-of-happiness.rst # 1. Query all servers for existing shares. # # The spec doesn't say what to do for timeouts/errors. This # adds a timeout to each request, and rejects any that reply # with error (i.e. just removed from the list) ds = [] if self._status and readonly_trackers: self._status.set_status( "Contacting readonly servers to find any existing shares" ) # in the "pre servers-of-happiness" code, it was a little # ambigious whether "merely asking" counted as a "query" or # not, because "allocate_buckets" with nothing to allocate was # used to "ask" a write-able server what it held. Now we count # "actual allocation queries" only, because those are the only # things that actually affect what the server does. 
for tracker in readonly_trackers: assert isinstance(tracker, ServerTracker) d = timeout_call(self._reactor, tracker.ask_about_existing_shares(), 15) d.addBoth(self._handle_existing_response, tracker) ds.append(d) self.log("asking server %r for any existing shares" % (tracker.get_name(),), level=log.NOISY) for tracker in write_trackers: assert isinstance(tracker, ServerTracker) d = timeout_call(self._reactor, tracker.ask_about_existing_shares(), 15) def timed_out(f, tracker): # print("TIMEOUT {}: {}".format(tracker, f)) write_trackers.remove(tracker) readonly_trackers.append(tracker) return f d.addErrback(timed_out, tracker) d.addBoth(self._handle_existing_write_response, tracker, set()) ds.append(d) self.log("asking server %r for any existing shares" % (tracker.get_name(),), level=log.NOISY) trackers = set(write_trackers) | set(readonly_trackers) # these will always be (True, None) because errors are handled # in the _handle_existing_write_response etc callbacks yield defer.DeferredList(ds) # okay, we've queried the 2N servers, time to get the share # placements and attempt to actually place the shares (or # renew them on read-only servers). We want to run the loop # below *at least once* because even read-only servers won't # renew their shares until "allocate_buckets" is called (via # tracker.query()) # https://tahoe-lafs.org/trac/tahoe-lafs/ticket/778#comment:48 # min_happiness will be 0 for the repairer, so we set current # effective_happiness to less than zero so this loop runs at # least once for the repairer... def _bad_server(fail, tracker): self.last_failure_msg = fail return False # will mark it readonly def _make_readonly(tracker): # print("making {} read-only".format(tracker.get_serverid())) try: write_trackers.remove(tracker) except ValueError: pass # XXX can we just use a set() or does order matter? if tracker not in readonly_trackers: readonly_trackers.append(tracker) return None # so we *always* want to run this loop at least once, even if # we only have read-only servers -- because asking them to # allocate buckets renews those shares they already have. For # subsequent loops, we give up if we've achieved happiness OR # if we have zero writable servers left last_happiness = None effective_happiness = -1 while effective_happiness < min_happiness and \ (last_happiness is None or len(write_trackers)): errors_before = self._query_stats.bad self._share_placements = self.peer_selector.get_share_placements() placements = [] for tracker in trackers: shares_to_ask = self._allocation_for(tracker) # if we already tried to upload share X to this very # same server in a previous iteration, we should *not* # ask again. If we *do* ask, there's no real harm, but # the server will respond with an empty dict and that # confuses our statistics. However, if the server is a # readonly sever, we *do* want to ask so it refreshes # the share. 
if shares_to_ask != set(tracker.buckets.keys()) or tracker in readonly_trackers: self._query_stats.total += 1 self._query_stats.contacted += 1 d = timeout_call(self._reactor, tracker.query(shares_to_ask), 15) d.addBoth(self._buckets_allocated, tracker, shares_to_ask) d.addErrback(lambda f, tr: _bad_server(f, tr), tracker) d.addCallback(lambda x, tr: _make_readonly(tr) if not x else x, tracker) placements.append(d) yield defer.DeferredList(placements) merged = merge_servers(self.peer_selector.get_sharemap_of_preexisting_shares(), self.use_trackers) effective_happiness = servers_of_happiness(merged) if effective_happiness == last_happiness: # print("effective happiness still {}".format(last_happiness)) # we haven't improved over the last iteration; give up break; if errors_before == self._query_stats.bad: break; last_happiness = effective_happiness # print("write trackers left: {}".format(len(write_trackers))) # note: peer_selector.get_allocations() only maps "things we # uploaded in the above loop" and specificaly does *not* # include any pre-existing shares on read-only servers .. but # we *do* want to count those shares towards total happiness. # no more servers. If we haven't placed enough shares, we fail. # XXX note sometimes we're not running the loop at least once, # and so 'merged' must be (re-)computed here. merged = merge_servers(self.peer_selector.get_sharemap_of_preexisting_shares(), self.use_trackers) effective_happiness = servers_of_happiness(merged) # print("placements completed {} vs {}".format(effective_happiness, min_happiness)) # for k, v in merged.items(): # print(" {} -> {}".format(k, v)) CONVERGED_HAPPINESS.log( effective_happiness=effective_happiness, ) if effective_happiness < min_happiness: msg = failure_message( peer_count=len(self.serverids_with_shares), k=self.needed_shares, happy=min_happiness, effective_happy=effective_happiness, ) msg = ("server selection failed for %s: %s (%s), merged=%s" % (self, msg, self._get_progress_message(), pretty_print_shnum_to_servers(merged))) if self.last_failure_msg: msg += " (%s)" % (self.last_failure_msg,) self.log(msg, level=log.UNUSUAL) self._failed(msg) # raises UploadUnhappinessError return # we placed (or already had) enough to be happy, so we're done if self._status: self._status.set_status("Placed all shares") msg = ("server selection successful for %s: %s: pretty_print_merged: %s, " "self.use_trackers: %s, self.preexisting_shares: %s") \ % (self, self._get_progress_message(), pretty_print_shnum_to_servers(merged), [', '.join([str_shareloc(k,v) for k,v in st.buckets.items()]) for st in self.use_trackers], pretty_print_shnum_to_servers(self.preexisting_shares)) self.log(msg, level=log.OPERATIONAL) defer.returnValue((self.use_trackers, self.peer_selector.get_sharemap_of_preexisting_shares())) def _handle_existing_response(self, res, tracker): """ I handle responses to the queries sent by Tahoe2ServerSelector.get_shareholders. 
""" serverid = tracker.get_serverid() if isinstance(res, failure.Failure): self.log("%s got error during existing shares check: %s" % (tracker.get_name(), res), level=log.UNUSUAL) self.peer_selector.mark_bad_peer(serverid) else: buckets = res if buckets: self.serverids_with_shares.add(serverid) self.log("response to get_buckets() from server %r: alreadygot=%s" % (tracker.get_name(), tuple(sorted(buckets))), level=log.NOISY) for bucket in buckets: self.peer_selector.add_peer_with_share(serverid, bucket) self.preexisting_shares.setdefault(bucket, set()).add(serverid) self.homeless_shares.discard(bucket) def _handle_existing_write_response(self, res, tracker, shares_to_ask): """ Function handles the response from the write servers when inquiring about what shares each server already has. """ if isinstance(res, failure.Failure): self.peer_selector.mark_bad_peer(tracker.get_serverid()) self.log("%s got error during server selection: %s" % (tracker, res), level=log.UNUSUAL) self.homeless_shares |= shares_to_ask msg = ("last failure (from %s) was: %s" % (tracker, res)) self.last_failure_msg = msg else: for share in res.keys(): self.peer_selector.add_peer_with_share(tracker.get_serverid(), share) def _get_progress_message(self): if not self.homeless_shares: msg = "placed all %d shares, " % (self.total_shares) else: msg = ("placed %d shares out of %d total (%d homeless), " % (self.total_shares - len(self.homeless_shares), self.total_shares, len(self.homeless_shares))) assert self._query_stats.bad == (self._query_stats.full + self._query_stats.error) return ( msg + "want to place shares on at least {happy} servers such that " "any {needed} of them have enough shares to recover the file, " "sent {queries} queries to {servers} servers, " "{good} queries placed some shares, {bad} placed none " "(of which {full} placed none due to the server being" " full and {error} placed none due to an error)".format( happy=self.min_happiness, needed=self.needed_shares, queries=self._query_stats.total, servers=self._query_stats.contacted, good=self._query_stats.good, bad=self._query_stats.bad, full=self._query_stats.full, error=self._query_stats.error ) ) def _allocation_for(self, tracker): """ Given a ServerTracker, return a list of shares that we should store on that server. """ assert isinstance(tracker, ServerTracker) shares_to_ask = set() servermap = self._share_placements for shnum, tracker_id in list(servermap.items()): if tracker_id == None: continue if tracker.get_serverid() == tracker_id: shares_to_ask.add(shnum) if shnum in self.homeless_shares: self.homeless_shares.remove(shnum) if self._status: self._status.set_status("Contacting Servers [%r] (first query)," " %d shares left.." % (tracker.get_name(), len(self.homeless_shares))) return shares_to_ask def _buckets_allocated(self, res, tracker, shares_to_ask): """ Internal helper. If this returns an error or False, the server will be considered read-only for any future iterations. """ if isinstance(res, failure.Failure): # This is unusual, and probably indicates a bug or a network # problem. 
self.log("%s got error during server selection: %s" % (tracker, res), level=log.UNUSUAL) self._query_stats.error += 1 self._query_stats.bad += 1 self.homeless_shares |= shares_to_ask try: self.peer_selector.mark_readonly_peer(tracker.get_serverid()) except KeyError: pass return res else: (alreadygot, allocated) = res self.log("response to allocate_buckets() from server %r: alreadygot=%s, allocated=%s" % (tracker.get_name(), tuple(sorted(alreadygot)), tuple(sorted(allocated))), level=log.NOISY) progress = False for s in alreadygot: self.preexisting_shares.setdefault(s, set()).add(tracker.get_serverid()) if s in self.homeless_shares: self.homeless_shares.remove(s) progress = True elif s in shares_to_ask: progress = True # the ServerTracker will remember which shares were allocated on # that peer. We just have to remember to use them. if allocated: self.use_trackers.add(tracker) progress = True if allocated or alreadygot: self.serverids_with_shares.add(tracker.get_serverid()) not_yet_present = set(shares_to_ask) - set(alreadygot) still_homeless = not_yet_present - set(allocated) if still_homeless: # In networks with lots of space, this is very unusual and # probably indicates an error. In networks with servers that # are full, it is merely unusual. In networks that are very # full, it is common, and many uploads will fail. In most # cases, this is obviously not fatal, and we'll just use some # other servers. # some shares are still homeless, keep trying to find them a # home. The ones that were rejected get first priority. self.homeless_shares |= still_homeless # Since they were unable to accept all of our requests, so it # is safe to assume that asking them again won't help. if progress: # They accepted at least one of the shares that we asked # them to accept, or they had a share that we didn't ask # them to accept but that we hadn't placed yet, so this # was a productive query self._query_stats.good += 1 else: # if we asked for some allocations, but the server # didn't return any at all (i.e. empty dict) it must # be full self._query_stats.full += 1 self._query_stats.bad += 1 return progress def _failed(self, msg): """ I am called when server selection fails. I first abort all of the remote buckets that I allocated during my unsuccessful attempt to place shares for this file. I then raise an UploadUnhappinessError with my msg argument. """ for tracker in self.use_trackers: assert isinstance(tracker, ServerTracker) tracker.abort() raise UploadUnhappinessError(msg) @attr.s class _Accum(object): """ Accumulate up to some known amount of ciphertext. :ivar remaining: The number of bytes still expected. :ivar ciphertext: The bytes accumulated so far. """ remaining : int = attr.ib(validator=attr.validators.instance_of(int)) ciphertext : list[bytes] = attr.ib(default=attr.Factory(list)) def extend(self, size, # type: int ciphertext, # type: list[bytes] ): """ Accumulate some more ciphertext. :param size: The amount of data the new ciphertext represents towards the goal. This may be more than the actual size of the given ciphertext if the source has run out of data. :param ciphertext: The new ciphertext to accumulate. 
""" self.remaining -= size self.ciphertext.extend(ciphertext) @implementer(IEncryptedUploadable) class EncryptAnUploadable(object): """This is a wrapper that takes an IUploadable and provides IEncryptedUploadable.""" CHUNKSIZE = 50*1024 def __init__(self, original, log_parent=None, chunk_size=None): """ :param chunk_size: The number of bytes to read from the uploadable at a time, or None for some default. """ precondition(original.default_params_set, "set_default_encoding_parameters not called on %r before wrapping with EncryptAnUploadable" % (original,)) self.original = IUploadable(original) self._log_number = log_parent self._encryptor = None self._plaintext_hasher = plaintext_hasher() self._plaintext_segment_hasher = None self._plaintext_segment_hashes = [] self._encoding_parameters = None self._file_size = None self._ciphertext_bytes_read = 0 self._status = None if chunk_size is not None: self.CHUNKSIZE = chunk_size def set_upload_status(self, upload_status): self._status = IUploadStatus(upload_status) self.original.set_upload_status(upload_status) def log(self, *args, **kwargs): if "facility" not in kwargs: kwargs["facility"] = "upload.encryption" if "parent" not in kwargs: kwargs["parent"] = self._log_number return log.msg(*args, **kwargs) def get_size(self): if self._file_size is not None: return defer.succeed(self._file_size) d = self.original.get_size() def _got_size(size): self._file_size = size if self._status: self._status.set_size(size) return size d.addCallback(_got_size) return d def get_all_encoding_parameters(self): if self._encoding_parameters is not None: return defer.succeed(self._encoding_parameters) d = self.original.get_all_encoding_parameters() def _got(encoding_parameters): (k, happy, n, segsize) = encoding_parameters self._segment_size = segsize # used by segment hashers self._encoding_parameters = encoding_parameters self.log("my encoding parameters: %s" % (encoding_parameters,), level=log.NOISY) return encoding_parameters d.addCallback(_got) return d def _get_encryptor(self): if self._encryptor: return defer.succeed(self._encryptor) d = self.original.get_encryption_key() def _got(key): self._encryptor = aes.create_encryptor(key) storage_index = storage_index_hash(key) assert isinstance(storage_index, bytes) # There's no point to having the SI be longer than the key, so we # specify that it is truncated to the same 128 bits as the AES key. 
assert len(storage_index) == 16 # SHA-256 truncated to 128b self._storage_index = storage_index if self._status: self._status.set_storage_index(storage_index) return self._encryptor d.addCallback(_got) return d def get_storage_index(self): d = self._get_encryptor() d.addCallback(lambda res: self._storage_index) return d def _get_segment_hasher(self): p = self._plaintext_segment_hasher if p: left = self._segment_size - self._plaintext_segment_hashed_bytes return p, left p = plaintext_segment_hasher() self._plaintext_segment_hasher = p self._plaintext_segment_hashed_bytes = 0 return p, self._segment_size def _update_segment_hash(self, chunk): offset = 0 while offset < len(chunk): p, segment_left = self._get_segment_hasher() chunk_left = len(chunk) - offset this_segment = min(chunk_left, segment_left) p.update(chunk[offset:offset+this_segment]) self._plaintext_segment_hashed_bytes += this_segment if self._plaintext_segment_hashed_bytes == self._segment_size: # we've filled this segment self._plaintext_segment_hashes.append(p.digest()) self._plaintext_segment_hasher = None self.log("closed hash [%d]: %dB" % (len(self._plaintext_segment_hashes)-1, self._plaintext_segment_hashed_bytes), level=log.NOISY) self.log(format="plaintext leaf hash [%(segnum)d] is %(hash)s", segnum=len(self._plaintext_segment_hashes)-1, hash=base32.b2a(p.digest()), level=log.NOISY) offset += this_segment def read_encrypted(self, length, hash_only): # make sure our parameters have been set up first d = self.get_all_encoding_parameters() # and size d.addCallback(lambda ignored: self.get_size()) d.addCallback(lambda ignored: self._get_encryptor()) accum = _Accum(length) def action(): """ Read some bytes into the accumulator. """ return self._read_encrypted(accum, hash_only) def condition(): """ Check to see if the accumulator has all the data. """ return accum.remaining == 0 d.addCallback(lambda ignored: until(action, condition)) d.addCallback(lambda ignored: accum.ciphertext) return d def _read_encrypted(self, ciphertext_accum, # type: _Accum hash_only, # type: bool ): # type: (...) -> defer.Deferred """ Read the next chunk of plaintext, encrypt it, and extend the accumulator with the resulting ciphertext. """ # tolerate large length= values without consuming a lot of RAM by # reading just a chunk (say 50kB) at a time. This only really matters # when hash_only==True (i.e. resuming an interrupted upload), since # that's the case where we will be skipping over a lot of data. size = min(ciphertext_accum.remaining, self.CHUNKSIZE) # read a chunk of plaintext.. d = defer.maybeDeferred(self.original.read, size) def _good(plaintext): # and encrypt it.. # o/' over the fields we go, hashing all the way, sHA! sHA! sHA! o/' ct = self._hash_and_encrypt_plaintext(plaintext, hash_only) # Intentionally tell the accumulator about the expected size, not # the actual size. If we run out of data we still want remaining # to drop otherwise it will never reach 0 and the loop will never # end. ciphertext_accum.extend(size, ct) d.addCallback(_good) return d def _hash_and_encrypt_plaintext(self, data, hash_only): assert isinstance(data, (tuple, list)), type(data) data = list(data) cryptdata = [] # we use data.pop(0) instead of 'for chunk in data' to save # memory: each chunk is destroyed as soon as we're done with it. 
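        # For each chunk we feed the whole-file plaintext hasher and the
        # per-segment hasher, then run it through AES; the ciphertext is only
        # kept when hash_only is False (hash_only=True is used when resuming
        # an interrupted upload, where we only need to rebuild the hash state).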
bytes_processed = 0 while data: chunk = data.pop(0) self.log(" read_encrypted handling %dB-sized chunk" % len(chunk), level=log.NOISY) bytes_processed += len(chunk) self._plaintext_hasher.update(chunk) self._update_segment_hash(chunk) # TODO: we have to encrypt the data (even if hash_only==True) # because the AES-CTR implementation doesn't offer a # way to change the counter value. Once it acquires # this ability, change this to simply update the counter # before each call to (hash_only==False) encrypt_data ciphertext = aes.encrypt_data(self._encryptor, chunk) if hash_only: self.log(" skipping encryption", level=log.NOISY) else: cryptdata.append(ciphertext) del ciphertext del chunk self._ciphertext_bytes_read += bytes_processed if self._status: progress = float(self._ciphertext_bytes_read) / self._file_size self._status.set_progress(1, progress) return cryptdata def get_plaintext_hashtree_leaves(self, first, last, num_segments): # this is currently unused, but will live again when we fix #453 if len(self._plaintext_segment_hashes) < num_segments: # close out the last one assert len(self._plaintext_segment_hashes) == num_segments-1 p, segment_left = self._get_segment_hasher() self._plaintext_segment_hashes.append(p.digest()) del self._plaintext_segment_hasher self.log("closing plaintext leaf hasher, hashed %d bytes" % self._plaintext_segment_hashed_bytes, level=log.NOISY) self.log(format="plaintext leaf hash [%(segnum)d] is %(hash)s", segnum=len(self._plaintext_segment_hashes)-1, hash=base32.b2a(p.digest()), level=log.NOISY) assert len(self._plaintext_segment_hashes) == num_segments return defer.succeed(tuple(self._plaintext_segment_hashes[first:last])) def get_plaintext_hash(self): h = self._plaintext_hasher.digest() return defer.succeed(h) def close(self): return self.original.close() @implementer(IUploadStatus) class UploadStatus(object): statusid_counter = itertools.count(0) def __init__(self): self.storage_index = None self.size = None self.helper = False self.status = "Not started" self.progress = [0.0, 0.0, 0.0] self.active = True self.results = None self.counter = next(self.statusid_counter) self.started = time.time() def get_started(self): return self.started def get_storage_index(self): return self.storage_index def get_size(self): return self.size def using_helper(self): return self.helper def get_status(self): return self.status def get_progress(self): return tuple(self.progress) def get_active(self): return self.active def get_results(self): return self.results def get_counter(self): return self.counter def set_storage_index(self, si): self.storage_index = si def set_size(self, size): self.size = size def set_helper(self, helper): self.helper = helper def set_status(self, status): self.status = status def set_progress(self, which, value): # [0]: chk, [1]: ciphertext, [2]: encode+push self.progress[which] = value def set_active(self, value): self.active = value def set_results(self, value): self.results = value class CHKUploader(object): def __init__(self, storage_broker, secret_holder, reactor=None): # server_selector needs storage_broker and secret_holder self._storage_broker = storage_broker self._secret_holder = secret_holder self._log_number = self.log("CHKUploader starting", parent=None) self._encoder = None self._storage_index = None self._upload_status = UploadStatus() self._upload_status.set_helper(False) self._upload_status.set_active(True) self._reactor = reactor # locate_all_shareholders() will create the following attribute: # self._server_trackers = {} # k: shnum, 
v: instance of ServerTracker def log(self, *args, **kwargs): if "parent" not in kwargs: kwargs["parent"] = self._log_number if "facility" not in kwargs: kwargs["facility"] = "tahoe.upload" return log.msg(*args, **kwargs) @log_call_deferred(action_type=u"immutable:upload:chk:start") def start(self, encrypted_uploadable): """Start uploading the file. Returns a Deferred that will fire with the UploadResults instance. """ self._started = time.time() eu = IEncryptedUploadable(encrypted_uploadable) self.log("starting upload of %s" % eu) eu.set_upload_status(self._upload_status) d = self.start_encrypted(eu) def _done(uploadresults): self._upload_status.set_active(False) return uploadresults d.addBoth(_done) return d def abort(self): """Call this if the upload must be abandoned before it completes. This will tell the shareholders to delete their partial shares. I return a Deferred that fires when these messages have been acked.""" if not self._encoder: # how did you call abort() before calling start() ? return defer.succeed(None) return self._encoder.abort() @log_call_deferred(action_type=u"immutable:upload:chk:start-encrypted") @inline_callbacks def start_encrypted(self, encrypted): """ Returns a Deferred that will fire with the UploadResults instance. """ eu = IEncryptedUploadable(encrypted) started = time.time() # would be Really Nice to make Encoder just a local; only # abort() really needs self._encoder ... self._encoder = encode.Encoder( self._log_number, self._upload_status, ) # this just returns itself yield self._encoder.set_encrypted_uploadable(eu) with LOCATE_ALL_SHAREHOLDERS() as action: (upload_trackers, already_serverids) = yield self.locate_all_shareholders(self._encoder, started) action.add_success_fields(upload_trackers=upload_trackers, already_serverids=already_serverids) self.set_shareholders(upload_trackers, already_serverids, self._encoder) verifycap = yield self._encoder.start() results = self._encrypted_done(verifycap) defer.returnValue(results) def locate_all_shareholders(self, encoder, started): server_selection_started = now = time.time() self._storage_index_elapsed = now - started storage_broker = self._storage_broker secret_holder = self._secret_holder storage_index = encoder.get_param("storage_index") self._storage_index = storage_index upload_id = si_b2a(storage_index)[:5] self.log("using storage index %r" % upload_id) server_selector = Tahoe2ServerSelector( upload_id, self._log_number, self._upload_status, reactor=self._reactor, ) share_size = encoder.get_param("share_size") block_size = encoder.get_param("block_size") num_segments = encoder.get_param("num_segments") k, desired, n = encoder.get_param("share_counts") self._server_selection_started = time.time() d = server_selector.get_shareholders(storage_broker, secret_holder, storage_index, share_size, block_size, num_segments, n, k, desired, encoder.get_uri_extension_size()) def _done(res): self._server_selection_elapsed = time.time() - server_selection_started return res d.addCallback(_done) return d def set_shareholders(self, upload_trackers, already_serverids, encoder): """ :param upload_trackers: a sequence of ServerTracker objects that have agreed to hold some shares for us (the shareids are stashed inside the ServerTracker) :param already_serverids: a dict mapping sharenum to a set of serverids for servers that claim to already have this share """ msgtempl = "set_shareholders; upload_trackers is %s, already_serverids is %s" values = ([', '.join([str_shareloc(k,v) for k,v in st.buckets.items()]) for st in 
upload_trackers], already_serverids) self.log(msgtempl % values, level=log.OPERATIONAL) # record already-present shares in self._results self._count_preexisting_shares = len(already_serverids) self._server_trackers = {} # k: shnum, v: instance of ServerTracker for tracker in upload_trackers: assert isinstance(tracker, ServerTracker) buckets = {} servermap = already_serverids.copy() for tracker in upload_trackers: buckets.update(tracker.buckets) for shnum in tracker.buckets: self._server_trackers[shnum] = tracker servermap.setdefault(shnum, set()).add(tracker.get_serverid()) assert len(buckets) == sum([len(tracker.buckets) for tracker in upload_trackers]), \ "%s (%s) != %s (%s)" % ( len(buckets), buckets, sum([len(tracker.buckets) for tracker in upload_trackers]), [(t.buckets, t.get_serverid()) for t in upload_trackers] ) encoder.set_shareholders(buckets, servermap) def _encrypted_done(self, verifycap): """ :return UploadResults: A description of the outcome of the upload. """ e = self._encoder sharemap = dictutil.DictOfSets() servermap = dictutil.DictOfSets() for shnum in e.get_shares_placed(): server = self._server_trackers[shnum].get_server() sharemap.add(shnum, server) servermap.add(server, shnum) now = time.time() timings = {} timings["total"] = now - self._started timings["storage_index"] = self._storage_index_elapsed timings["peer_selection"] = self._server_selection_elapsed timings.update(e.get_times()) ur = UploadResults(file_size=e.file_size, ciphertext_fetched=0, preexisting_shares=self._count_preexisting_shares, pushed_shares=len(e.get_shares_placed()), sharemap=sharemap, servermap=servermap, timings=timings, uri_extension_data=e.get_uri_extension_data(), uri_extension_hash=e.get_uri_extension_hash(), verifycapstr=verifycap.to_string()) self._upload_status.set_results(ur) return ur def get_upload_status(self): return self._upload_status def read_this_many_bytes(uploadable, size, prepend_data=None): if prepend_data is None: prepend_data = [] if size == 0: return defer.succeed([]) d = uploadable.read(size) def _got(data): assert isinstance(data, list) bytes = sum([len(piece) for piece in data]) assert bytes > 0 assert bytes <= size remaining = size - bytes if remaining: return read_this_many_bytes(uploadable, remaining, prepend_data + data) return prepend_data + data d.addCallback(_got) return d class LiteralUploader(object): def __init__(self): self._status = s = UploadStatus() s.set_storage_index(None) s.set_helper(False) s.set_progress(0, 1.0) s.set_active(False) def start(self, uploadable): uploadable = IUploadable(uploadable) d = uploadable.get_size() def _got_size(size): self._size = size self._status.set_size(size) return read_this_many_bytes(uploadable, size) d.addCallback(_got_size) d.addCallback(lambda data: uri.LiteralFileURI(b"".join(data))) d.addCallback(lambda u: u.to_string()) d.addCallback(self._build_results) return d def _build_results(self, uri): ur = UploadResults(file_size=self._size, ciphertext_fetched=0, preexisting_shares=0, pushed_shares=0, sharemap={}, servermap={}, timings={}, uri_extension_data=None, uri_extension_hash=None, verifycapstr=None) ur.set_uri(uri) self._status.set_status("Finished") self._status.set_progress(1, 1.0) self._status.set_progress(2, 1.0) self._status.set_results(ur) return ur def close(self): pass def get_upload_status(self): return self._status @implementer(RIEncryptedUploadable) class RemoteEncryptedUploadable(Referenceable): # type: ignore # warner/foolscap#78 def __init__(self, encrypted_uploadable, upload_status): self._eu = 
IEncryptedUploadable(encrypted_uploadable) self._offset = 0 self._bytes_sent = 0 self._status = IUploadStatus(upload_status) # we are responsible for updating the status string while we run, and # for setting the ciphertext-fetch progress. self._size = None def get_size(self): if self._size is not None: return defer.succeed(self._size) d = self._eu.get_size() def _got_size(size): self._size = size return size d.addCallback(_got_size) return d def remote_get_size(self): return self.get_size() def remote_get_all_encoding_parameters(self): return self._eu.get_all_encoding_parameters() def _read_encrypted(self, length, hash_only): d = self._eu.read_encrypted(length, hash_only) def _read(strings): if hash_only: self._offset += length else: size = sum([len(data) for data in strings]) self._offset += size return strings d.addCallback(_read) return d def remote_read_encrypted(self, offset, length): # we don't support seek backwards, but we allow skipping forwards precondition(offset >= 0, offset) precondition(length >= 0, length) lp = log.msg("remote_read_encrypted(%d-%d)" % (offset, offset+length), level=log.NOISY) precondition(offset >= self._offset, offset, self._offset) if offset > self._offset: # read the data from disk anyways, to build up the hash tree skip = offset - self._offset log.msg("remote_read_encrypted skipping ahead from %d to %d, skip=%d" % (self._offset, offset, skip), level=log.UNUSUAL, parent=lp) d = self._read_encrypted(skip, hash_only=True) else: d = defer.succeed(None) def _at_correct_offset(res): assert offset == self._offset, "%d != %d" % (offset, self._offset) return self._read_encrypted(length, hash_only=False) d.addCallback(_at_correct_offset) def _read(strings): size = sum([len(data) for data in strings]) self._bytes_sent += size return strings d.addCallback(_read) return d def remote_close(self): return self._eu.close() class AssistedUploader(object): def __init__(self, helper, storage_broker): self._helper = helper self._storage_broker = storage_broker self._log_number = log.msg("AssistedUploader starting") self._storage_index = None self._upload_status = s = UploadStatus() s.set_helper(True) s.set_active(True) def log(self, *args, **kwargs): if "parent" not in kwargs: kwargs["parent"] = self._log_number return log.msg(*args, **kwargs) def start(self, encrypted_uploadable, storage_index): """Start uploading the file. Returns a Deferred that will fire with the UploadResults instance. 
""" precondition(isinstance(storage_index, bytes), storage_index) self._started = time.time() eu = IEncryptedUploadable(encrypted_uploadable) eu.set_upload_status(self._upload_status) self._encuploadable = eu self._storage_index = storage_index d = eu.get_size() d.addCallback(self._got_size) d.addCallback(lambda res: eu.get_all_encoding_parameters()) d.addCallback(self._got_all_encoding_parameters) d.addCallback(self._contact_helper) d.addCallback(self._build_verifycap) def _done(res): self._upload_status.set_active(False) return res d.addBoth(_done) return d def _got_size(self, size): self._size = size self._upload_status.set_size(size) def _got_all_encoding_parameters(self, params): k, happy, n, segment_size = params # stash these for URI generation later self._needed_shares = k self._total_shares = n self._segment_size = segment_size def _contact_helper(self, res): now = self._time_contacting_helper_start = time.time() self._storage_index_elapsed = now - self._started self.log(format="contacting helper for SI %(si)s..", si=si_b2a(self._storage_index), level=log.NOISY) self._upload_status.set_status("Contacting Helper") d = self._helper.callRemote("upload_chk", self._storage_index) d.addCallback(self._contacted_helper) return d def _contacted_helper(self, helper_upload_results_and_upload_helper): (helper_upload_results, upload_helper) = helper_upload_results_and_upload_helper now = time.time() elapsed = now - self._time_contacting_helper_start self._elapsed_time_contacting_helper = elapsed if upload_helper: self.log("helper says we need to upload", level=log.NOISY) self._upload_status.set_status("Uploading Ciphertext") # we need to upload the file reu = RemoteEncryptedUploadable(self._encuploadable, self._upload_status) # let it pre-compute the size for progress purposes d = reu.get_size() d.addCallback(lambda ignored: upload_helper.callRemote("upload", reu)) # this Deferred will fire with the upload results return d self.log("helper says file is already uploaded", level=log.OPERATIONAL) self._upload_status.set_progress(1, 1.0) return helper_upload_results def _convert_old_upload_results(self, upload_results): # pre-1.3.0 helpers return upload results which contain a mapping # from shnum to a single human-readable string, containing things # like "Found on [x],[y],[z]" (for healthy files that were already in # the grid), "Found on [x]" (for files that needed upload but which # discovered pre-existing shares), and "Placed on [x]" (for newly # uploaded shares). The 1.3.0 helper returns a mapping from shnum to # set of binary serverid strings. # the old results are too hard to deal with (they don't even contain # as much information as the new results, since the nodeids are # abbreviated), so if we detect old results, just clobber them. 
sharemap = upload_results.sharemap if any(isinstance(v, (bytes, str)) for v in sharemap.values()): upload_results.sharemap = None def _build_verifycap(self, helper_upload_results): self.log("upload finished, building readcap", level=log.OPERATIONAL) self._convert_old_upload_results(helper_upload_results) self._upload_status.set_status("Building Readcap") hur = helper_upload_results assert hur.uri_extension_data["needed_shares"] == self._needed_shares assert hur.uri_extension_data["total_shares"] == self._total_shares assert hur.uri_extension_data["segment_size"] == self._segment_size assert hur.uri_extension_data["size"] == self._size # hur.verifycap doesn't exist if already found v = uri.CHKFileVerifierURI(self._storage_index, uri_extension_hash=hur.uri_extension_hash, needed_shares=self._needed_shares, total_shares=self._total_shares, size=self._size) timings = {} timings["storage_index"] = self._storage_index_elapsed timings["contacting_helper"] = self._elapsed_time_contacting_helper for key,val in hur.timings.items(): if key == "total": key = "helper_total" timings[key] = val now = time.time() timings["total"] = now - self._started # Note: older Helpers (<=1.11) sent tubids as serverids. Newer ones # send pubkeys. get_stub_server() knows how to map both into # IDisplayableServer instances. gss = self._storage_broker.get_stub_server sharemap = {} servermap = {} for shnum, serverids in hur.sharemap.items(): sharemap[shnum] = set([gss(serverid) for serverid in serverids]) # if the file was already in the grid, hur.servermap is an empty dict for serverid, shnums in hur.servermap.items(): servermap[gss(serverid)] = set(shnums) ur = UploadResults(file_size=self._size, # not if already found ciphertext_fetched=hur.ciphertext_fetched, preexisting_shares=hur.preexisting_shares, pushed_shares=hur.pushed_shares, sharemap=sharemap, servermap=servermap, timings=timings, uri_extension_data=hur.uri_extension_data, uri_extension_hash=hur.uri_extension_hash, verifycapstr=v.to_string()) self._upload_status.set_status("Finished") self._upload_status.set_results(ur) return ur def get_upload_status(self): return self._upload_status class BaseUploadable(object): # this is overridden by max_segment_size default_max_segment_size = DEFAULT_IMMUTABLE_MAX_SEGMENT_SIZE default_params_set = False max_segment_size = None encoding_param_k = None encoding_param_happy = None encoding_param_n = None _all_encoding_parameters = None _status = None def set_upload_status(self, upload_status): self._status = IUploadStatus(upload_status) def set_default_encoding_parameters(self, default_params): assert isinstance(default_params, dict) for k,v in default_params.items(): precondition(isinstance(k, (bytes, str)), k, v) precondition(isinstance(v, int), k, v) if "k" in default_params: self.default_encoding_param_k = default_params["k"] if "happy" in default_params: self.default_encoding_param_happy = default_params["happy"] if "n" in default_params: self.default_encoding_param_n = default_params["n"] if "max_segment_size" in default_params: self.default_max_segment_size = default_params["max_segment_size"] self.default_params_set = True def get_all_encoding_parameters(self): _assert(self.default_params_set, "set_default_encoding_parameters not called on %r" % (self,)) if self._all_encoding_parameters: return defer.succeed(self._all_encoding_parameters) max_segsize = self.max_segment_size or self.default_max_segment_size k = self.encoding_param_k or self.default_encoding_param_k happy = self.encoding_param_happy or 
self.default_encoding_param_happy n = self.encoding_param_n or self.default_encoding_param_n d = self.get_size() def _got_size(file_size): # for small files, shrink the segment size to avoid wasting space segsize = min(max_segsize, file_size) # this must be a multiple of 'required_shares'==k segsize = mathutil.next_multiple(segsize, k) encoding_parameters = (k, happy, n, segsize) self._all_encoding_parameters = encoding_parameters return encoding_parameters d.addCallback(_got_size) return d @implementer(IUploadable) class FileHandle(BaseUploadable): def __init__(self, filehandle, convergence): """ Upload the data from the filehandle. If convergence is None then a random encryption key will be used, else the plaintext will be hashed, then the hash will be hashed together with the string in the "convergence" argument to form the encryption key. """ assert convergence is None or isinstance(convergence, bytes), (convergence, type(convergence)) self._filehandle = filehandle self._key = None self.convergence = convergence self._size = None def _get_encryption_key_convergent(self): if self._key is not None: return defer.succeed(self._key) d = self.get_size() # that sets self._size as a side-effect d.addCallback(lambda size: self.get_all_encoding_parameters()) def _got(params): k, happy, n, segsize = params f = self._filehandle enckey_hasher = convergence_hasher(k, n, segsize, self.convergence) f.seek(0) BLOCKSIZE = 64*1024 bytes_read = 0 while True: data = f.read(BLOCKSIZE) if not data: break enckey_hasher.update(data) # TODO: setting progress in a non-yielding loop is kind of # pointless, but I'm anticipating (perhaps prematurely) the # day when we use a slowjob or twisted's CooperatorService to # make this yield time to other jobs. bytes_read += len(data) if self._status: self._status.set_progress(0, float(bytes_read)/self._size) f.seek(0) self._key = enckey_hasher.digest() if self._status: self._status.set_progress(0, 1.0) assert len(self._key) == 16 return self._key d.addCallback(_got) return d def _get_encryption_key_random(self): if self._key is None: self._key = os.urandom(16) return defer.succeed(self._key) def get_encryption_key(self): if self.convergence is not None: return self._get_encryption_key_convergent() else: return self._get_encryption_key_random() def get_size(self): if self._size is not None: return defer.succeed(self._size) self._filehandle.seek(0, os.SEEK_END) size = self._filehandle.tell() self._size = size self._filehandle.seek(0) return defer.succeed(size) def read(self, length): return defer.succeed([self._filehandle.read(length)]) def close(self): # the originator of the filehandle reserves the right to close it pass class FileName(FileHandle): def __init__(self, filename, convergence): """ Upload the data from the filename. If convergence is None then a random encryption key will be used, else the plaintext will be hashed, then the hash will be hashed together with the string in the "convergence" argument to form the encryption key. """ assert convergence is None or isinstance(convergence, bytes), (convergence, type(convergence)) FileHandle.__init__(self, open(filename, "rb"), convergence=convergence) def close(self): FileHandle.close(self) self._filehandle.close() class Data(FileHandle): def __init__(self, data, convergence): """ Upload the data from the data argument. 
If convergence is None then a random encryption key will be used, else the plaintext will be hashed, then the hash will be hashed together with the string in the "convergence" argument to form the encryption key. """ assert convergence is None or isinstance(convergence, bytes), (convergence, type(convergence)) FileHandle.__init__(self, BytesIO(data), convergence=convergence) @implementer(IUploader) class Uploader(service.MultiService, log.PrefixingLogMixin): """I am a service that allows file uploading. I am a service-child of the Client. """ # The type in Twisted for services is wrong in 22.10... # https://github.com/twisted/twisted/issues/10135 name = "uploader" # type: ignore[assignment] URI_LIT_SIZE_THRESHOLD = 55 def __init__(self, helper_furl=None, stats_provider=None, history=None): self._helper_furl = helper_furl self.stats_provider = stats_provider self._history = history self._helper = None self._all_uploads = weakref.WeakKeyDictionary() # for debugging log.PrefixingLogMixin.__init__(self, facility="tahoe.immutable.upload") service.MultiService.__init__(self) def startService(self): service.MultiService.startService(self) if self._helper_furl: self.parent.tub.connectTo(ensure_str(self._helper_furl), self._got_helper) def _got_helper(self, helper): self.log("got helper connection, getting versions") default = { b"http://allmydata.org/tahoe/protocols/helper/v1" : { }, b"application-version": b"unknown: no get_version()", } d = add_version_to_remote_reference(helper, default) d.addCallback(self._got_versioned_helper) def _got_versioned_helper(self, helper): needed = b"http://allmydata.org/tahoe/protocols/helper/v1" if needed not in helper.version: raise InsufficientVersionError(needed, helper.version) self._helper = helper helper.notifyOnDisconnect(self._lost_helper) def _lost_helper(self): self._helper = None def get_helper_info(self): # return a tuple of (helper_furl_or_None, connected_bool) return (self._helper_furl, bool(self._helper)) def upload(self, uploadable, reactor=None): """ Returns a Deferred that will fire with the UploadResults instance. """ assert self.parent assert self.running uploadable = IUploadable(uploadable) d = uploadable.get_size() def _got_size(size): default_params = self.parent.get_encoding_parameters() precondition(isinstance(default_params, dict), default_params) precondition("max_segment_size" in default_params, default_params) uploadable.set_default_encoding_parameters(default_params) if self.stats_provider: self.stats_provider.count('uploader.files_uploaded', 1) self.stats_provider.count('uploader.bytes_uploaded', size) if size <= self.URI_LIT_SIZE_THRESHOLD: uploader = LiteralUploader() return uploader.start(uploadable) else: eu = EncryptAnUploadable(uploadable, self._parentmsgid) d2 = defer.succeed(None) storage_broker = self.parent.get_storage_broker() if self._helper: uploader = AssistedUploader(self._helper, storage_broker) d2.addCallback(lambda x: eu.get_storage_index()) d2.addCallback(lambda si: uploader.start(eu, si)) else: storage_broker = self.parent.get_storage_broker() secret_holder = self.parent._secret_holder uploader = CHKUploader(storage_broker, secret_holder, reactor=reactor) d2.addCallback(lambda x: uploader.start(eu)) self._all_uploads[uploader] = None if self._history: self._history.add_upload(uploader.get_upload_status()) def turn_verifycap_into_read_cap(uploadresults): # Generate the uri from the verifycap plus the key. 
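                # Rough shape of the conversion performed below (cap strings
                # abbreviated for illustration):
                #     verifycap: URI:CHK-Verifier:<storage index>:<UEB hash>:k:N:size
                #     readcap:   URI:CHK:<AES key>:<UEB hash>:k:N:size
                # i.e. the read-cap carries the encryption key obtained from
                # the uploadable in place of the storage index, plus the same
                # UEB hash, k, N, and size taken from the verify-cap.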
d3 = uploadable.get_encryption_key() def put_readcap_into_results(key): v = uri.from_string(uploadresults.get_verifycapstr()) r = uri.CHKFileURI(key, v.uri_extension_hash, v.needed_shares, v.total_shares, v.size) uploadresults.set_uri(r.to_string()) return uploadresults d3.addCallback(put_readcap_into_results) return d3 d2.addCallback(turn_verifycap_into_read_cap) return d2 d.addCallback(_got_size) def _done(res): uploadable.close() return res d.addBoth(_done) return d tahoe_lafs-1.20.0/src/allmydata/immutable/downloader/__init__.py0000644000000000000000000000003413615410400021601 0ustar00""" Ported to Python 3. """ tahoe_lafs-1.20.0/src/allmydata/immutable/downloader/common.py0000644000000000000000000000050513615410400021335 0ustar00""" Ported to Python 3. """ (AVAILABLE, PENDING, OVERDUE, COMPLETE, CORRUPT, DEAD, BADSEGNUM) = \ ("AVAILABLE", "PENDING", "OVERDUE", "COMPLETE", "CORRUPT", "DEAD", "BADSEGNUM") class BadSegmentNumberError(Exception): pass class WrongSegmentError(Exception): pass class BadCiphertextHashError(Exception): pass tahoe_lafs-1.20.0/src/allmydata/immutable/downloader/fetcher.py0000644000000000000000000002736513615410400021502 0ustar00""" Ported to Python 3. """ from twisted.python.failure import Failure from foolscap.api import eventually from allmydata.interfaces import NotEnoughSharesError, NoSharesError from allmydata.util import log from allmydata.util.dictutil import DictOfSets from .common import OVERDUE, COMPLETE, CORRUPT, DEAD, BADSEGNUM, \ BadSegmentNumberError class SegmentFetcher(object): """I am responsible for acquiring blocks for a single segment. I will use the Share instances passed to my add_shares() method to locate, retrieve, and validate those blocks. I expect my parent node to call my no_more_shares() method when there are no more shares available. I will call my parent's want_more_shares() method when I want more: I expect to see at least one call to add_shares or no_more_shares afterwards. When I have enough validated blocks, I will call my parent's process_blocks() method with a dictionary that maps shnum to blockdata. If I am unable to provide enough blocks, I will call my parent's fetch_failed() method with (self, f). After either of these events, I will shut down and do no further work. My parent can also call my stop() method to have me shut down early.""" def __init__(self, node, segnum, k, logparent): self._node = node # _Node self.segnum = segnum self._k = k self._shares = [] # unused Share instances, sorted by "goodness" # (RTT), then shnum. This is populated when DYHB # responses arrive, or (for later segments) at # startup. We remove shares from it when we call # sh.get_block() on them. self._shares_from_server = DictOfSets() # maps server to set of # Shares on that server for # which we have outstanding # get_block() calls. self._max_shares_per_server = 1 # how many Shares we're allowed to # pull from each server. This starts # at 1 and grows if we don't have # sufficient diversity. self._active_share_map = {} # maps shnum to outstanding (and not # OVERDUE) Share that provides it. 
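        # Rough lifecycle of a Share inside this fetcher: it starts in
        # self._shares (unused), moves into self._active_share_map and
        # self._shares_from_server once we call get_block() on it, may be
        # reclassified into self._overdue_share_map if the request takes too
        # long, and its validated block eventually lands in self._blocks.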
self._overdue_share_map = DictOfSets() # shares in the OVERDUE state self._lp = logparent self._share_observers = {} # maps Share to EventStreamObserver for # active ones self._blocks = {} # maps shnum to validated block data self._no_more_shares = False self._last_failure = None self._running = True def stop(self): if self._running: log.msg("SegmentFetcher(%r).stop" % self._node._si_prefix, level=log.NOISY, parent=self._lp, umid="LWyqpg") self._cancel_all_requests() self._running = False # help GC ??? del self._shares, self._shares_from_server, self._active_share_map del self._share_observers # called by our parent _Node def add_shares(self, shares): # called when ShareFinder locates a new share, and when a non-initial # segment fetch is started and we already know about shares from the # previous segment self._shares.extend(shares) self._shares.sort(key=lambda s: (s._dyhb_rtt, s._shnum) ) eventually(self.loop) def no_more_shares(self): # ShareFinder tells us it's reached the end of its list self._no_more_shares = True eventually(self.loop) # internal methods def loop(self): try: # if any exception occurs here, kill the download self._do_loop() except BaseException: self._node.fetch_failed(self, Failure()) raise def _do_loop(self): k = self._k if not self._running: return numsegs, authoritative = self._node.get_num_segments() if authoritative and self.segnum >= numsegs: # oops, we were asking for a segment number beyond the end of the # file. This is an error. self.stop() e = BadSegmentNumberError("segnum=%d, numsegs=%d" % (self.segnum, self._node.num_segments)) f = Failure(e) self._node.fetch_failed(self, f) return #print("LOOP", self._blocks.keys(), "active:", self._active_share_map, "overdue:", self._overdue_share_map, "unused:", self._shares) # Should we sent out more requests? while len(set(self._blocks.keys()) | set(self._active_share_map.keys()) ) < k: # we don't have data or active requests for enough shares. Are # there any unused shares we can start using? (sent_something, want_more_diversity) = self._find_and_use_share() if sent_something: # great. loop back around in case we need to send more. continue if want_more_diversity: # we could have sent something if we'd been allowed to pull # more shares per server. Increase the limit and try again. self._max_shares_per_server += 1 log.msg("SegmentFetcher(%r) increasing diversity limit to %d" % (self._node._si_prefix, self._max_shares_per_server), level=log.NOISY, umid="xY2pBA") # Also ask for more shares, in the hopes of achieving better # diversity for the next segment. self._ask_for_more_shares() continue # we need more shares than the ones in self._shares to make # progress self._ask_for_more_shares() if self._no_more_shares: # But there are no more shares to be had. If we're going to # succeed, it will be with the shares we've already seen. # Will they be enough? if len(set(self._blocks.keys()) | set(self._active_share_map.keys()) | set(self._overdue_share_map.keys()) ) < k: # nope. bail. self._no_shares_error() # this calls self.stop() return # our outstanding or overdue requests may yet work. # more shares may be coming. Wait until then. return # are we done? if len(set(self._blocks.keys())) >= k: # yay! self.stop() self._node.process_blocks(self.segnum, self._blocks) return def _no_shares_error(self): if not (self._shares or self._active_share_map or self._overdue_share_map or self._blocks): format = ("no shares (need %(k)d)." 
" Last failure: %(last_failure)s") args = { "k": self._k, "last_failure": self._last_failure } error = NoSharesError else: format = ("ran out of shares: complete=%(complete)s" " pending=%(pending)s overdue=%(overdue)s" " unused=%(unused)s need %(k)d." " Last failure: %(last_failure)s") def join(shnums): return ",".join(["sh%d" % shnum for shnum in sorted(shnums)]) pending_s = ",".join([str(sh) for sh in self._active_share_map.values()]) overdue = set() for shares in self._overdue_share_map.values(): overdue |= shares overdue_s = ",".join([str(sh) for sh in overdue]) args = {"complete": join(self._blocks.keys()), "pending": pending_s, "overdue": overdue_s, # 'unused' should be zero "unused": ",".join([str(sh) for sh in self._shares]), "k": self._k, "last_failure": self._last_failure, } error = NotEnoughSharesError log.msg(format=format, level=log.UNUSUAL, parent=self._lp, umid="1DsnTg", **args) e = error(format % args) f = Failure(e) self.stop() self._node.fetch_failed(self, f) def _find_and_use_share(self): sent_something = False want_more_diversity = False for sh in self._shares: # find one good share to fetch shnum = sh._shnum ; server = sh._server # XXX if shnum in self._blocks: continue # don't request data we already have if shnum in self._active_share_map: # note: OVERDUE shares are removed from _active_share_map # and added to _overdue_share_map instead. continue # don't send redundant requests sfs = self._shares_from_server if len(sfs.get(server,set())) >= self._max_shares_per_server: # don't pull too much from a single server want_more_diversity = True continue # ok, we can use this share self._shares.remove(sh) self._active_share_map[shnum] = sh self._shares_from_server.add(server, sh) self._start_share(sh, shnum) sent_something = True break return (sent_something, want_more_diversity) def _start_share(self, share, shnum): self._share_observers[share] = o = share.get_block(self.segnum) o.subscribe(self._block_request_activity, share=share, shnum=shnum) def _ask_for_more_shares(self): if not self._no_more_shares: self._node.want_more_shares() # that will trigger the ShareFinder to keep looking, and call our # add_shares() or no_more_shares() later. def _cancel_all_requests(self): for o in list(self._share_observers.values()): o.cancel() self._share_observers = {} def _block_request_activity(self, share, shnum, state, block=None, f=None): # called by Shares, in response to our s.send_request() calls. if not self._running: return log.msg("SegmentFetcher(%r)._block_request_activity: %s -> %r" % (self._node._si_prefix, repr(share), state), level=log.NOISY, parent=self._lp, umid="vilNWA") # COMPLETE, CORRUPT, DEAD, BADSEGNUM are terminal. Remove the share # from all our tracking lists. if state in (COMPLETE, CORRUPT, DEAD, BADSEGNUM): self._share_observers.pop(share, None) server = share._server # XXX self._shares_from_server.discard(server, share) if self._active_share_map.get(shnum) is share: del self._active_share_map[shnum] self._overdue_share_map.discard(shnum, share) if state is COMPLETE: # 'block' is fully validated and complete self._blocks[shnum] = block if state is OVERDUE: # no longer active, but still might complete del self._active_share_map[shnum] self._overdue_share_map.add(shnum, share) # OVERDUE is not terminal: it will eventually transition to # COMPLETE, CORRUPT, or DEAD. 
if state is DEAD: self._last_failure = f if state is BADSEGNUM: # our main loop will ask the DownloadNode each time for the # number of segments, so we'll deal with this in the top of # _do_loop pass eventually(self.loop) tahoe_lafs-1.20.0/src/allmydata/immutable/downloader/finder.py0000644000000000000000000002212313615410400021314 0ustar00""" Ported to Python 3. """ from six import ensure_str import time now = time.time from foolscap.api import eventually from allmydata.util import base32, log from twisted.internet import reactor from .share import Share, CommonShare def incidentally(res, f, *args, **kwargs): """Add me to a Deferred chain like this: d.addBoth(incidentally, func, arg) and I'll behave as if you'd added the following function: def _(res): func(arg) return res This is useful if you want to execute an expression when the Deferred fires, but don't care about its value. """ f(*args, **kwargs) return res class RequestToken(object): def __init__(self, server): self.server = server class ShareFinder(object): OVERDUE_TIMEOUT = 10.0 def __init__(self, storage_broker, verifycap, node, download_status, logparent=None, max_outstanding_requests=10): self.running = True # stopped by Share.stop, from Terminator self.verifycap = verifycap self._started = False self._storage_broker = storage_broker self.share_consumer = self.node = node self.max_outstanding_requests = max_outstanding_requests self._hungry = False self._commonshares = {} # shnum to CommonShare instance self.pending_requests = set() self.overdue_requests = set() # subset of pending_requests self.overdue_timers = {} self._storage_index = verifycap.storage_index self._si_prefix = base32.b2a(self._storage_index[:8])[:12] self._node_logparent = logparent self._download_status = download_status self._lp = log.msg(format="ShareFinder[si=%(si)s] starting", si=self._si_prefix, level=log.NOISY, parent=logparent, umid="2xjj2A") def update_num_segments(self): (numsegs, authoritative) = self.node.get_num_segments() assert authoritative for cs in self._commonshares.values(): cs.set_authoritative_num_segments(numsegs) def start_finding_servers(self): # don't get servers until somebody uses us: creating the # ImmutableFileNode should not cause work to happen yet. Test case is # test_dirnode, which creates us with storage_broker=None if not self._started: si = self.verifycap.storage_index servers = self._storage_broker.get_servers_for_psi(si) self._servers = iter(servers) self._started = True def log(self, *args, **kwargs): if "parent" not in kwargs: kwargs["parent"] = self._lp return log.msg(*args, **kwargs) def stop(self): self.running = False while self.overdue_timers: req,t = self.overdue_timers.popitem() t.cancel() # called by our parent CiphertextDownloader def hungry(self): self.log(format="ShareFinder[si=%(si)s] hungry", si=self._si_prefix, level=log.NOISY, umid="NywYaQ") self.start_finding_servers() self._hungry = True eventually(self.loop) # internal methods def loop(self): pending_s = ",".join([ensure_str(rt.server.get_name()) for rt in self.pending_requests]) # sort? 
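        # Decision order for the rest of this loop: bail out if we are not
        # running or not hungry; stop issuing requests once the number of
        # non-overdue requests reaches max_outstanding_requests; otherwise
        # send a DYHB query to the next server from self._servers and
        # reschedule ourselves; only when the server list is exhausted *and*
        # nothing is in flight do we announce that no more shares are coming.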
self.log(format="ShareFinder loop: running=%(running)s" " hungry=%(hungry)s, pending=%(pending)s", running=self.running, hungry=self._hungry, pending=pending_s, level=log.NOISY, umid="kRtS4Q") if not self.running: return if not self._hungry: return non_overdue = self.pending_requests - self.overdue_requests if len(non_overdue) >= self.max_outstanding_requests: # cannot send more requests, must wait for some to retire return server = None try: if self._servers: server = next(self._servers) except StopIteration: self._servers = None if server: self.send_request(server) # we loop again to get parallel queries. The check above will # prevent us from looping forever. eventually(self.loop) return if self.pending_requests: # no server, but there are still requests in flight: maybe one of # them will make progress return self.log(format="ShareFinder.loop: no_more_shares, ever", level=log.UNUSUAL, umid="XjQlzg") # we've run out of servers (so we can't send any more requests), and # we have nothing in flight. No further progress can be made. They # are destined to remain hungry. eventually(self.share_consumer.no_more_shares) def send_request(self, server): req = RequestToken(server) self.pending_requests.add(req) lp = self.log(format="sending DYHB to [%(name)s]", name=server.get_name(), level=log.NOISY, umid="Io7pyg") time_sent = now() d_ev = self._download_status.add_dyhb_request(server, time_sent) # TODO: get the timer from a Server object, it knows best self.overdue_timers[req] = reactor.callLater(self.OVERDUE_TIMEOUT, self.overdue, req) d = server.get_storage_server().get_buckets(self._storage_index) d.addBoth(incidentally, self._request_retired, req) d.addCallbacks(self._got_response, self._got_error, callbackArgs=(server, req, d_ev, time_sent, lp), errbackArgs=(server, req, d_ev, lp)) d.addErrback(log.err, format="error in send_request", level=log.WEIRD, parent=lp, umid="rpdV0w") d.addCallback(incidentally, eventually, self.loop) def _request_retired(self, req): self.pending_requests.discard(req) self.overdue_requests.discard(req) if req in self.overdue_timers: self.overdue_timers[req].cancel() del self.overdue_timers[req] def overdue(self, req): del self.overdue_timers[req] assert req in self.pending_requests # paranoia, should never be false self.overdue_requests.add(req) eventually(self.loop) def _got_response(self, buckets, server, req, d_ev, time_sent, lp): shnums = sorted([shnum for shnum in buckets]) time_received = now() d_ev.finished(shnums, time_received) dyhb_rtt = time_received - time_sent if not buckets: self.log(format="no shares from [%(name)s]", name=server.get_name(), level=log.NOISY, parent=lp, umid="U7d4JA") return shnums_s = ",".join([str(shnum) for shnum in shnums]) self.log(format="got shnums [%(shnums)s] from [%(name)s]", shnums=shnums_s, name=server.get_name(), level=log.NOISY, parent=lp, umid="0fcEZw") shares = [] for shnum, bucket in buckets.items(): s = self._create_share(shnum, bucket, server, dyhb_rtt) shares.append(s) self._deliver_shares(shares) def _create_share(self, shnum, bucket, server, dyhb_rtt): if shnum in self._commonshares: cs = self._commonshares[shnum] else: numsegs, authoritative = self.node.get_num_segments() cs = CommonShare(numsegs, self._si_prefix, shnum, self._node_logparent) if authoritative: cs.set_authoritative_num_segments(numsegs) # Share._get_satisfaction is responsible for updating # CommonShare.set_numsegs after we know the UEB. 
Alternatives: # 1: d = self.node.get_num_segments() # d.addCallback(cs.got_numsegs) # the problem is that the OneShotObserverList I was using # inserts an eventual-send between _get_satisfaction's # _satisfy_UEB and _satisfy_block_hash_tree, and the # CommonShare didn't get the num_segs message before # being asked to set block hash values. To resolve this # would require an immediate ObserverList instead of # an eventual-send -based one # 2: break _get_satisfaction into Deferred-attached pieces. # Yuck. self._commonshares[shnum] = cs s = Share(bucket, server, self.verifycap, cs, self.node, self._download_status, shnum, dyhb_rtt, self._node_logparent) return s def _deliver_shares(self, shares): # they will call hungry() again if they want more self._hungry = False shares_s = ",".join([str(sh) for sh in shares]) self.log(format="delivering shares: %s" % shares_s, level=log.NOISY, umid="2n1qQw") eventually(self.share_consumer.got_shares, shares) def _got_error(self, f, server, req, d_ev, lp): d_ev.error(now()) self.log(format="got error from [%(name)s]", name=server.get_name(), failure=f, level=log.UNUSUAL, parent=lp, umid="zUKdCw") tahoe_lafs-1.20.0/src/allmydata/immutable/downloader/node.py0000644000000000000000000005730713615410400021006 0ustar00""" Ported to Python 3. """ import time now = time.time from zope.interface import Interface from twisted.python.failure import Failure from twisted.internet import defer from foolscap.api import eventually from allmydata import uri from allmydata.codec import CRSDecoder from allmydata.util import base32, log, hashutil, mathutil, observer from allmydata.interfaces import DEFAULT_IMMUTABLE_MAX_SEGMENT_SIZE from allmydata.hashtree import IncompleteHashTree, BadHashError, \ NotEnoughHashesError # local imports from .finder import ShareFinder from .fetcher import SegmentFetcher from .segmentation import Segmentation from .common import BadCiphertextHashError class IDownloadStatusHandlingConsumer(Interface): def set_download_status_read_event(read_ev): """Record the DownloadStatus 'read event', to be updated with the time it takes to decrypt each chunk of data.""" class Cancel(object): def __init__(self, f): self._f = f self.active = True def cancel(self): if self.active: self.active = False self._f(self) class DownloadNode(object): """Internal class which manages downloads and holds state. External callers use CiphertextFileNode instead.""" default_max_segment_size = DEFAULT_IMMUTABLE_MAX_SEGMENT_SIZE # Share._node points to me def __init__(self, verifycap, storage_broker, secret_holder, terminator, history, download_status): assert isinstance(verifycap, uri.CHKFileVerifierURI) self._verifycap = verifycap self._storage_broker = storage_broker self._si_prefix = base32.b2a(verifycap.storage_index[:8])[:12] self.running = True if terminator: terminator.register(self) # calls self.stop() at stopService() # the rules are: # 1: Only send network requests if you're active (self.running is True) # 2: Use TimerService, not reactor.callLater # 3: You can do eventual-sends any time. # These rules should mean that once # stopService()+flushEventualQueue() fires, everything will be done. self._secret_holder = secret_holder self._history = history self._download_status = download_status self.share_hash_tree = IncompleteHashTree(self._verifycap.total_shares) # we guess the segment size, so Segmentation can pull non-initial # segments in a single roundtrip. 
This populates # .guessed_segment_size, .guessed_num_segments, and # .ciphertext_hash_tree (with a dummy, to let us guess which hashes # we'll need) self._build_guessed_tables(self.default_max_segment_size) # filled in when we parse a valid UEB self.have_UEB = False self.segment_size = None self.tail_segment_size = None self.tail_segment_padded = None self.num_segments = None self.block_size = None self.tail_block_size = None # things to track callers that want data # _segment_requests can have duplicates self._segment_requests = [] # (segnum, d, cancel_handle, seg_ev, lp) self._active_segment = None # a SegmentFetcher, with .segnum self._segsize_observers = observer.OneShotObserverList() # we create one top-level logparent for this _Node, and another one # for each read() call. Segmentation and get_segment() messages are # associated with the read() call, everything else is tied to the # _Node's log entry. lp = log.msg(format="Immutable.DownloadNode(%(si)s) created:" " size=%(size)d," " guessed_segsize=%(guessed_segsize)d," " guessed_numsegs=%(guessed_numsegs)d", si=self._si_prefix, size=verifycap.size, guessed_segsize=self.guessed_segment_size, guessed_numsegs=self.guessed_num_segments, level=log.OPERATIONAL, umid="uJ0zAQ") self._lp = lp self._sharefinder = ShareFinder(storage_broker, verifycap, self, self._download_status, lp) self._shares = set() def _build_guessed_tables(self, max_segment_size): size = min(self._verifycap.size, max_segment_size) s = mathutil.next_multiple(size, self._verifycap.needed_shares) self.guessed_segment_size = s r = self._calculate_sizes(self.guessed_segment_size) self.guessed_num_segments = r["num_segments"] # as with CommonShare, our ciphertext_hash_tree is a stub until we # get the real num_segments self.ciphertext_hash_tree = IncompleteHashTree(self.guessed_num_segments) self.ciphertext_hash_tree_leaves = self.guessed_num_segments def __repr__(self): return "ImmutableDownloadNode(%r)" % (self._si_prefix,) def stop(self): # called by the Terminator at shutdown, mostly for tests if self._active_segment: seg, self._active_segment = self._active_segment, None seg.stop() self._sharefinder.stop() # things called by outside callers, via CiphertextFileNode. get_segment() # may also be called by Segmentation. def read(self, consumer, offset, size): """I am the main entry point, from which FileNode.read() can get data. I feed the consumer with the desired range of ciphertext. I return a Deferred that fires (with the consumer) when the read is finished. Note that there is no notion of a 'file pointer': each call to read() uses an independent offset= value. 
""" # for concurrent operations: each gets its own Segmentation manager if size is None: size = self._verifycap.size # ignore overruns: clip size so offset+size does not go past EOF, and # so size is not negative (which indicates that offset >= EOF) size = max(0, min(size, self._verifycap.size-offset)) read_ev = self._download_status.add_read_event(offset, size, now()) if IDownloadStatusHandlingConsumer.providedBy(consumer): consumer.set_download_status_read_event(read_ev) consumer.set_download_status(self._download_status) lp = log.msg(format="imm Node(%(si)s).read(%(offset)d, %(size)d)", si=base32.b2a(self._verifycap.storage_index)[:8], offset=offset, size=size, level=log.OPERATIONAL, parent=self._lp, umid="l3j3Ww") if self._history: sp = self._history.stats_provider sp.count("downloader.files_downloaded", 1) # really read() calls sp.count("downloader.bytes_downloaded", size) if size == 0: read_ev.finished(now()) # no data, so no producer, so no register/unregisterProducer return defer.succeed(consumer) # for concurrent operations, each read() gets its own Segmentation # manager s = Segmentation(self, offset, size, consumer, read_ev, lp) # this raises an interesting question: what segments to fetch? if # offset=0, always fetch the first segment, and then allow # Segmentation to be responsible for pulling the subsequent ones if # the first wasn't large enough. If offset>0, we're going to need an # extra roundtrip to get the UEB (and therefore the segment size) # before we can figure out which segment to get. TODO: allow the # offset-table-guessing code (which starts by guessing the segsize) # to assist the offset>0 process. d = s.start() def _done(res): read_ev.finished(now()) return res d.addBoth(_done) return d def get_segment(self, segnum, logparent=None): """Begin downloading a segment. I return a tuple (d, c): 'd' is a Deferred that fires with (offset,data) when the desired segment is available, and c is an object on which c.cancel() can be called to disavow interest in the segment (after which 'd' will never fire). You probably need to know the segment size before calling this, unless you want the first few bytes of the file. If you ask for a segment number which turns out to be too large, the Deferred will errback with BadSegmentNumberError. The Deferred fires with the offset of the first byte of the data segment, so that you can call get_segment() before knowing the segment size, and still know which data you received. The Deferred can also errback with other fatal problems, such as NotEnoughSharesError, NoSharesError, or BadCiphertextHashError. """ lp = log.msg(format="imm Node(%(si)s).get_segment(%(segnum)d)", si=base32.b2a(self._verifycap.storage_index)[:8], segnum=segnum, level=log.OPERATIONAL, parent=logparent, umid="UKFjDQ") seg_ev = self._download_status.add_segment_request(segnum, now()) d = defer.Deferred() c = Cancel(self._cancel_request) self._segment_requests.append( (segnum, d, c, seg_ev, lp) ) self._start_new_segment() return (d, c) def get_segsize(self): """Return a Deferred that fires when we know the real segment size.""" if self.segment_size: return defer.succeed(self.segment_size) # TODO: this downloads (and discards) the first segment of the file. # We could make this more efficient by writing # fetcher.SegmentSizeFetcher, with the job of finding a single valid # share and extracting the UEB. We'd add Share.get_UEB() to request # just the UEB. 
(d,c) = self.get_segment(0) # this ensures that an error during get_segment() will errback the # caller, so Repair won't wait forever on completely missing files d.addCallback(lambda ign: self._segsize_observers.when_fired()) return d # things called by the Segmentation object used to transform # arbitrary-sized read() calls into quantized segment fetches def _start_new_segment(self): if self._active_segment is None and self._segment_requests: (segnum, d, c, seg_ev, lp) = self._segment_requests[0] k = self._verifycap.needed_shares log.msg(format="%(node)s._start_new_segment: segnum=%(segnum)d", node=repr(self), segnum=segnum, level=log.NOISY, parent=lp, umid="wAlnHQ") self._active_segment = fetcher = SegmentFetcher(self, segnum, k, lp) seg_ev.activate(now()) active_shares = [s for s in self._shares if s.is_alive()] fetcher.add_shares(active_shares) # this triggers the loop # called by our child ShareFinder def got_shares(self, shares): self._shares.update(shares) if self._active_segment: self._active_segment.add_shares(shares) def no_more_shares(self): self._no_more_shares = True if self._active_segment: self._active_segment.no_more_shares() # things called by our Share instances def validate_and_store_UEB(self, UEB_s): log.msg("validate_and_store_UEB", level=log.OPERATIONAL, parent=self._lp, umid="7sTrPw") h = hashutil.uri_extension_hash(UEB_s) if h != self._verifycap.uri_extension_hash: raise BadHashError self._parse_and_store_UEB(UEB_s) # sets self._stuff # TODO: a malformed (but authentic) UEB could throw an assertion in # _parse_and_store_UEB, and we should abandon the download. self.have_UEB = True # inform the ShareFinder about our correct number of segments. This # will update the block-hash-trees in all existing CommonShare # instances, and will populate new ones with the correct value. self._sharefinder.update_num_segments() def _parse_and_store_UEB(self, UEB_s): # Note: the UEB contains needed_shares and total_shares. These are # redundant and inferior (the filecap contains the authoritative # values). However, because it is possible to encode the same file in # multiple ways, and the encoders might choose (poorly) to use the # same key for both (therefore getting the same SI), we might # encounter shares for both types. The UEB hashes will be different, # however, and we'll disregard the "other" encoding's shares as # corrupted. # therefore, we ignore d['total_shares'] and d['needed_shares']. d = uri.unpack_extension(UEB_s) log.msg(format="UEB=%(ueb)s, vcap=%(vcap)s", ueb=repr(uri.unpack_extension_readable(UEB_s)), vcap=self._verifycap.to_string(), level=log.NOISY, parent=self._lp, umid="cVqZnA") k, N = self._verifycap.needed_shares, self._verifycap.total_shares self.segment_size = d['segment_size'] self._segsize_observers.fire(self.segment_size) r = self._calculate_sizes(self.segment_size) self.tail_segment_size = r["tail_segment_size"] self.tail_segment_padded = r["tail_segment_padded"] self.num_segments = r["num_segments"] self.block_size = r["block_size"] self.tail_block_size = r["tail_block_size"] log.msg("actual sizes: %s" % (r,), level=log.NOISY, parent=self._lp, umid="PY6P5Q") if (self.segment_size == self.guessed_segment_size and self.num_segments == self.guessed_num_segments): log.msg("my guess was right!", level=log.NOISY, parent=self._lp, umid="x340Ow") else: log.msg("my guess was wrong! 
Extra round trips for me.", level=log.NOISY, parent=self._lp, umid="tb7RJw") # zfec.Decode() instantiation is fast, but still, let's use the same # codec instance for all but the last segment. 3-of-10 takes 15us on # my laptop, 25-of-100 is 900us, 3-of-255 is 97us, 25-of-255 is # 2.5ms, worst-case 254-of-255 is 9.3ms self._codec = CRSDecoder() self._codec.set_params(self.segment_size, k, N) # Ciphertext hash tree root is mandatory, so that there is at most # one ciphertext that matches this read-cap or verify-cap. The # integrity check on the shares is not sufficient to prevent the # original encoder from creating some shares of file A and other # shares of file B. self.ciphertext_hash_tree was a guess before: # this is where we create it for real. self.ciphertext_hash_tree = IncompleteHashTree(self.num_segments) self.ciphertext_hash_tree_leaves = self.num_segments self.ciphertext_hash_tree.set_hashes({0: d['crypttext_root_hash']}) self.share_hash_tree.set_hashes({0: d['share_root_hash']}) # Our job is a fast download, not verification, so we ignore any # redundant fields. The Verifier uses a different code path which # does not ignore them. def _calculate_sizes(self, segment_size): # segments of ciphertext size = self._verifycap.size k = self._verifycap.needed_shares # this assert matches the one in encode.py:127 inside # Encoded._got_all_encoding_parameters, where the UEB is constructed assert segment_size % k == 0 # the last segment is usually short. We don't store a whole segsize, # but we do pad the segment up to a multiple of k, because the # encoder requires that. tail_segment_size = size % segment_size if tail_segment_size == 0: tail_segment_size = segment_size padded = mathutil.next_multiple(tail_segment_size, k) tail_segment_padded = padded num_segments = mathutil.div_ceil(size, segment_size) # each segment is turned into N blocks. All but the last are of size # block_size, and the last is of size tail_block_size block_size = segment_size // k tail_block_size = tail_segment_padded // k return { "tail_segment_size": tail_segment_size, "tail_segment_padded": tail_segment_padded, "num_segments": num_segments, "block_size": block_size, "tail_block_size": tail_block_size } def process_share_hashes(self, share_hashes): for hashnum in share_hashes: if hashnum >= len(self.share_hash_tree): # "BadHashError" is normally for e.g. a corrupt block. We # sort of abuse it here to mean a badly numbered hash (which # indicates corruption in the number bytes, rather than in # the data bytes). 
raise BadHashError("hashnum %d doesn't fit in hashtree(%d)" % (hashnum, len(self.share_hash_tree))) self.share_hash_tree.set_hashes(share_hashes) def get_desired_ciphertext_hashes(self, segnum): if segnum < self.ciphertext_hash_tree_leaves: return self.ciphertext_hash_tree.needed_hashes(segnum, include_leaf=True) return [] def get_needed_ciphertext_hashes(self, segnum): cht = self.ciphertext_hash_tree return cht.needed_hashes(segnum, include_leaf=True) def process_ciphertext_hashes(self, hashes): assert self.num_segments is not None # this may raise BadHashError or NotEnoughHashesError self.ciphertext_hash_tree.set_hashes(hashes) # called by our child SegmentFetcher def want_more_shares(self): self._sharefinder.hungry() def fetch_failed(self, sf, f): assert sf is self._active_segment self._active_segment = None # deliver error upwards for (d,c,seg_ev) in self._extract_requests(sf.segnum): seg_ev.error(now()) eventually(self._deliver, d, c, f) self._start_new_segment() def process_blocks(self, segnum, blocks): start = now() d = self._decode_blocks(segnum, blocks) d.addCallback(self._check_ciphertext_hash, segnum) def _deliver(result): log.msg(format="delivering segment(%(segnum)d)", segnum=segnum, level=log.OPERATIONAL, parent=self._lp, umid="j60Ojg") when = now() if isinstance(result, Failure): # this catches failures in decode or ciphertext hash for (d,c,seg_ev) in self._extract_requests(segnum): seg_ev.error(when) eventually(self._deliver, d, c, result) else: (offset, segment, decodetime) = result self._active_segment = None for (d,c,seg_ev) in self._extract_requests(segnum): # when we have two requests for the same segment, the # second one will not be "activated" before the data is # delivered, so to allow the status-reporting code to see # consistent behavior, we activate them all now. The # SegmentEvent will ignore duplicate activate() calls. # Note that this will result in an inaccurate "receive # speed" for the second request. 
seg_ev.activate(when) seg_ev.deliver(when, offset, len(segment), decodetime) eventually(self._deliver, d, c, result) self._download_status.add_misc_event("process_block", start, now()) self._start_new_segment() d.addBoth(_deliver) d.addErrback(log.err, "unhandled error during process_blocks", level=log.WEIRD, parent=self._lp, umid="MkEsCg") def _decode_blocks(self, segnum, blocks): start = now() tail = (segnum == self.num_segments-1) codec = self._codec block_size = self.block_size decoded_size = self.segment_size if tail: # account for the padding in the last segment codec = CRSDecoder() k, N = self._verifycap.needed_shares, self._verifycap.total_shares codec.set_params(self.tail_segment_padded, k, N) block_size = self.tail_block_size decoded_size = self.tail_segment_padded shares = [] shareids = [] for (shareid, share) in blocks.items(): assert len(share) == block_size shareids.append(shareid) shares.append(share) del blocks d = codec.decode(shares, shareids) # segment del shares def _process(buffers): decodetime = now() - start segment = b"".join(buffers) assert len(segment) == decoded_size del buffers if tail: segment = segment[:self.tail_segment_size] self._download_status.add_misc_event("decode", start, now()) return (segment, decodetime) d.addCallback(_process) return d def _check_ciphertext_hash(self, segment_and_decodetime, segnum): (segment, decodetime) = segment_and_decodetime start = now() assert self._active_segment.segnum == segnum assert self.segment_size is not None offset = segnum * self.segment_size h = hashutil.crypttext_segment_hash(segment) try: self.ciphertext_hash_tree.set_hashes(leaves={segnum: h}) self._download_status.add_misc_event("CThash", start, now()) return (offset, segment, decodetime) except (BadHashError, NotEnoughHashesError): format = ("hash failure in ciphertext_hash_tree:" " segnum=%(segnum)d, SI=%(si)r") log.msg(format=format, segnum=segnum, si=self._si_prefix, failure=Failure(), level=log.WEIRD, parent=self._lp, umid="MTwNnw") # this is especially weird, because we made it past the share # hash tree. It implies that we're using the wrong encoding, or # that the uploader deliberately constructed a bad UEB. msg = format % {"segnum": segnum, "si": self._si_prefix} raise BadCiphertextHashError(msg) def _deliver(self, d, c, result): # this method exists to handle cancel() that occurs between # _got_segment and _deliver if c.active: c.active = False # it is now too late to cancel d.callback(result) # might actually be an errback def _extract_requests(self, segnum): """Remove matching requests and return their (d,c) tuples so that the caller can retire them.""" retire = [(d,c,seg_ev) for (segnum0,d,c,seg_ev,lp) in self._segment_requests if segnum0 == segnum] self._segment_requests = [t for t in self._segment_requests if t[0] != segnum] return retire def _cancel_request(self, cancel): self._segment_requests = [t for t in self._segment_requests if t[2] != cancel] segnums = [segnum for (segnum,d,c,seg_ev,lp) in self._segment_requests] # self._active_segment might be None in rare circumstances, so make # sure we tolerate it if self._active_segment and self._active_segment.segnum not in segnums: seg, self._active_segment = self._active_segment, None seg.stop() self._start_new_segment() # called by ShareFinder to choose hashtree sizes in CommonShares, and by # SegmentFetcher to tell if it is still fetching a valid segnum. 
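    # Illustrative return values (mirroring the code below):
    #     before the UEB is known:  (self.guessed_num_segments, False)
    #     after the UEB is parsed:  (self.num_segments, True)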
def get_num_segments(self): # returns (best_num_segments, authoritative) if self.num_segments is None: return (self.guessed_num_segments, False) return (self.num_segments, True) tahoe_lafs-1.20.0/src/allmydata/immutable/downloader/segmentation.py0000644000000000000000000001547513615410400022556 0ustar00""" Ported to Python 3. """ import time now = time.time from zope.interface import implementer from twisted.internet import defer from twisted.internet.interfaces import IPushProducer from foolscap.api import eventually from allmydata.util import log from allmydata.util.spans import overlap from allmydata.interfaces import DownloadStopped from .common import BadSegmentNumberError, WrongSegmentError @implementer(IPushProducer) class Segmentation(object): """I am responsible for a single offset+size read of the file. I handle segmentation: I figure out which segments are necessary, request them (from my CiphertextDownloader) in order, and trim the segments down to match the offset+size span. I use the Producer/Consumer interface to only request one segment at a time. """ def __init__(self, node, offset, size, consumer, read_ev, logparent=None): self._node = node self._hungry = True self._active_segnum = None self._cancel_segment_request = None # these are updated as we deliver data. At any given time, we still # want to download file[offset:offset+size] self._offset = offset self._size = size assert offset+size <= node._verifycap.size self._consumer = consumer self._read_ev = read_ev self._start_pause = None self._lp = logparent def start(self): self._alive = True self._deferred = defer.Deferred() self._deferred.addBoth(self._done) self._consumer.registerProducer(self, True) self._maybe_fetch_next() return self._deferred def _done(self, res): self._consumer.unregisterProducer() return res def _maybe_fetch_next(self): if not self._alive or not self._hungry: return if self._active_segnum is not None: return self._fetch_next() def _fetch_next(self): if self._size == 0: # done! self._alive = False self._hungry = False self._deferred.callback(self._consumer) return n = self._node have_actual_segment_size = n.segment_size is not None guess_s = "" if not have_actual_segment_size: guess_s = "probably " segment_size = n.segment_size or n.guessed_segment_size if self._offset == 0: # great! 
we want segment0 for sure wanted_segnum = 0 else: # this might be a guess wanted_segnum = self._offset // segment_size log.msg(format="_fetch_next(offset=%(offset)d) %(guess)swants segnum=%(segnum)d", offset=self._offset, guess=guess_s, segnum=wanted_segnum, level=log.NOISY, parent=self._lp, umid="5WfN0w") self._active_segnum = wanted_segnum d,c = n.get_segment(wanted_segnum, self._lp) self._cancel_segment_request = c d.addBoth(self._request_retired) d.addCallback(self._got_segment, wanted_segnum) if not have_actual_segment_size: # we can retry once d.addErrback(self._retry_bad_segment) d.addErrback(self._error) def _request_retired(self, res): self._active_segnum = None self._cancel_segment_request = None return res def _got_segment(self, segment_args, wanted_segnum): (segment_start, segment, decodetime) = segment_args self._cancel_segment_request = None # we got file[segment_start:segment_start+len(segment)] # we want file[self._offset:self._offset+self._size] log.msg(format="Segmentation got data:" " want [%(wantstart)d-%(wantend)d)," " given [%(segstart)d-%(segend)d), for segnum=%(segnum)d", wantstart=self._offset, wantend=self._offset+self._size, segstart=segment_start, segend=segment_start+len(segment), segnum=wanted_segnum, level=log.OPERATIONAL, parent=self._lp, umid="32dHcg") o = overlap(segment_start, len(segment), self._offset, self._size) # the overlap is file[o[0]:o[0]+o[1]] if not o or o[0] != self._offset: # we didn't get the first byte, so we can't use this segment log.msg("Segmentation handed wrong data:" " want [%d-%d), given [%d-%d), for segnum=%d," " for si=%r" % (self._offset, self._offset+self._size, segment_start, segment_start+len(segment), wanted_segnum, self._node._si_prefix), level=log.UNUSUAL, parent=self._lp, umid="STlIiA") # we may retry if the segnum we asked was based on a guess raise WrongSegmentError("I was given the wrong data.") offset_in_segment = self._offset - segment_start desired_data = segment[offset_in_segment:offset_in_segment+o[1]] self._offset += len(desired_data) self._size -= len(desired_data) self._consumer.write(desired_data) # the consumer might call our .pauseProducing() inside that write() # call, setting self._hungry=False self._read_ev.update(len(desired_data), 0, 0) # note: filenode.DecryptingConsumer is responsible for calling # _read_ev.update with how much decrypt_time was consumed self._maybe_fetch_next() def _retry_bad_segment(self, f): f.trap(WrongSegmentError, BadSegmentNumberError) # we guessed the segnum wrong: either one that doesn't overlap with # the start of our desired region, or one that's beyond the end of # the world. Now that we have the right information, we're allowed to # retry once. 
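        # The assert below should hold because WrongSegmentError and
        # BadSegmentNumberError are only raised after at least one share has
        # delivered a validated UEB, which sets node.segment_size; and since
        # this errback was only attached when we started from a guessed
        # segment size, the retry happens at most once per read.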
assert self._node.segment_size is not None return self._maybe_fetch_next() def _error(self, f): log.msg("Error in Segmentation", failure=f, level=log.WEIRD, parent=self._lp, umid="EYlXBg") self._alive = False self._hungry = False self._deferred.errback(f) def stopProducing(self): log.msg("asked to stopProducing", level=log.NOISY, parent=self._lp, umid="XIyL9w") self._hungry = False self._alive = False # cancel any outstanding segment request if self._cancel_segment_request: self._cancel_segment_request.cancel() self._cancel_segment_request = None e = DownloadStopped("our Consumer called stopProducing()") self._deferred.errback(e) def pauseProducing(self): self._hungry = False self._start_pause = now() def resumeProducing(self): self._hungry = True eventually(self._maybe_fetch_next) if self._start_pause is not None: paused = now() - self._start_pause self._read_ev.update(0, 0, paused) self._start_pause = None tahoe_lafs-1.20.0/src/allmydata/immutable/downloader/share.py0000644000000000000000000012564613615410400021165 0ustar00""" Ported to Python 3. """ import struct import time now = time.time from twisted.python.failure import Failure from foolscap.api import eventually from allmydata.util import base32, log, hashutil, mathutil from allmydata.util.spans import Spans, DataSpans from allmydata.interfaces import HASH_SIZE from allmydata.hashtree import IncompleteHashTree, BadHashError, \ NotEnoughHashesError from allmydata.immutable.layout import make_write_bucket_proxy from allmydata.util.observer import EventStreamObserver from .common import COMPLETE, CORRUPT, DEAD, BADSEGNUM class LayoutInvalid(Exception): pass class DataUnavailable(Exception): pass class Share(object): """I represent a single instance of a single share (e.g. I reference the shnum2 for share SI=abcde on server xy12t, not the one on server ab45q). I am associated with a CommonShare that remembers data that is held in common among e.g. SI=abcde/shnum2 across all servers. I am also associated with a CiphertextFileNode for e.g. SI=abcde (all shares, all servers). """ # this is a specific implementation of IShare for tahoe's native storage # servers. A different backend would use a different class. def __init__(self, rref, server, verifycap, commonshare, node, download_status, shnum, dyhb_rtt, logparent): self._rref = rref self._server = server self._node = node # holds share_hash_tree and UEB self.actual_segment_size = node.segment_size # might still be None # XXX change node.guessed_segment_size to # node.best_guess_segment_size(), which should give us the real ones # if known, else its guess. 
self._guess_offsets(verifycap, node.guessed_segment_size) self.actual_offsets = None self._UEB_length = None self._commonshare = commonshare # holds block_hash_tree self._download_status = download_status self._storage_index = verifycap.storage_index self._si_prefix = base32.b2a(verifycap.storage_index)[:8] self._shnum = shnum self._dyhb_rtt = dyhb_rtt # self._alive becomes False upon fatal corruption or server error self._alive = True self._loop_scheduled = False self._lp = log.msg(format="%(share)s created", share=repr(self), level=log.NOISY, parent=logparent, umid="P7hv2w") self._pending = Spans() # request sent but no response received yet self._received = DataSpans() # ACK response received, with data self._unavailable = Spans() # NAK response received, no data # any given byte of the share can be in one of four states: # in: _wanted, _requested, _received # FALSE FALSE FALSE : don't care about it at all # TRUE FALSE FALSE : want it, haven't yet asked for it # TRUE TRUE FALSE : request is in-flight # or didn't get it # FALSE TRUE TRUE : got it, haven't used it yet # FALSE TRUE FALSE : got it and used it # FALSE FALSE FALSE : block consumed, ready to ask again # # when we request data and get a NAK, we leave it in _requested # to remind ourself to not ask for it again. We don't explicitly # remove it from anything (maybe this should change). # # We retain the hashtrees in the Node, so we leave those spans in # _requested (and never ask for them again, as long as the Node is # alive). But we don't retain data blocks (too big), so when we # consume a data block, we remove it from _requested, so a later # download can re-fetch it. self._requested_blocks = [] # (segnum, set(observer2..)) v = server.get_version() ver = v[b"http://allmydata.org/tahoe/protocols/storage/v1"] self._overrun_ok = ver[b"tolerates-immutable-read-overrun"] # If _overrun_ok and we guess the offsets correctly, we can get # everything in one RTT. If _overrun_ok and we guess wrong, we might # need two RTT (but we could get lucky and do it in one). If overrun # is *not* ok (tahoe-1.3.0 or earlier), we need four RTT: 1=version, # 2=offset table, 3=UEB_length and everything else (hashes, block), # 4=UEB. self.had_corruption = False # for unit tests def __repr__(self): return "Share(sh%d-on-%s)" % (self._shnum, str(self._server.get_name(), "utf-8")) def is_alive(self): # XXX: reconsider. If the share sees a single error, should it remain # dead for all time? Or should the next segment try again? This DEAD # state is stored elsewhere too (SegmentFetcher per-share states?) # and needs to be consistent. We clear _alive in self._fail(), which # is called upon a network error, or layout failure, or hash failure # in the UEB or a hash tree. We do not _fail() for a hash failure in # a block, but of course we still tell our callers about # state=CORRUPT so they'll find a different share. return self._alive def _guess_offsets(self, verifycap, guessed_segment_size): self.guessed_segment_size = guessed_segment_size size = verifycap.size k = verifycap.needed_shares N = verifycap.total_shares r = self._node._calculate_sizes(guessed_segment_size) # num_segments, block_size/tail_block_size # guessed_segment_size/tail_segment_size/tail_segment_padded share_size = mathutil.div_ceil(size, k) # share_size is the amount of block data that will be put into each # share, summed over all segments. It does not include hashes, the # UEB, or other overhead. 
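        # Worked example of the share_size computed above (illustrative
        # numbers only): with size=1000000 and k=3,
        # div_ceil(1000000, 3) == 333334 bytes of block data per share,
        # before any hash or UEB overhead is added.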
# use the upload-side code to get this as accurate as possible ht = IncompleteHashTree(N) num_share_hashes = len(ht.needed_hashes(0, include_leaf=True)) wbp = make_write_bucket_proxy(None, None, share_size, r["block_size"], r["num_segments"], num_share_hashes, 0) self._fieldsize = wbp.fieldsize self._fieldstruct = wbp.fieldstruct self.guessed_offsets = wbp._offsets # called by our client, the SegmentFetcher def get_block(self, segnum): """Add a block number to the list of requests. This will eventually result in a fetch of the data necessary to validate the block, then the block itself. The fetch order is generally first-come-first-served, but requests may be answered out-of-order if data becomes available sooner. I return an EventStreamObserver, which has two uses. The first is to call o.subscribe(), which gives me a place to send state changes and eventually the data block. The second is o.cancel(), which removes the request (if it is still active). I will distribute the following events through my EventStreamObserver: - state=OVERDUE: ?? I believe I should have had an answer by now. You may want to ask another share instead. - state=BADSEGNUM: the segnum you asked for is too large. I must fetch a valid UEB before I can determine this, so the notification is asynchronous - state=COMPLETE, block=data: here is a valid block - state=CORRUPT: this share contains corrupted data - state=DEAD, f=Failure: the server reported an error, this share is unusable """ log.msg("%s.get_block(%d)" % (repr(self), segnum), level=log.NOISY, parent=self._lp, umid="RTo9MQ") assert segnum >= 0 o = EventStreamObserver() o.set_canceler(self, "_cancel_block_request") for i,(segnum0,observers) in enumerate(self._requested_blocks): if segnum0 == segnum: observers.add(o) break else: self._requested_blocks.append( (segnum, set([o])) ) self.schedule_loop() return o def _cancel_block_request(self, o): new_requests = [] for e in self._requested_blocks: (segnum0, observers) = e observers.discard(o) if observers: new_requests.append(e) self._requested_blocks = new_requests # internal methods def _active_segnum_and_observers(self): if self._requested_blocks: # we only retrieve information for one segment at a time, to # minimize alacrity (first come, first served) return self._requested_blocks[0] return None, [] def schedule_loop(self): if self._loop_scheduled: return self._loop_scheduled = True eventually(self.loop) def loop(self): self._loop_scheduled = False if not self._alive: return try: # if any exceptions occur here, kill the download log.msg("%s.loop, reqs=[%s], pending=%s, received=%s," " unavailable=%s" % (repr(self), ",".join([str(req[0]) for req in self._requested_blocks]), self._pending.dump(), self._received.dump(), self._unavailable.dump() ), level=log.NOISY, parent=self._lp, umid="BaL1zw") self._do_loop() # all exception cases call self._fail(), which clears self._alive except (BadHashError, NotEnoughHashesError, LayoutInvalid) as e: # Abandon this share. We do this if we see corruption in the # offset table, the UEB, or a hash tree. We don't abandon the # whole share if we see corruption in a data block (we abandon # just the one block, and still try to get data from other blocks # on the same server). In theory, we could get good data from a # share with a corrupt UEB (by first getting the UEB from some # other share), or corrupt hash trees, but the logic to decide # when this is safe is non-trivial. So for now, give up at the # first sign of corruption. 
# # _satisfy_*() code which detects corruption should first call # self._signal_corruption(), and then raise the exception. log.msg(format="corruption detected in %(share)s", share=repr(self), level=log.UNUSUAL, parent=self._lp, umid="gWspVw") self._fail(Failure(e), log.UNUSUAL) except DataUnavailable as e: # Abandon this share. log.msg(format="need data that will never be available" " from %s: pending=%s, received=%s, unavailable=%s" % (repr(self), self._pending.dump(), self._received.dump(), self._unavailable.dump() ), level=log.UNUSUAL, parent=self._lp, umid="F7yJnQ") self._fail(Failure(e), log.UNUSUAL) except BaseException: self._fail(Failure()) raise log.msg("%s.loop done, reqs=[%s], pending=%s, received=%s," " unavailable=%s" % (repr(self), ",".join([str(req[0]) for req in self._requested_blocks]), self._pending.dump(), self._received.dump(), self._unavailable.dump() ), level=log.NOISY, parent=self._lp, umid="9lRaRA") def _do_loop(self): # we are (eventually) called after all state transitions: # new segments added to self._requested_blocks # new data received from servers (responses to our read() calls) # impatience timer fires (server appears slow) # First, consume all of the information that we currently have, for # all the segments people currently want. start = now() while self._get_satisfaction(): pass self._download_status.add_misc_event("satisfy", start, now()) # When we get no satisfaction (from the data we've received so far), # we determine what data we desire (to satisfy more requests). The # number of segments is finite, so I can't get no satisfaction # forever. start = now() wanted, needed = self._desire() self._download_status.add_misc_event("desire", start, now()) # Finally, send out requests for whatever we need (desire minus # have). You can't always get what you want, but if you try # sometimes, you just might find, you get what you need. self._send_requests(wanted + needed) # and sometimes you can't even get what you need start = now() disappointment = needed & self._unavailable if disappointment.len(): self.had_corruption = True raise DataUnavailable("need %s but will never get it" % disappointment.dump()) self._download_status.add_misc_event("checkdis", start, now()) def _get_satisfaction(self): # return True if we retired a data block, and should therefore be # called again. Return False if we don't retire a data block (even if # we do retire some other data, like hash chains). if self.actual_offsets is None: if not self._satisfy_offsets(): # can't even look at anything without the offset table return False if not self._node.have_UEB: if not self._satisfy_UEB(): # can't check any hashes without the UEB return False # the call to _satisfy_UEB() will immediately set the # authoritative num_segments in all our CommonShares. If we # guessed wrong, we might stil be working on a bogus segnum # (beyond the real range). We catch this and signal BADSEGNUM # before invoking any further code that touches hashtrees. self.actual_segment_size = self._node.segment_size # might be updated assert self.actual_segment_size is not None # knowing the UEB means knowing num_segments assert self._node.num_segments is not None segnum, observers = self._active_segnum_and_observers() # if segnum is None, we don't really need to do anything (we have no # outstanding readers right now), but we'll fill in the bits that # aren't tied to any particular segment. 
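# Informal summary of the checks performed below (a restatement, not new
# behaviour): offsets -> UEB -> share hash chain -> block hash tree ->
# ciphertext hash tree -> data block. Each _satisfy_*() helper returns False
# if its bytes have not arrived yet; _get_satisfaction() returns True only
# when a block request is retired (delivered, found corrupt, or rejected as
# BADSEGNUM), which tells _do_loop to call it again.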
if segnum is not None and segnum >= self._node.num_segments: for o in observers: o.notify(state=BADSEGNUM) self._requested_blocks.pop(0) return True if self._node.share_hash_tree.needed_hashes(self._shnum): if not self._satisfy_share_hash_tree(): # can't check block_hash_tree without a root return False if self._commonshare.need_block_hash_root(): block_hash_root = self._node.share_hash_tree.get_leaf(self._shnum) self._commonshare.set_block_hash_root(block_hash_root) if segnum is None: return False # we don't want any particular segment right now # block_hash_tree needed_hashes = self._commonshare.get_needed_block_hashes(segnum) if needed_hashes: if not self._satisfy_block_hash_tree(needed_hashes): # can't check block without block_hash_tree return False # ciphertext_hash_tree needed_hashes = self._node.get_needed_ciphertext_hashes(segnum) if needed_hashes: if not self._satisfy_ciphertext_hash_tree(needed_hashes): # can't check decoded blocks without ciphertext_hash_tree return False # data blocks return self._satisfy_data_block(segnum, observers) def _satisfy_offsets(self): version_s = self._received.get(0, 4) if version_s is None: return False (version,) = struct.unpack(">L", version_s) if version == 1: table_start = 0x0c self._fieldsize = 0x4 self._fieldstruct = "L" elif version == 2: table_start = 0x14 self._fieldsize = 0x8 self._fieldstruct = "Q" else: self.had_corruption = True raise LayoutInvalid("unknown version %d (I understand 1 and 2)" % version) offset_table_size = 6 * self._fieldsize table_s = self._received.pop(table_start, offset_table_size) if table_s is None: return False fields = struct.unpack(">"+6*self._fieldstruct, table_s) offsets = {} for i,field in enumerate(['data', 'plaintext_hash_tree', # UNUSED 'crypttext_hash_tree', 'block_hashes', 'share_hashes', 'uri_extension', ] ): offsets[field] = fields[i] self.actual_offsets = offsets log.msg("actual offsets: data=%d, plaintext_hash_tree=%d, crypttext_hash_tree=%d, block_hashes=%d, share_hashes=%d, uri_extension=%d" % tuple(fields), level=log.NOISY, parent=self._lp, umid="jedQcw") self._received.remove(0, 4) # don't need this anymore # validate the offsets a bit share_hashes_size = offsets["uri_extension"] - offsets["share_hashes"] if share_hashes_size < 0 or share_hashes_size % (2+HASH_SIZE) != 0: # the share hash chain is stored as (hashnum,hash) pairs self.had_corruption = True raise LayoutInvalid("share hashes malformed -- should be a" " multiple of %d bytes -- not %d" % (2+HASH_SIZE, share_hashes_size)) block_hashes_size = offsets["share_hashes"] - offsets["block_hashes"] if block_hashes_size < 0 or block_hashes_size % (HASH_SIZE) != 0: # the block hash tree is stored as a list of hashes self.had_corruption = True raise LayoutInvalid("block hashes malformed -- should be a" " multiple of %d bytes -- not %d" % (HASH_SIZE, block_hashes_size)) # we only look at 'crypttext_hash_tree' if the UEB says we're # actually using it. Same with 'plaintext_hash_tree'. This gives us # some wiggle room: a place to stash data for later extensions. 
return True def _satisfy_UEB(self): o = self.actual_offsets fsize = self._fieldsize UEB_length_s = self._received.get(o["uri_extension"], fsize) if not UEB_length_s: return False (UEB_length,) = struct.unpack(">"+self._fieldstruct, UEB_length_s) UEB_s = self._received.pop(o["uri_extension"]+fsize, UEB_length) if not UEB_s: return False self._received.remove(o["uri_extension"], fsize) try: self._node.validate_and_store_UEB(UEB_s) return True except (LayoutInvalid, BadHashError) as e: # TODO: if this UEB was bad, we'll keep trying to validate it # over and over again. Only log.err on the first one, or better # yet skip all but the first f = Failure(e) self._signal_corruption(f, o["uri_extension"], fsize+UEB_length) self.had_corruption = True raise def _satisfy_share_hash_tree(self): # the share hash chain is stored as (hashnum,hash) tuples, so you # can't fetch just the pieces you need, because you don't know # exactly where they are. So fetch everything, and parse the results # later. o = self.actual_offsets hashlen = o["uri_extension"] - o["share_hashes"] assert hashlen % (2+HASH_SIZE) == 0 hashdata = self._received.get(o["share_hashes"], hashlen) if not hashdata: return False share_hashes = {} for i in range(0, hashlen, 2+HASH_SIZE): (hashnum,) = struct.unpack(">H", hashdata[i:i+2]) hashvalue = hashdata[i+2:i+2+HASH_SIZE] share_hashes[hashnum] = hashvalue # TODO: if they give us an empty set of hashes, # process_share_hashes() won't fail. We must ensure that this # situation doesn't allow unverified shares through. Manual testing # shows that set_block_hash_root() throws an assert because an # internal node is None instead of an actual hash, but we want # something better. It's probably best to add a method to # IncompleteHashTree which takes a leaf number and raises an # exception unless that leaf is present and fully validated. 
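# Worked example (illustrative, matching the parsing loop above): each
# share-hash-chain entry is a big-endian uint16 hash number followed by the
# hash itself, i.e. 2 + HASH_SIZE bytes per entry. Assuming the usual
# 32-byte SHA-256d hashes, that is 34 bytes per entry, so a chain of 5
# entries occupies 5 * 34 = 170 bytes -- which is why hashlen is required to
# be a multiple of (2 + HASH_SIZE).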
try: self._node.process_share_hashes(share_hashes) # adds to self._node.share_hash_tree except (BadHashError, NotEnoughHashesError) as e: f = Failure(e) self._signal_corruption(f, o["share_hashes"], hashlen) self.had_corruption = True raise self._received.remove(o["share_hashes"], hashlen) return True def _signal_corruption(self, f, start, offset): # there was corruption somewhere in the given range reason = "corruption in share[%d-%d): %s" % (start, start+offset, str(f.value)) return self._rref.callRemote( "advise_corrupt_share", reason.encode("utf-8") ).addErrback(log.err, "Error from remote call to advise_corrupt_share") def _satisfy_block_hash_tree(self, needed_hashes): o_bh = self.actual_offsets["block_hashes"] block_hashes = {} for hashnum in needed_hashes: hashdata = self._received.get(o_bh+hashnum*HASH_SIZE, HASH_SIZE) if hashdata: block_hashes[hashnum] = hashdata else: return False # missing some hashes # note that we don't submit any hashes to the block_hash_tree until # we've gotten them all, because the hash tree will throw an # exception if we only give it a partial set (which it therefore # cannot validate) try: self._commonshare.process_block_hashes(block_hashes) except (BadHashError, NotEnoughHashesError) as e: f = Failure(e) hashnums = ",".join([str(n) for n in sorted(block_hashes.keys())]) log.msg(format="hash failure in block_hashes=(%(hashnums)s)," " from %(share)s", hashnums=hashnums, shnum=self._shnum, share=repr(self), failure=f, level=log.WEIRD, parent=self._lp, umid="yNyFdA") hsize = max(0, max(needed_hashes)) * HASH_SIZE self._signal_corruption(f, o_bh, hsize) self.had_corruption = True raise for hashnum in needed_hashes: self._received.remove(o_bh+hashnum*HASH_SIZE, HASH_SIZE) return True def _satisfy_ciphertext_hash_tree(self, needed_hashes): start = self.actual_offsets["crypttext_hash_tree"] hashes = {} for hashnum in needed_hashes: hashdata = self._received.get(start+hashnum*HASH_SIZE, HASH_SIZE) if hashdata: hashes[hashnum] = hashdata else: return False # missing some hashes # we don't submit any hashes to the ciphertext_hash_tree until we've # gotten them all try: self._node.process_ciphertext_hashes(hashes) except (BadHashError, NotEnoughHashesError) as e: f = Failure(e) hashnums = ",".join([str(n) for n in sorted(hashes.keys())]) log.msg(format="hash failure in ciphertext_hashes=(%(hashnums)s)," " from %(share)s", hashnums=hashnums, share=repr(self), failure=f, level=log.WEIRD, parent=self._lp, umid="iZI0TA") hsize = max(0, max(needed_hashes))*HASH_SIZE self._signal_corruption(f, start, hsize) self.had_corruption = True raise for hashnum in needed_hashes: self._received.remove(start+hashnum*HASH_SIZE, HASH_SIZE) return True def _satisfy_data_block(self, segnum, observers): tail = (segnum == self._node.num_segments-1) datastart = self.actual_offsets["data"] blockstart = datastart + segnum * self._node.block_size blocklen = self._node.block_size if tail: blocklen = self._node.tail_block_size block = self._received.pop(blockstart, blocklen) if not block: log.msg("no data for block %s (want [%d:+%d])" % (repr(self), blockstart, blocklen), level=log.NOISY, parent=self._lp, umid="aK0RFw") return False log.msg(format="%(share)s._satisfy_data_block [%(start)d:+%(length)d]", share=repr(self), start=blockstart, length=blocklen, level=log.NOISY, parent=self._lp, umid="uTDNZg") # this block is being retired, either as COMPLETE or CORRUPT, since # no further data reads will help assert self._requested_blocks[0][0] == segnum try: self._commonshare.check_block(segnum, 
block) # hurrah, we have a valid block. Deliver it. for o in observers: # goes to SegmentFetcher._block_request_activity o.notify(state=COMPLETE, block=block) # now clear our received data, to dodge the #1170 spans.py # complexity bug self._received = DataSpans() except (BadHashError, NotEnoughHashesError) as e: # rats, we have a corrupt block. Notify our clients that they # need to look elsewhere, and advise the server. Unlike # corruption in other parts of the share, this doesn't cause us # to abandon the whole share. f = Failure(e) log.msg(format="hash failure in block %(segnum)d, from %(share)s", segnum=segnum, share=repr(self), failure=f, level=log.WEIRD, parent=self._lp, umid="mZjkqA") for o in observers: o.notify(state=CORRUPT) self._signal_corruption(f, blockstart, blocklen) self.had_corruption = True # in either case, we've retired this block self._requested_blocks.pop(0) # popping the request keeps us from turning around and wanting the # block again right away return True # got satisfaction def _desire(self): segnum, observers = self._active_segnum_and_observers() # maybe None # 'want_it' is for data we merely want: we know that we don't really # need it. This includes speculative reads, like the first 1KB of the # share (for the offset table) and the first 2KB of the UEB. # # 'need_it' is for data that, if we have the real offset table, we'll # need. If we are only guessing at the offset table, it's merely # wanted. (The share is abandoned if we can't get data that we really # need). # # 'gotta_gotta_have_it' is for data that we absolutely need, # independent of whether we're still guessing about the offset table: # the version number and the offset table itself. # # Mr. Popeil, I'm in trouble, need your assistance on the double. Aww.. desire = Spans(), Spans(), Spans() (want_it, need_it, gotta_gotta_have_it) = desire self.actual_segment_size = self._node.segment_size # might be updated o = self.actual_offsets or self.guessed_offsets segsize = self.actual_segment_size or self.guessed_segment_size r = self._node._calculate_sizes(segsize) if not self.actual_offsets: # all _desire functions add bits to the three desire[] spans self._desire_offsets(desire) # we can use guessed offsets as long as this server tolerates # overrun. Otherwise, we must wait for the offsets to arrive before # we try to read anything else. if self.actual_offsets or self._overrun_ok: if not self._node.have_UEB: self._desire_UEB(desire, o) self._desire_share_hashes(desire, o) if segnum is not None: # They might be asking for a segment number that is beyond # what we guess the file contains, but _desire_block_hashes # and _desire_data will tolerate that. self._desire_block_hashes(desire, o, segnum) self._desire_data(desire, o, r, segnum, segsize) log.msg("end _desire: want_it=%s need_it=%s gotta=%s" % (want_it.dump(), need_it.dump(), gotta_gotta_have_it.dump()), level=log.NOISY, parent=self._lp, umid="IG7CgA") if self.actual_offsets: return (want_it, need_it+gotta_gotta_have_it) else: return (want_it+need_it, gotta_gotta_have_it) def _desire_offsets(self, desire): (want_it, need_it, gotta_gotta_have_it) = desire if self._overrun_ok: # easy! this includes version number, sizes, and offsets want_it.add(0, 1024) return # v1 has an offset table that lives [0x0,0x24). v2 lives [0x0,0x44). # To be conservative, only request the data that we know lives there, # even if that means more roundtrips. 
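# Worked example of the two layouts referenced above (derived from
# _satisfy_offsets): v1 stores six 4-byte ">L" offset fields starting at
# 0x0c, so version word plus table span [0x0, 0x0c + 6*4) = [0x0, 0x24);
# v2 stores six 8-byte ">Q" fields starting at 0x14, spanning
# [0x0, 0x14 + 6*8) = [0x0, 0x44).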
gotta_gotta_have_it.add(0, 4) # version number, always safe version_s = self._received.get(0, 4) if not version_s: return (version,) = struct.unpack(">L", version_s) # The code in _satisfy_offsets will have checked this version # already. There is no code path to get this far with version>2. assert 1 <= version <= 2, "can't get here, version=%d" % version if version == 1: table_start = 0x0c fieldsize = 0x4 elif version == 2: table_start = 0x14 fieldsize = 0x8 offset_table_size = 6 * fieldsize gotta_gotta_have_it.add(table_start, offset_table_size) def _desire_UEB(self, desire, o): (want_it, need_it, gotta_gotta_have_it) = desire # UEB data is stored as (length,data). if self._overrun_ok: # We can pre-fetch 2kb, which should probably cover it. If it # turns out to be larger, we'll come back here later with a known # length and fetch the rest. want_it.add(o["uri_extension"], 2048) # now, while that is probably enough to fetch the whole UEB, it # might not be, so we need to do the next few steps as well. In # most cases, the following steps will not actually add anything # to need_it need_it.add(o["uri_extension"], self._fieldsize) # only use a length if we're sure it's correct, otherwise we'll # probably fetch a huge number if not self.actual_offsets: return UEB_length_s = self._received.get(o["uri_extension"], self._fieldsize) if UEB_length_s: (UEB_length,) = struct.unpack(">"+self._fieldstruct, UEB_length_s) # we know the length, so make sure we grab everything need_it.add(o["uri_extension"]+self._fieldsize, UEB_length) def _desire_share_hashes(self, desire, o): (want_it, need_it, gotta_gotta_have_it) = desire if self._node.share_hash_tree.needed_hashes(self._shnum): hashlen = o["uri_extension"] - o["share_hashes"] need_it.add(o["share_hashes"], hashlen) def _desire_block_hashes(self, desire, o, segnum): (want_it, need_it, gotta_gotta_have_it) = desire # block hash chain for hashnum in self._commonshare.get_desired_block_hashes(segnum): need_it.add(o["block_hashes"]+hashnum*HASH_SIZE, HASH_SIZE) # ciphertext hash chain for hashnum in self._node.get_desired_ciphertext_hashes(segnum): need_it.add(o["crypttext_hash_tree"]+hashnum*HASH_SIZE, HASH_SIZE) def _desire_data(self, desire, o, r, segnum, segsize): if segnum > r["num_segments"]: # they're asking for a segment that's beyond what we think is the # end of the file. We won't get here if we've already learned the # real UEB: _get_satisfaction() will notice the out-of-bounds and # terminate the loop. So we must still be guessing, which means # that they might be correct in asking for such a large segnum. # But if they're right, then our segsize/segnum guess is # certainly wrong, which means we don't know what data blocks to # ask for yet. So don't bother adding anything. When the UEB # comes back and we learn the correct segsize/segnums, we'll # either reject the request or have enough information to proceed # normally. This costs one roundtrip. 
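# Illustrative numbers only (hypothetical, to make the comment above
# concrete): if the file is 1,000,000 bytes and we guessed segsize=131072,
# we compute num_segments = div_ceil(1000000, 131072) = 8; but if the real
# segsize turns out to be 65536, the file actually has 16 segments, so a
# request for e.g. segnum 12 is legitimate even though it looks out of range
# under our guess.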
log.msg("_desire_data: segnum(%d) looks wrong (numsegs=%d)" % (segnum, r["num_segments"]), level=log.UNUSUAL, parent=self._lp, umid="tuYRQQ") return (want_it, need_it, gotta_gotta_have_it) = desire tail = (segnum == r["num_segments"]-1) datastart = o["data"] blockstart = datastart + segnum * r["block_size"] blocklen = r["block_size"] if tail: blocklen = r["tail_block_size"] need_it.add(blockstart, blocklen) def _send_requests(self, desired): ask = desired - self._pending - self._received.get_spans() log.msg("%s._send_requests, desired=%s, pending=%s, ask=%s" % (repr(self), desired.dump(), self._pending.dump(), ask.dump()), level=log.NOISY, parent=self._lp, umid="E94CVA") # XXX At one time, this code distinguished between data blocks and # hashes, and made sure to send (small) requests for hashes before # sending (big) requests for blocks. The idea was to make sure that # all hashes arrive before the blocks, so the blocks can be consumed # and released in a single turn. I removed this for simplicity. # Reconsider the removal: maybe bring it back. ds = self._download_status for (start, length) in ask: # TODO: quantize to reasonably-large blocks self._pending.add(start, length) lp = log.msg(format="%(share)s._send_request" " [%(start)d:+%(length)d]", share=repr(self), start=start, length=length, level=log.NOISY, parent=self._lp, umid="sgVAyA") block_ev = ds.add_block_request(self._server, self._shnum, start, length, now()) d = self._send_request(start, length) d.addCallback(self._got_data, start, length, block_ev, lp) d.addErrback(self._got_error, start, length, block_ev, lp) d.addCallback(self._trigger_loop) d.addErrback(lambda f: log.err(format="unhandled error during send_request", failure=f, parent=self._lp, level=log.WEIRD, umid="qZu0wg")) def _send_request(self, start, length): return self._rref.callRemote("read", start, length) def _got_data(self, data, start, length, block_ev, lp): block_ev.finished(len(data), now()) if not self._alive: return log.msg(format="%(share)s._got_data [%(start)d:+%(length)d] -> %(datalen)d", share=repr(self), start=start, length=length, datalen=len(data), level=log.NOISY, parent=lp, umid="5Qn6VQ") self._pending.remove(start, length) self._received.add(start, data) # if we ask for [a:c], and we get back [a:b] (b self.last_timestamp: self.last_timestamp = when def add_known_share(self, server, shnum): # XXX use me self.known_shares.append( (server, shnum) ) def add_problem(self, p): self.problems.append(p) # IDownloadStatus methods def get_counter(self): return self.counter def get_storage_index(self): return self.storage_index def get_size(self): return self.size def get_status(self): # mention all outstanding segment requests outstanding = set([s_ev["segment_number"] for s_ev in self.segment_events if s_ev["finish_time"] is None]) errorful = set([s_ev["segment_number"] for s_ev in self.segment_events if s_ev["success"] is False]) def join(segnums): if len(segnums) == 1: return "segment %s" % list(segnums)[0] else: return "segments %s" % (",".join([str(i) for i in sorted(segnums)])) error_s = "" if errorful: error_s = "; errors on %s" % join(errorful) if outstanding: s = "fetching %s" % join(outstanding) else: s = "idle" return s + error_s def get_progress(self): # measure all read events that aren't completely done, return the # total percentage complete for them if not self.read_events: return 0.0 total_outstanding, total_received = 0, 0 for r_ev in self.read_events: if r_ev["finish_time"] is None: total_outstanding += r_ev["length"] total_received += 
r_ev["bytes_returned"] # else ignore completed requests if not total_outstanding: return 1.0 return total_received / total_outstanding def using_helper(self): return False def get_active(self): # a download is considered active if it has at least one outstanding # read() call for r_ev in self.read_events: if r_ev["finish_time"] is None: return True return False def get_started(self): return self.first_timestamp def get_results(self): return None # TODO tahoe_lafs-1.20.0/src/allmydata/introducer/__init__.py0000644000000000000000000000057613615410400017655 0ustar00""" Ported to Python 3. """ from allmydata.introducer.server import create_introducer # apparently need to support "old .tac files" that may have # "allmydata.introducer.IntroducerNode" burned in -- don't use this in # new code from allmydata.introducer.server import _IntroducerNode as IntroducerNode # hush pyflakes __all__ = ( "create_introducer", "IntroducerNode", ) tahoe_lafs-1.20.0/src/allmydata/introducer/client.py0000644000000000000000000003577313615410400017403 0ustar00""" Ported to Python 3. """ from six import ensure_text, ensure_str import time from zope.interface import implementer from twisted.application import service from foolscap.api import Referenceable from allmydata.interfaces import InsufficientVersionError from allmydata.introducer.interfaces import IIntroducerClient, \ RIIntroducerSubscriberClient_v2 from allmydata.introducer.common import sign_to_foolscap, unsign_from_foolscap,\ get_tubid_string_from_ann from allmydata.util import log, yamlutil, connection_status from allmydata.util.rrefutil import add_version_to_remote_reference from allmydata.util.observer import ( ObserverList, ) from allmydata.crypto.error import BadSignature from allmydata.util.assertutil import precondition class InvalidCacheError(Exception): pass V2 = b"http://allmydata.org/tahoe/protocols/introducer/v2" @implementer(RIIntroducerSubscriberClient_v2, IIntroducerClient) # type: ignore[misc] class IntroducerClient(service.Service, Referenceable): def __init__(self, tub, introducer_furl, nickname, my_version, oldest_supported, sequencer, cache_filepath): self._tub = tub self.introducer_furl = introducer_furl assert isinstance(nickname, str) self._nickname = nickname self._my_version = my_version self._oldest_supported = oldest_supported self._sequencer = sequencer self._cache_filepath = cache_filepath self._my_subscriber_info = { b"version": 0, b"nickname": self._nickname, b"app-versions": [], b"my-version": self._my_version, b"oldest-supported": self._oldest_supported, } self._outbound_announcements = {} # not signed self._published_announcements = {} # signed self._canary = Referenceable() self._publisher = None self._since = None self._local_subscribers = {} # {servicename: ObserverList} self._subscriptions = set() # requests we've actually sent # _inbound_announcements remembers one announcement per # (servicename,serverid) pair. Anything that arrives with the same # pair will displace the previous one. This stores tuples of # (unpacked announcement dictionary, verifyingkey, rxtime). The ann # dicts can be compared for equality to distinguish re-announcement # from updates. It also provides memory for clients who subscribe # after startup. 
self._inbound_announcements = {} # hooks for unit tests self._debug_counts = { "inbound_message": 0, "inbound_announcement": 0, "wrong_service": 0, "duplicate_announcement": 0, "update": 0, "new_announcement": 0, "outbound_message": 0, } self._debug_outstanding = 0 def _debug_retired(self, res): self._debug_outstanding -= 1 return res def startService(self): service.Service.startService(self) self._introducer_error = None rc = self._tub.connectTo(ensure_str(self.introducer_furl), self._got_introducer) self._introducer_reconnector = rc def connect_failed(failure): self.log("Initial Introducer connection failed: perhaps it's down", level=log.WEIRD, failure=failure, umid="c5MqUQ") self._load_announcements() d = self._tub.getReference(self.introducer_furl) d.addErrback(connect_failed) def _load_announcements(self): try: with self._cache_filepath.open() as f: servers = yamlutil.safe_load(f) except EnvironmentError: return # no cache file if not isinstance(servers, list): log.err(InvalidCacheError("not a list"), level=log.WEIRD) return self.log("Using server data from cache", level=log.UNUSUAL) for server_params in servers: if not isinstance(server_params, dict): log.err(InvalidCacheError("not a dict: %r" % (server_params,)), level=log.WEIRD) continue # everything coming from yamlutil.safe_load is unicode key_s = server_params['key_s'].encode("ascii") self._deliver_announcements(key_s, server_params['ann']) def _save_announcements(self): announcements = [] for value in self._inbound_announcements.values(): ann, key_s, time_stamp = value # On Python 2, bytes strings are encoded into YAML Unicode strings. # On Python 3, bytes are encoded as YAML bytes. To minimize # changes, Python 3 for now ensures the same is true. server_params = { "ann" : ann, "key_s" : ensure_text(key_s), } announcements.append(server_params) announcement_cache_yaml = yamlutil.safe_dump(announcements) if isinstance(announcement_cache_yaml, str): announcement_cache_yaml = announcement_cache_yaml.encode("utf-8") self._cache_filepath.setContent(announcement_cache_yaml) def _got_introducer(self, publisher): self.log("connected to introducer, getting versions") default = { b"http://allmydata.org/tahoe/protocols/introducer/v1": { }, b"application-version": b"unknown: no get_version()", } d = add_version_to_remote_reference(publisher, default) d.addCallback(self._got_versioned_introducer) d.addErrback(self._got_error) def _got_error(self, f): # TODO: for the introducer, perhaps this should halt the application self._introducer_error = f # polled by tests def _got_versioned_introducer(self, publisher): self.log("got introducer version: %s" % (publisher.version,)) # we require an introducer that speaks at least V2 assert all(type(V2) == type(v) for v in publisher.version) if V2 not in publisher.version: raise InsufficientVersionError("V2", publisher.version) self._publisher = publisher self._since = int(time.time()) publisher.notifyOnDisconnect(self._disconnected) self._maybe_publish() self._maybe_subscribe() def _disconnected(self): self.log("bummer, we've lost our connection to the introducer") self._publisher = None self._since = int(time.time()) self._subscriptions.clear() def log(self, *args, **kwargs): if "facility" not in kwargs: kwargs["facility"] = "tahoe.introducer.client" return log.msg(*args, **kwargs) def subscribe_to(self, service_name, callback, *args, **kwargs): obs = self._local_subscribers.setdefault(service_name, ObserverList()) obs.subscribe(lambda key_s, ann: callback(key_s, ann, *args, **kwargs)) 
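# Hedged usage sketch (the caller and callback names are hypothetical, not
# taken from this module): a client that wants storage announcements would
# do roughly
#     ic.subscribe_to("storage", got_announcement)
# where got_announcement(key_s, ann) receives the announcer's pubkey string
# and the announcement dictionary, both for cached announcements replayed
# immediately below and for announcements that arrive later.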
self._maybe_subscribe() for index,(ann,key_s,when) in list(self._inbound_announcements.items()): precondition(isinstance(key_s, bytes), key_s) servicename = index[0] if servicename == service_name: obs.notify(key_s, ann) def _maybe_subscribe(self): if not self._publisher: self.log("want to subscribe, but no introducer yet", level=log.NOISY) return for service_name in self._local_subscribers: if service_name in self._subscriptions: continue self._subscriptions.add(service_name) self._debug_outstanding += 1 d = self._publisher.callRemote("subscribe_v2", self, service_name.encode("utf-8"), self._my_subscriber_info) d.addBoth(self._debug_retired) d.addErrback(log.err, facility="tahoe.introducer.client", level=log.WEIRD, umid="2uMScQ") def create_announcement_dict(self, service_name, ann): ann_d = { "version": 0, # "seqnum" and "nonce" will be populated with new values in # publish(), each time we make a change "nickname": self._nickname, "app-versions": [], "my-version": self._my_version, "oldest-supported": self._oldest_supported, "service-name": service_name, } ann_d.update(ann) return ann_d def publish(self, service_name, ann, signing_key): # we increment the seqnum every time we publish something new current_seqnum, current_nonce = self._sequencer() ann_d = self.create_announcement_dict(service_name, ann) self._outbound_announcements[service_name] = ann_d # publish all announcements with the new seqnum and nonce for service_name,ann_d in list(self._outbound_announcements.items()): ann_d["seqnum"] = current_seqnum ann_d["nonce"] = current_nonce ann_t = sign_to_foolscap(ann_d, signing_key) self._published_announcements[service_name] = ann_t self._maybe_publish() def _maybe_publish(self): if not self._publisher: self.log("want to publish, but no introducer yet", level=log.NOISY) return # this re-publishes everything. 
The Introducer ignores duplicates for ann_t in list(self._published_announcements.values()): self._debug_counts["outbound_message"] += 1 self._debug_outstanding += 1 d = self._publisher.callRemote("publish_v2", ann_t, self._canary) d.addBoth(self._debug_retired) d.addErrback(log.err, ann_t=ann_t, facility="tahoe.introducer.client", level=log.WEIRD, umid="xs9pVQ") def remote_announce_v2(self, announcements): lp = self.log("received %d announcements (v2)" % len(announcements)) return self.got_announcements(announcements, lp) def got_announcements(self, announcements, lp=None): self._debug_counts["inbound_message"] += 1 for ann_t in announcements: try: # this might raise UnknownKeyError or bad-sig error ann, key_s = unsign_from_foolscap(ann_t) # key is "v0-base32abc123" precondition(isinstance(key_s, bytes), key_s) except BadSignature: self.log("bad signature on inbound announcement: %s" % (ann_t,), parent=lp, level=log.WEIRD, umid="ZAU15Q") # process other announcements that arrived with the bad one continue self._process_announcement(ann, key_s) def _process_announcement(self, ann, key_s): precondition(isinstance(key_s, bytes), key_s) self._debug_counts["inbound_announcement"] += 1 service_name = str(ann["service-name"]) if service_name not in self._local_subscribers: self.log("announcement for a service we don't care about [%s]" % (service_name,), level=log.UNUSUAL, umid="dIpGNA") self._debug_counts["wrong_service"] += 1 return # for ASCII values, simplejson might give us unicode *or* bytes if "nickname" in ann and isinstance(ann["nickname"], bytes): ann["nickname"] = str(ann["nickname"]) nick_s = ann.get("nickname",u"").encode("utf-8") lp2 = self.log(format="announcement for nickname '%(nick)s', service=%(svc)s: %(ann)s", nick=nick_s, svc=service_name, ann=ann, umid="BoKEag") # how do we describe this node in the logs? desc_bits = [] assert key_s desc_bits.append(b"serverid=" + key_s[:20]) if "anonymous-storage-FURL" in ann: tubid_s = get_tubid_string_from_ann(ann) desc_bits.append(b"tubid=" + tubid_s[:8]) description = b"/".join(desc_bits) # the index is used to track duplicates index = (service_name, key_s) # is this announcement a duplicate? if (index in self._inbound_announcements and self._inbound_announcements[index][0] == ann): self.log(format="reannouncement for [%(service)s]:%(description)s, ignoring", service=service_name, description=description, parent=lp2, level=log.UNUSUAL, umid="B1MIdA") self._debug_counts["duplicate_announcement"] += 1 return # does it update an existing one? if index in self._inbound_announcements: old,_,_ = self._inbound_announcements[index] if "seqnum" in old: # must beat previous sequence number to replace if ("seqnum" not in ann or not isinstance(ann["seqnum"], int)): self.log("not replacing old announcement, no valid seqnum: %s" % (ann,), parent=lp2, level=log.NOISY, umid="zFGH3Q") return if ann["seqnum"] <= old["seqnum"]: # note that exact replays are caught earlier, by # comparing the entire signed announcement. 
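# Concrete (hypothetical) instance of the rule above: if we already hold an
# announcement for this (service, key) index with seqnum=5, an incoming
# announcement with seqnum=4, or a differently-signed one still carrying
# seqnum=5, is dropped here, while seqnum=6 falls through and replaces the
# cached copy.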
self.log("not replacing old announcement, " "new seqnum is too old (%s <= %s) " "(replay attack?): %s" % (ann["seqnum"], old["seqnum"], ann), parent=lp2, level=log.UNUSUAL, umid="JAAAoQ") return # ok, seqnum is newer, allow replacement self._debug_counts["update"] += 1 self.log("replacing old announcement: %s" % (ann,), parent=lp2, level=log.NOISY, umid="wxwgIQ") else: self._debug_counts["new_announcement"] += 1 self.log("new announcement[%s]" % service_name, parent=lp2, level=log.NOISY) self._inbound_announcements[index] = (ann, key_s, time.time()) self._save_announcements() # note: we never forget an index, but we might update its value self._deliver_announcements(key_s, ann) def _deliver_announcements(self, key_s, ann): precondition(isinstance(key_s, bytes), key_s) service_name = str(ann["service-name"]) obs = self._local_subscribers.get(service_name) if obs is not None: obs.notify(key_s, ann) def connection_status(self): assert self.running # startService builds _introducer_reconnector irc = self._introducer_reconnector last_received = (self._publisher.getDataLastReceivedAt() if self._publisher else None) return connection_status.from_foolscap_reconnector(irc, last_received) def connected_to_introducer(self): return bool(self._publisher) def get_since(self): return self._since tahoe_lafs-1.20.0/src/allmydata/introducer/common.py0000644000000000000000000001126613615410400017404 0ustar00""" Ported to Python 3. """ import re from foolscap.furl import decode_furl from allmydata.crypto.util import remove_prefix from allmydata.crypto import ed25519 from allmydata.util import base32, jsonbytes as json def get_tubid_string_from_ann(ann): furl = ann.get("anonymous-storage-FURL") or ann.get("FURL") return get_tubid_string(furl) def get_tubid_string(furl): m = re.match(r'pb://(\w+)@', furl) assert m return m.group(1).lower().encode("ascii") def sign_to_foolscap(announcement, signing_key): """ :param signing_key: a (private) signing key, as returned from e.g. :func:`allmydata.crypto.ed25519.signing_keypair_from_string` :returns: 3-tuple of (msg, sig, vk) where msg is a UTF8 JSON serialization of the `announcement` (bytes), sig is bytes (a signature of msg) and vk is the verifying key bytes """ # return (bytes, sig-str, pubkey-str). A future HTTP-based serialization # will use JSON({msg:b64(JSON(msg).utf8), sig:v0-b64(sig), # pubkey:v0-b64(pubkey)}) . msg = json.dumps(announcement).encode("utf-8") sig = b"v0-" + base32.b2a( ed25519.sign_data(signing_key, msg) ) verifying_key_string = ed25519.string_from_verifying_key( ed25519.verifying_key_from_signing_key(signing_key) ) ann_t = (msg, sig, remove_prefix(verifying_key_string, b"pub-")) return ann_t class UnknownKeyError(Exception): pass def unsign_from_foolscap(ann_t): (msg, sig_vs, claimed_key_vs) = ann_t if not sig_vs or not claimed_key_vs: raise UnknownKeyError("only signed announcements recognized") if not sig_vs.startswith(b"v0-"): raise UnknownKeyError("only v0- signatures recognized") if not claimed_key_vs.startswith(b"v0-"): raise UnknownKeyError("only v0- keys recognized") claimed_key = ed25519.verifying_key_from_string(b"pub-" + claimed_key_vs) sig_bytes = base32.a2b(remove_prefix(sig_vs, b"v0-")) ed25519.verify_signature(claimed_key, sig_bytes, msg) key_vs = claimed_key_vs ann = json.loads(msg.decode("utf-8")) return (ann, key_vs) class SubscriberDescriptor(object): """This describes a subscriber, for status display purposes. 
It contains the following attributes: .service_name: what they subscribed to (string) .when: time when they subscribed (seconds since epoch) .nickname: their self-provided nickname, or "?" (unicode) .version: their self-provided version (string) .app_versions: versions of each library they use (dict str->str) .remote_address: the external address from which they connected (string) .tubid: for subscribers connecting with Foolscap, their tubid (string) """ def __init__(self, service_name, when, nickname, version, app_versions, remote_address, tubid): self.service_name = service_name self.when = when self.nickname = nickname self.version = version self.app_versions = app_versions self.remote_address = remote_address self.tubid = tubid class AnnouncementDescriptor(object): """This describes an announcement, for status display purposes. It contains the following attributes, which will be empty ("" for strings) if the client did not provide them: .when: time the announcement was first received (seconds since epoch) .index: the announcements 'index', a tuple of (string-or-None). The server remembers one announcement per index. .canary: a Referenceable on the announcer, so the server can learn when they disconnect (for the status display) .announcement: raw dictionary of announcement data .service_name: which service they are announcing (string) .version: 'my-version' portion of announcement (string) .nickname: their self-provided nickname, or "" (unicode) .serverid: the server identifier. This is a pubkey (for V2 clients), or a tubid (for V1 clients). .connection_hints: where they listen (list of strings) if the announcement included a key for 'anonymous-storage-FURL', else an empty list. """ def __init__(self, when, index, canary, ann_d): self.when = when self.index = index self.canary = canary self.announcement = ann_d self.service_name = ann_d["service-name"] self.version = ann_d.get("my-version", "") self.nickname = ann_d.get("nickname", u"") (_, key_s) = index self.serverid = key_s furl = ann_d.get("anonymous-storage-FURL") if furl: _, self.connection_hints, _ = decode_furl(furl) else: self.connection_hints = [] tahoe_lafs-1.20.0/src/allmydata/introducer/interfaces.py0000644000000000000000000001223113615410400020230 0ustar00""" Ported to Python 3. """ from zope.interface import Interface from foolscap.api import StringConstraint, SetOf, DictOf, Any, \ RemoteInterface, Referenceable FURL = StringConstraint(1000) # v2 protocol over foolscap: Announcements are 3-tuples of (msg, sig_vs, # claimed_key_vs): # * msg (bytes): UTF-8(json(ann_dict)) # * ann_dict has IntroducerClient-provided keys like "version", "nickname", # "app-versions", "my-version", "oldest-supported", and "service-name". # Plus service-specific keys like "anonymous-storage-FURL" and # "permutation-seed-base32" (both for service="storage"). # * sig_vs (bytes): "v0-"+base32(signature(msg)) # * claimed_key_vs (bytes): "v0-"+base32(pubkey) # (nickname, my_version, oldest_supported) refer to the client as a whole. # The my_version/oldest_supported strings can be parsed by an # allmydata.util.version.Version instance, and then compared. The first goal # is to make sure that nodes are not confused by speaking to an incompatible # peer. The second goal is to enable the development of # backwards-compatibility code. # Note that old v1 clients (which are gone now) did not sign messages, so v2 # servers would deliver v2-format messages with sig_vs=claimed_key_vs=None. # These days we should always get a signature and a pubkey. 
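# Illustrative shape of one v2 announcement tuple (all values hypothetical):
#   msg            = b'{"version": 0, "nickname": "node-1",
#                       "service-name": "storage",
#                       "anonymous-storage-FURL": "pb://tubid@host:port/swiss",
#                       "seqnum": 7, "nonce": "...", ...}'   # UTF-8 JSON
#   sig_vs         = b"v0-" + base32(ed25519_signature_of_msg)
#   claimed_key_vs = b"v0-" + base32(ed25519_verifying_key)
# and the wire announcement is the 3-tuple (msg, sig_vs, claimed_key_vs).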
Announcement_v2 = Any() class RIIntroducerSubscriberClient_v2(RemoteInterface): __remote_name__ = "RIIntroducerSubscriberClient_v2.tahoe.allmydata.com" def announce_v2(announcements=SetOf(Announcement_v2)): """I accept announcements from the publisher.""" return None SubscriberInfo = DictOf(bytes, Any()) class RIIntroducerPublisherAndSubscriberService_v2(RemoteInterface): """To publish a service to the world, connect to me and give me your announcement message. I will deliver a copy to all connected subscribers. To hear about services, connect to me and subscribe to a specific service_name.""" __remote_name__ = "RIIntroducerPublisherAndSubscriberService_v2.tahoe.allmydata.com" def get_version(): return DictOf(bytes, Any()) def publish_v2(announcement=Announcement_v2, canary=Referenceable): return None def subscribe_v2(subscriber=RIIntroducerSubscriberClient_v2, service_name=bytes, subscriber_info=SubscriberInfo): """Give me a subscriber reference, and I will call its announce_v2() method with any announcements that match the desired service name. I will ignore duplicate subscriptions. The subscriber_info dictionary tells me about the subscriber, and is used for diagnostic/status displays.""" return None class IIntroducerClient(Interface): """I provide service introduction facilities for a node. I help nodes publish their services to the rest of the world, and I help them learn about services available on other nodes.""" def publish(service_name, ann, signing_key): """Publish the given announcement dictionary (which must be JSON-serializable), plus some additional keys, to the world. Each announcement is characterized by a (service_name, serverid) pair. When the server sees two announcements with the same pair, the later one will replace the earlier one. The serverid is derived from the signing_key, if present, otherwise it is derived from the 'anonymous-storage-FURL' key. signing_key (a SigningKey) will be used to sign the announcement.""" def subscribe_to(service_name, callback, *args, **kwargs): """Call this if you will eventually want to use services with the given SERVICE_NAME. This will prompt me to subscribe to announcements of those services. Your callback will be invoked with at least two arguments: a pubkey and an announcement dictionary, followed by any additional callback args/kwargs you gave me. The pubkey will be None unless the announcement was signed by the corresponding pubkey, in which case it will be a printable string like 'v0-base32..'. I will run your callback for both new announcements and for announcements that have changed, but you must be prepared to tolerate duplicates. The announcement that I give you comes from some other client. It will be a JSON-serializable dictionary which (by convention) is expected to have at least the following keys: version: 0 nickname: unicode app-versions: {} my-version: bytes oldest-supported: bytes service-name: bytes('storage') anonymous-storage-FURL: bytes(furl) In order to be JSON-serializable, all byte strings are assumed to be ASCII-encoded, and the receiver can therefore decode them into Unicode strings if they wish. Representation of these fields elsewhere in Tahoe may differ, e.g. by being unicode strings. Note that app-version will be an empty dictionary if either the publishing client or the Introducer are running older code. 
""" def connected_to_introducer(): """Returns a boolean, True if we are currently connected to the introducer, False if not.""" tahoe_lafs-1.20.0/src/allmydata/introducer/server.py0000644000000000000000000003532513615410400017424 0ustar00""" Ported to Python 3. """ from __future__ import annotations from six import ensure_text import time, os.path, textwrap from typing import Any, Union from zope.interface import implementer from twisted.application import service from twisted.internet import defer from twisted.internet.address import IPv4Address from twisted.python.failure import Failure from foolscap.api import Referenceable import allmydata from allmydata import node from allmydata.util import log, dictutil from allmydata.util.i2p_provider import create as create_i2p_provider from allmydata.util.tor_provider import create as create_tor_provider from allmydata.introducer.interfaces import \ RIIntroducerPublisherAndSubscriberService_v2 from allmydata.introducer.common import unsign_from_foolscap, \ SubscriberDescriptor, AnnouncementDescriptor from allmydata.node import read_config from allmydata.node import create_node_dir from allmydata.node import create_connection_handlers from allmydata.node import create_tub_options from allmydata.node import create_main_tub # this is put into README in new node-directories INTRODUCER_README = """ This directory contains files which contain private data for the Tahoe node, such as private keys. On Unix-like systems, the permissions on this directory are set to disallow users other than its owner from reading the contents of the files. See the 'configuration.rst' documentation file for details. """ _valid_config = node._common_valid_config class FurlFileConflictError(Exception): pass def create_introducer(basedir=u"."): """ :returns: a Deferred that yields a new _IntroducerNode instance """ try: # see https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2946 from twisted.internet import reactor if not os.path.exists(basedir): create_node_dir(basedir, INTRODUCER_README) config = read_config( basedir, u"client.port", generated_files=["introducer.furl"], _valid_config=_valid_config(), ) i2p_provider = create_i2p_provider(reactor, config) tor_provider = create_tor_provider(reactor, config) default_connection_handlers, foolscap_connection_handlers = create_connection_handlers(config, i2p_provider, tor_provider) tub_options = create_tub_options(config) main_tub = create_main_tub( config, tub_options, default_connection_handlers, foolscap_connection_handlers, i2p_provider, tor_provider, ) node = _IntroducerNode( config, main_tub, i2p_provider, tor_provider, ) i2p_provider.setServiceParent(node) tor_provider.setServiceParent(node) return defer.succeed(node) except Exception: return Failure() class _IntroducerNode(node.Node): NODETYPE = "introducer" def __init__(self, config, main_tub, i2p_provider, tor_provider): node.Node.__init__(self, config, main_tub, i2p_provider, tor_provider) self.init_introducer() webport = self.get_config("node", "web.port", None) if webport: self.init_web(webport) # strports string def init_introducer(self): if not self._is_tub_listening(): raise ValueError("config error: we are Introducer, but tub " "is not listening ('tub.port=' is empty)") introducerservice = IntroducerService() introducerservice.setServiceParent(self) old_public_fn = self.config.get_config_path(u"introducer.furl") private_fn = self.config.get_private_path(u"introducer.furl") if os.path.exists(old_public_fn): if os.path.exists(private_fn): msg = """This directory (%s) 
contains both an old public 'introducer.furl' file, and a new-style 'private/introducer.furl', so I cannot safely remove the old one. Please make sure your desired FURL is in private/introducer.furl, and remove the public file. If this causes your Introducer's FURL to change, you need to inform all grid members so they can update their tahoe.cfg. """ raise FurlFileConflictError(textwrap.dedent(msg)) os.rename(old_public_fn, private_fn) furl = self.tub.registerReference(introducerservice, furlFile=private_fn) self.log(" introducer can be found in {!r}".format(private_fn), umid="qF2L9A") self.introducer_url = furl # for tests def init_web(self, webport): self.log("init_web(webport=%s)", args=(webport,), umid="2bUygA") from allmydata.webish import IntroducerWebishServer nodeurl_path = self.config.get_config_path(u"node.url") config_staticdir = self.get_config("node", "web.static", "public_html") staticdir = self.config.get_config_path(config_staticdir) ws = IntroducerWebishServer(self, webport, nodeurl_path, staticdir) ws.setServiceParent(self) def stringify_remote_address(rref): remote = rref.getPeer() if isinstance(remote, IPv4Address): return "%s:%d" % (remote.host, remote.port) # loopback is a non-IPv4Address return str(remote) # MyPy doesn't work well with remote interfaces... @implementer(RIIntroducerPublisherAndSubscriberService_v2) class IntroducerService(service.MultiService, Referenceable): # type: ignore[misc] # The type in Twisted for services is wrong in 22.10... # https://github.com/twisted/twisted/issues/10135 name = "introducer" # type: ignore[assignment] # v1 is the original protocol, added in 1.0 (but only advertised starting # in 1.3), removed in 1.12. v2 is the new signed protocol, added in 1.10 # TODO: reconcile bytes/str for keys VERSION : dict[Union[bytes, str], Any]= { #"http://allmydata.org/tahoe/protocols/introducer/v1": { }, b"http://allmydata.org/tahoe/protocols/introducer/v2": { }, b"application-version": allmydata.__full_version__.encode("utf-8"), } def __init__(self): service.MultiService.__init__(self) self.introducer_url = None # 'index' is (service_name, key_s, tubid), where key_s or tubid is # None self._announcements = {} # dict of index -> # (ann_t, canary, ann, timestamp) # ann (the announcement dictionary) is cleaned up: nickname is always # unicode, servicename is always ascii, etc, even though # simplejson.loads sometimes returns either # self._subscribers is a dict mapping servicename to subscriptions # 'subscriptions' is a dict mapping rref to a subscription # 'subscription' is a tuple of (subscriber_info, timestamp) # 'subscriber_info' is a dict, provided directly by v2 clients. 
The # expected keys are: version, nickname, app-versions, my-version, # oldest-supported self._subscribers = dictutil.UnicodeKeyDict({}) self._debug_counts = {"inbound_message": 0, "inbound_duplicate": 0, "inbound_no_seqnum": 0, "inbound_old_replay": 0, "inbound_update": 0, "outbound_message": 0, "outbound_announcements": 0, "inbound_subscribe": 0} self._debug_outstanding = 0 def _debug_retired(self, res): self._debug_outstanding -= 1 return res def log(self, *args, **kwargs): if "facility" not in kwargs: kwargs["facility"] = "tahoe.introducer.server" return log.msg(*args, **kwargs) def get_announcements(self): """Return a list of AnnouncementDescriptor for all announcements""" announcements = [] for (index, (_, canary, ann, when)) in list(self._announcements.items()): ad = AnnouncementDescriptor(when, index, canary, ann) announcements.append(ad) return announcements def get_subscribers(self): """Return a list of SubscriberDescriptor objects for all subscribers""" s = [] for service_name, subscriptions in list(self._subscribers.items()): for rref,(subscriber_info,when) in list(subscriptions.items()): # note that if the subscriber didn't do Tub.setLocation, # tubid will be None. Also, subscribers do not tell us which # pubkey they use; only publishers do that. tubid = rref.getRemoteTubID() or "?" remote_address = stringify_remote_address(rref) # these three assume subscriber_info["version"]==0, but # should tolerate other versions nickname = subscriber_info.get("nickname", u"?") version = subscriber_info.get("my-version", u"?") app_versions = subscriber_info.get("app-versions", {}) # 'when' is the time they subscribed sd = SubscriberDescriptor(service_name, when, nickname, version, app_versions, remote_address, tubid) s.append(sd) return s def remote_get_version(self): return self.VERSION def remote_publish_v2(self, ann_t, canary): lp = self.log("introducer: announcement (v2) published", umid="L2QXkQ") return self.publish(ann_t, canary, lp) def publish(self, ann_t, canary, lp): try: self._publish(ann_t, canary, lp) except: log.err(format="Introducer.remote_publish failed on %(ann)s", ann=ann_t, level=log.UNUSUAL, parent=lp, umid="620rWA") raise def _publish(self, ann_t, canary, lp): self._debug_counts["inbound_message"] += 1 self.log("introducer: announcement published: %s" % (ann_t,), umid="wKHgCw") ann, key = unsign_from_foolscap(ann_t) # might raise BadSignature service_name = str(ann["service-name"]) index = (service_name, key) old = self._announcements.get(index) if old: (old_ann_t, canary, old_ann, timestamp) = old if old_ann == ann: self.log("but we already knew it, ignoring", level=log.NOISY, umid="myxzLw") self._debug_counts["inbound_duplicate"] += 1 return else: if "seqnum" in old_ann: # must beat previous sequence number to replace if ("seqnum" not in ann or not isinstance(ann["seqnum"], int)): self.log("not replacing old ann, no valid seqnum", level=log.NOISY, umid="ySbaVw") self._debug_counts["inbound_no_seqnum"] += 1 return if ann["seqnum"] <= old_ann["seqnum"]: self.log("not replacing old ann, new seqnum is too old" " (%s <= %s) (replay attack?)" % (ann["seqnum"], old_ann["seqnum"]), level=log.UNUSUAL, umid="sX7yqQ") self._debug_counts["inbound_old_replay"] += 1 return # ok, seqnum is newer, allow replacement self.log("old announcement being updated", level=log.NOISY, umid="304r9g") self._debug_counts["inbound_update"] += 1 self._announcements[index] = (ann_t, canary, ann, time.time()) #if canary: # canary.notifyOnDisconnect ... # use a CanaryWatcher? with cw.is_connected()? 
# actually we just want foolscap to give rref.is_connected(), since # this is only for the status display for s in self._subscribers.get(service_name, []): self._debug_counts["outbound_message"] += 1 self._debug_counts["outbound_announcements"] += 1 self._debug_outstanding += 1 d = s.callRemote("announce_v2", set([ann_t])) d.addBoth(self._debug_retired) d.addErrback(log.err, format="subscriber errored on announcement %(ann)s", ann=ann_t, facility="tahoe.introducer", level=log.UNUSUAL, umid="jfGMXQ") def remote_subscribe_v2(self, subscriber, service_name, subscriber_info): self.log("introducer: subscription[%r] request at %r" % (service_name, subscriber), umid="U3uzLg") service_name = ensure_text(service_name) subscriber_info = dictutil.UnicodeKeyDict({ ensure_text(k): v for (k, v) in subscriber_info.items() }) return self.add_subscriber(subscriber, service_name, subscriber_info) def add_subscriber(self, subscriber, service_name, subscriber_info): self._debug_counts["inbound_subscribe"] += 1 if service_name not in self._subscribers: self._subscribers[service_name] = {} subscribers = self._subscribers[service_name] if subscriber in subscribers: self.log("but they're already subscribed, ignoring", level=log.UNUSUAL, umid="Sy9EfA") return assert subscriber_info subscribers[subscriber] = (subscriber_info, time.time()) def _remove(): self.log("introducer: unsubscribing[%s] %s" % (service_name, subscriber), umid="vYGcJg") subscribers.pop(subscriber, None) subscriber.notifyOnDisconnect(_remove) # Make sure types are correct: for k in self._announcements: assert isinstance(k[0], type(service_name)) # now tell them about any announcements they're interested in announcements = set( [ ann_t for idx,(ann_t,canary,ann,when) in self._announcements.items() if idx[0] == service_name] ) if announcements: self._debug_counts["outbound_message"] += 1 self._debug_counts["outbound_announcements"] += len(announcements) self._debug_outstanding += 1 d = subscriber.callRemote("announce_v2", announcements) d.addBoth(self._debug_retired) d.addErrback(log.err, format="subscriber errored during subscribe %(anns)s", anns=announcements, facility="tahoe.introducer", level=log.UNUSUAL, umid="mtZepQ") return d tahoe_lafs-1.20.0/src/allmydata/mutable/__init__.py0000644000000000000000000000000013615410400017106 0ustar00tahoe_lafs-1.20.0/src/allmydata/mutable/checker.py0000644000000000000000000003063313615410400016772 0ustar00""" Ported to Python 3. 
""" from six import ensure_str from allmydata.uri import from_string from allmydata.util import base32, log, dictutil from allmydata.util.happinessutil import servers_of_happiness from allmydata.check_results import CheckAndRepairResults, CheckResults from allmydata.mutable.common import MODE_CHECK, MODE_WRITE, CorruptShareError from allmydata.mutable.servermap import ServerMap, ServermapUpdater from allmydata.mutable.retrieve import Retrieve # for verifying class MutableChecker(object): SERVERMAP_MODE = MODE_CHECK def __init__(self, node, storage_broker, history, monitor): self._node = node self._storage_broker = storage_broker self._history = history self._monitor = monitor self.bad_shares = [] # list of (server,shnum,failure) self._storage_index = self._node.get_storage_index() self.need_repair = False self.responded = set() # set of (binary) nodeids def check(self, verify=False, add_lease=False): servermap = ServerMap() # Updating the servermap in MODE_CHECK will stand a good chance # of finding all of the shares, and getting a good idea of # recoverability, etc, without verifying. u = ServermapUpdater(self._node, self._storage_broker, self._monitor, servermap, self.SERVERMAP_MODE, add_lease=add_lease) if self._history: self._history.notify_mapupdate(u.get_status()) d = u.update() d.addCallback(self._got_mapupdate_results) if verify: d.addCallback(self._verify_all_shares) d.addCallback(lambda res: servermap) d.addCallback(self._make_checker_results) return d def _got_mapupdate_results(self, servermap): # the file is healthy if there is exactly one recoverable version, it # has at least N distinct shares, and there are no unrecoverable # versions: all existing shares will be for the same version. self._monitor.raise_if_cancelled() self.best_version = None num_recoverable = len(servermap.recoverable_versions()) if num_recoverable: self.best_version = servermap.best_recoverable_version() # The file is unhealthy and needs to be repaired if: # - There are unrecoverable versions. if servermap.unrecoverable_versions(): self.need_repair = True # - There isn't a recoverable version. if num_recoverable != 1: self.need_repair = True # - The best recoverable version is missing some shares. if self.best_version: available_shares = servermap.shares_available() (num_distinct_shares, k, N) = available_shares[self.best_version] if num_distinct_shares < N: self.need_repair = True return servermap def _verify_all_shares(self, servermap): # read every byte of each share # # This logic is going to be very nearly the same as the # downloader. I bet we could pass the downloader a flag that # makes it do this, and piggyback onto that instead of # duplicating a bunch of code. # # Like: # r = Retrieve(blah, blah, blah, verify=True) # d = r.download() # (wait, wait, wait, d.callback) # # Then, when it has finished, we can check the servermap (which # we provided to Retrieve) to figure out which shares are bad, # since the Retrieve process will have updated the servermap as # it went along. # # By passing the verify=True flag to the constructor, we are # telling the downloader a few things. # # 1. It needs to download all N shares, not just K shares. # 2. It doesn't need to decrypt or decode the shares, only # verify them. 
if not self.best_version: return r = Retrieve(self._node, self._storage_broker, servermap, self.best_version, verify=True) d = r.download() d.addCallback(self._process_bad_shares) return d def _process_bad_shares(self, bad_shares): if bad_shares: self.need_repair = True self.bad_shares = bad_shares def _count_shares(self, smap, version): available_shares = smap.shares_available() (num_distinct_shares, k, N) = available_shares[version] counters = {} counters["count-shares-good"] = num_distinct_shares counters["count-shares-needed"] = k counters["count-shares-expected"] = N good_hosts = smap.all_servers_for_version(version) counters["count-good-share-hosts"] = len(good_hosts) vmap = smap.make_versionmap() counters["count-wrong-shares"] = sum([len(shares) for verinfo,shares in vmap.items() if verinfo != version]) return counters def _make_checker_results(self, smap): self._monitor.raise_if_cancelled() healthy = True report = [] summary = [] vmap = smap.make_versionmap() recoverable = smap.recoverable_versions() unrecoverable = smap.unrecoverable_versions() if recoverable: report.append("Recoverable Versions: " + "/".join(["%d*%s" % (len(vmap[v]), smap.summarize_version(v)) for v in recoverable])) if unrecoverable: report.append("Unrecoverable Versions: " + "/".join(["%d*%s" % (len(vmap[v]), smap.summarize_version(v)) for v in unrecoverable])) if smap.unrecoverable_versions(): healthy = False summary.append("some versions are unrecoverable") report.append("Unhealthy: some versions are unrecoverable") if len(recoverable) == 0: healthy = False summary.append("no versions are recoverable") report.append("Unhealthy: no versions are recoverable") if len(recoverable) > 1: healthy = False summary.append("multiple versions are recoverable") report.append("Unhealthy: there are multiple recoverable versions") if recoverable: best_version = smap.best_recoverable_version() report.append("Best Recoverable Version: " + smap.summarize_version(best_version)) counters = self._count_shares(smap, best_version) s = counters["count-shares-good"] k = counters["count-shares-needed"] N = counters["count-shares-expected"] if s < N: healthy = False report.append("Unhealthy: best version has only %d shares " "(encoding is %d-of-%d)" % (s, k, N)) summary.append("%d shares (enc %d-of-%d)" % (s, k, N)) elif unrecoverable: healthy = False # find a k and N from somewhere first = list(unrecoverable)[0] # not exactly the best version, but that doesn't matter too much counters = self._count_shares(smap, first) else: # couldn't find anything at all counters = { "count-shares-good": 0, "count-shares-needed": 3, # arbitrary defaults "count-shares-expected": 10, "count-good-share-hosts": 0, "count-wrong-shares": 0, } corrupt_share_locators = [] problems = [] if self.bad_shares: report.append("Corrupt Shares:") summary.append("Corrupt Shares:") for (server, shnum, f) in sorted(self.bad_shares, key=id): serverid = server.get_serverid() locator = (server, self._storage_index, shnum) corrupt_share_locators.append(locator) s = "%s-sh%d" % (ensure_str(server.get_name()), shnum) if f.check(CorruptShareError): ft = f.value.reason else: ft = str(f) report.append(" %s: %s" % (s, ft)) summary.append(s) p = (serverid, self._storage_index, shnum, f) problems.append(p) msg = ("CorruptShareError during mutable verify, " "serverid=%(serverid)s, si=%(si)s, shnum=%(shnum)d, " "where=%(where)s") log.msg(format=msg, serverid=server.get_name(), si=base32.b2a(self._storage_index), shnum=shnum, where=ft, level=log.WEIRD, umid="EkK8QA") sharemap = 
dictutil.DictOfSets() for verinfo in vmap: for (shnum, server, timestamp) in vmap[verinfo]: shareid = "%s-sh%d" % (smap.summarize_version(verinfo), shnum) sharemap.add(shareid, server) if healthy: summary = "Healthy" else: summary = "Unhealthy: " + " ".join(summary) count_happiness = servers_of_happiness(sharemap) cr = CheckResults(from_string(self._node.get_uri()), self._storage_index, healthy=healthy, recoverable=bool(recoverable), count_happiness=count_happiness, count_shares_needed=counters["count-shares-needed"], count_shares_expected=counters["count-shares-expected"], count_shares_good=counters["count-shares-good"], count_good_share_hosts=counters["count-good-share-hosts"], count_recoverable_versions=len(recoverable), count_unrecoverable_versions=len(unrecoverable), servers_responding=list(smap.get_reachable_servers()), sharemap=sharemap, count_wrong_shares=counters["count-wrong-shares"], list_corrupt_shares=corrupt_share_locators, count_corrupt_shares=len(corrupt_share_locators), list_incompatible_shares=[], count_incompatible_shares=0, summary=summary, report=report, share_problems=problems, servermap=smap.copy()) return cr class MutableCheckAndRepairer(MutableChecker): SERVERMAP_MODE = MODE_WRITE # needed to get the privkey def __init__(self, node, storage_broker, history, monitor): MutableChecker.__init__(self, node, storage_broker, history, monitor) self.cr_results = CheckAndRepairResults(self._storage_index) self.need_repair = False def check(self, verify=False, add_lease=False): d = MutableChecker.check(self, verify, add_lease) d.addCallback(self._stash_pre_repair_results) d.addCallback(self._maybe_repair) d.addCallback(lambda res: self.cr_results) return d def _stash_pre_repair_results(self, pre_repair_results): self.cr_results.pre_repair_results = pre_repair_results return pre_repair_results def _maybe_repair(self, pre_repair_results): crr = self.cr_results self._monitor.raise_if_cancelled() if not self.need_repair: crr.post_repair_results = pre_repair_results return if self._node.is_readonly(): # ticket #625: we cannot yet repair read-only mutable files crr.post_repair_results = pre_repair_results crr.repair_attempted = False return crr.repair_attempted = True d = self._node.repair(pre_repair_results, monitor=self._monitor) def _repair_finished(rr): crr.repair_successful = rr.get_successful() crr.post_repair_results = self._make_checker_results(rr.servermap) crr.repair_results = rr # TODO? return def _repair_error(f): # I'm not sure if I want to pass through a failure or not. crr.repair_successful = False crr.repair_failure = f # TODO? #crr.post_repair_results = ?? return f d.addCallbacks(_repair_finished, _repair_error) return d tahoe_lafs-1.20.0/src/allmydata/mutable/common.py0000644000000000000000000000716113615410400016656 0ustar00""" Ported to Python 3. """ from __future__ import annotations MODE_CHECK = "MODE_CHECK" # query all peers MODE_ANYTHING = "MODE_ANYTHING" # one recoverable version MODE_WRITE = "MODE_WRITE" # replace all shares, probably.. not for initial # creation MODE_READ = "MODE_READ" MODE_REPAIR = "MODE_REPAIR" # query all peers, get the privkey from allmydata.crypto import aes, rsa from allmydata.util import hashutil class NotWriteableError(Exception): pass class BadShareError(Exception): """This represents an error discovered in a particular share, during retrieve, from which we can recover by using some other share. This does *not* include local coding errors. 
""" class NeedMoreDataError(BadShareError): def __init__(self, needed_bytes, encprivkey_offset, encprivkey_length): Exception.__init__(self) self.needed_bytes = needed_bytes # up through EOF self.encprivkey_offset = encprivkey_offset self.encprivkey_length = encprivkey_length def __repr__(self): return "" % self.needed_bytes class UncoordinatedWriteError(Exception): def __repr__(self): return ("<%s -- You, oh user, tried to change a file or directory " "at the same time as another process was trying to change it. " " To avoid data loss, don't do this. Please see " "docs/write_coordination.rst for details.>" % (self.__class__.__name__,)) class UnrecoverableFileError(Exception): pass class NotEnoughServersError(Exception): """There were not enough functioning servers available to place shares upon. This might result from all servers being full or having an error, a local bug which causes all server requests to fail in the same way, or from there being zero servers. The first error received (if any) is stored in my .first_error attribute.""" def __init__(self, why, first_error=None): Exception.__init__(self, why, first_error) self.first_error = first_error class CorruptShareError(BadShareError): def __init__(self, server, shnum, reason): self.args = (server, shnum, reason) self.server = server self.shnum = shnum self.reason = reason def __str__(self): return " bytes: """ For SSK, encrypt a private ("signature") key using the writekey. """ encryptor = aes.create_encryptor(writekey) crypttext = aes.encrypt_data(encryptor, privkey) return crypttext def decrypt_privkey(writekey: bytes, enc_privkey: bytes) -> bytes: """ The inverse of ``encrypt_privkey``. """ decryptor = aes.create_decryptor(writekey) privkey = aes.decrypt_data(decryptor, enc_privkey) return privkey def derive_mutable_keys(keypair: tuple[rsa.PublicKey, rsa.PrivateKey]) -> tuple[bytes, bytes, bytes]: """ Derive the SSK writekey, encrypted writekey, and fingerprint from the public/private ("verification" / "signature") keypair. """ pubkey, privkey = keypair pubkey_s = rsa.der_string_from_verifying_key(pubkey) privkey_s = rsa.der_string_from_signing_key(privkey) writekey = hashutil.ssk_writekey_hash(privkey_s) encprivkey = encrypt_privkey(writekey, privkey_s) fingerprint = hashutil.ssk_pubkey_fingerprint_hash(pubkey_s) return writekey, encprivkey, fingerprint tahoe_lafs-1.20.0/src/allmydata/mutable/filenode.py0000644000000000000000000013202113615410400017145 0ustar00""" Ported to Python 3. 
""" from __future__ import annotations import random from zope.interface import implementer from twisted.internet import defer, reactor from foolscap.api import eventually from allmydata.interfaces import IMutableFileNode, ICheckable, ICheckResults, \ NotEnoughSharesError, MDMF_VERSION, SDMF_VERSION, IMutableUploadable, \ IMutableFileVersion, IWriteable from allmydata.util import hashutil, log, consumer, deferredutil, mathutil from allmydata.util.assertutil import precondition from allmydata.util.cputhreadpool import defer_to_thread from allmydata.uri import WriteableSSKFileURI, ReadonlySSKFileURI, \ WriteableMDMFFileURI, ReadonlyMDMFFileURI from allmydata.monitor import Monitor from allmydata.mutable.publish import Publish, MutableData,\ TransformingUploadable from allmydata.mutable.common import ( MODE_READ, MODE_WRITE, MODE_CHECK, UnrecoverableFileError, UncoordinatedWriteError, derive_mutable_keys, ) from allmydata.mutable.servermap import ServerMap, ServermapUpdater from allmydata.mutable.retrieve import Retrieve from allmydata.mutable.checker import MutableChecker, MutableCheckAndRepairer from allmydata.mutable.repairer import Repairer class BackoffAgent(object): # these parameters are copied from foolscap.reconnector, which gets them # from twisted.internet.protocol.ReconnectingClientFactory initialDelay = 1.0 factor = 2.7182818284590451 # (math.e) jitter = 0.11962656492 # molar Planck constant times c, Joule meter/mole maxRetries = 4 def __init__(self): self._delay = self.initialDelay self._count = 0 def delay(self, node, f): self._count += 1 if self._count == 4: return f self._delay = self._delay * self.factor self._delay = random.normalvariate(self._delay, self._delay * self.jitter) d = defer.Deferred() reactor.callLater(self._delay, d.callback, None) return d # use nodemaker.create_mutable_file() to make one of these @implementer(IMutableFileNode, ICheckable) class MutableFileNode(object): def __init__(self, storage_broker, secret_holder, default_encoding_parameters, history): self._storage_broker = storage_broker self._secret_holder = secret_holder self._default_encoding_parameters = default_encoding_parameters self._history = history self._pubkey = None # filled in upon first read self._privkey = None # filled in if we're mutable # we keep track of the last encoding parameters that we use. These # are updated upon retrieve, and used by publish. If we publish # without ever reading (i.e. overwrite()), then we use these values. self._required_shares = default_encoding_parameters["k"] self._total_shares = default_encoding_parameters["n"] self._sharemap = {} # known shares, shnum-to-[nodeids] self._most_recent_size = None # filled in after __init__ if we're being created for the first time; # filled in by the servermap updater before publishing, otherwise. # set to this default value in case neither of those things happen, # or in case the servermap can't find any shares to tell us what # to publish as. self._protocol_version = None # all users of this MutableFileNode go through the serializer. This # takes advantage of the fact that Deferreds discard the callbacks # that they're done with, so we can keep using the same Deferred # forever without consuming more and more memory. self._serializer = defer.succeed(None) # Starting with MDMF, we can get these from caps if they're # there. Leave them alone for now; they'll be filled in by my # init_from_cap method if necessary. 
self._downloader_hints = {} def __repr__(self): if hasattr(self, '_uri'): return "<%s %x %s %r>" % (self.__class__.__name__, id(self), self.is_readonly() and 'RO' or 'RW', self._uri.abbrev()) else: return "<%s %x %s %s>" % (self.__class__.__name__, id(self), None, None) def init_from_cap(self, filecap): # we have the URI, but we have not yet retrieved the public # verification key, nor things like 'k' or 'N'. If and when someone # wants to get our contents, we'll pull from shares and fill those # in. if isinstance(filecap, (WriteableMDMFFileURI, ReadonlyMDMFFileURI)): self._protocol_version = MDMF_VERSION elif isinstance(filecap, (ReadonlySSKFileURI, WriteableSSKFileURI)): self._protocol_version = SDMF_VERSION self._uri = filecap self._writekey = None if not filecap.is_readonly() and filecap.is_mutable(): self._writekey = self._uri.writekey self._readkey = self._uri.readkey self._storage_index = self._uri.storage_index self._fingerprint = self._uri.fingerprint # the following values are learned during Retrieval # self._pubkey # self._required_shares # self._total_shares # and these are needed for Publish. They are filled in by Retrieval # if possible, otherwise by the first peer that Publish talks to. self._privkey = None self._encprivkey = None return self @deferredutil.async_to_deferred async def create_with_keys(self, keypair, contents, version=SDMF_VERSION): """Call this to create a brand-new mutable file. It will create the shares, find homes for them, and upload the initial contents (created with the same rules as IClient.create_mutable_file() ). Returns a Deferred that fires (with the MutableFileNode instance you should use) when it completes. """ self._pubkey, self._privkey = keypair self._writekey, self._encprivkey, self._fingerprint = await defer_to_thread( derive_mutable_keys, keypair ) if version == MDMF_VERSION: self._uri = WriteableMDMFFileURI(self._writekey, self._fingerprint) self._protocol_version = version elif version == SDMF_VERSION: self._uri = WriteableSSKFileURI(self._writekey, self._fingerprint) self._protocol_version = version self._readkey = self._uri.readkey self._storage_index = self._uri.storage_index initial_contents = self._get_initial_contents(contents) return await self._upload(initial_contents, None) def _get_initial_contents(self, contents): if contents is None: return MutableData(b"") if isinstance(contents, bytes): return MutableData(contents) if IMutableUploadable.providedBy(contents): return contents assert callable(contents), "%s should be callable, not %s" % \ (contents, type(contents)) return contents(self) def _populate_pubkey(self, pubkey): self._pubkey = pubkey def _populate_required_shares(self, required_shares): self._required_shares = required_shares def _populate_total_shares(self, total_shares): self._total_shares = total_shares def _populate_privkey(self, privkey): self._privkey = privkey def _populate_encprivkey(self, encprivkey): self._encprivkey = encprivkey def get_write_enabler(self, server): seed = server.get_foolscap_write_enabler_seed() assert len(seed) == 20 return hashutil.ssk_write_enabler_hash(self._writekey, seed) def get_renewal_secret(self, server): crs = self._secret_holder.get_renewal_secret() frs = hashutil.file_renewal_secret_hash(crs, self._storage_index) lease_seed = server.get_lease_seed() assert len(lease_seed) == 20 return hashutil.bucket_renewal_secret_hash(frs, lease_seed) def get_cancel_secret(self, server): ccs = self._secret_holder.get_cancel_secret() fcs = hashutil.file_cancel_secret_hash(ccs, 
self._storage_index) lease_seed = server.get_lease_seed() assert len(lease_seed) == 20 return hashutil.bucket_cancel_secret_hash(fcs, lease_seed) def get_writekey(self): return self._writekey def get_readkey(self): return self._readkey def get_storage_index(self): return self._storage_index def get_fingerprint(self): return self._fingerprint def get_privkey(self): return self._privkey def get_encprivkey(self): return self._encprivkey def get_pubkey(self): return self._pubkey def get_required_shares(self): return self._required_shares def get_total_shares(self): return self._total_shares #################################### # IFilesystemNode def get_size(self): return self._most_recent_size def get_current_size(self): d = self.get_size_of_best_version() d.addCallback(self._stash_size) return d def _stash_size(self, size): self._most_recent_size = size return size def get_cap(self): return self._uri def get_readcap(self): return self._uri.get_readonly() def get_verify_cap(self): return self._uri.get_verify_cap() def get_repair_cap(self): if self._uri.is_readonly(): return None return self._uri def get_uri(self): return self._uri.to_string() def get_write_uri(self): if self.is_readonly(): return None return self._uri.to_string() def get_readonly_uri(self): return self._uri.get_readonly().to_string() def get_readonly(self): if self.is_readonly(): return self ro = MutableFileNode(self._storage_broker, self._secret_holder, self._default_encoding_parameters, self._history) ro.init_from_cap(self._uri.get_readonly()) return ro def is_mutable(self): return self._uri.is_mutable() def is_readonly(self): return self._uri.is_readonly() def is_unknown(self): return False def is_allowed_in_immutable_directory(self): return not self._uri.is_mutable() def raise_error(self): pass def __hash__(self): return hash((self.__class__, self._uri)) def __eq__(self, them): if type(self) != type(them): return False return self._uri == them._uri def __ne__(self, them): return not (self == them) ################################# # ICheckable def check(self, monitor, verify=False, add_lease=False): checker = MutableChecker(self, self._storage_broker, self._history, monitor) return checker.check(verify, add_lease) def check_and_repair(self, monitor, verify=False, add_lease=False): checker = MutableCheckAndRepairer(self, self._storage_broker, self._history, monitor) return checker.check(verify, add_lease) ################################# # IRepairable def repair(self, check_results, force=False, monitor=None): assert ICheckResults(check_results) r = Repairer(self, check_results, self._storage_broker, self._history, monitor) d = r.start(force) return d ################################# # IFileNode def get_best_readable_version(self): """ I return a Deferred that fires with a MutableFileVersion representing the best readable version of the file that I represent """ return self.get_readable_version() def get_readable_version(self, servermap=None, version=None): """ I return a Deferred that fires with an MutableFileVersion for my version argument, if there is a recoverable file of that version on the grid. If there is no recoverable version, I fire with an UnrecoverableFileError. If a servermap is provided, I look in there for the requested version. If no servermap is provided, I create and update a new one. If no version is provided, then I return a MutableFileVersion representing the best recoverable version of the file. 
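        A typical read path, shown only as an illustrative sketch (it
        assumes an existing IMutableFileNode called ``node``), is::

            d = node.get_readable_version()
            d.addCallback(lambda mfv: mfv.download_to_data())
            # ...the final Deferred fires with the file contents as bytes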
""" d = self._get_version_from_servermap(MODE_READ, servermap, version) def _build_version(servermap_and_their_version): (servermap, their_version) = servermap_and_their_version assert their_version in servermap.recoverable_versions() assert their_version in servermap.make_versionmap() mfv = MutableFileVersion(self, servermap, their_version, self._storage_index, self._storage_broker, self._readkey, history=self._history) assert mfv.is_readonly() mfv.set_downloader_hints(self._downloader_hints) # our caller can use this to download the contents of the # mutable file. return mfv return d.addCallback(_build_version) def _get_version_from_servermap(self, mode, servermap=None, version=None): """ I return a Deferred that fires with (servermap, version). This function performs validation and a servermap update. If it returns (servermap, version), the caller can assume that: - servermap was last updated in mode. - version is recoverable, and corresponds to the servermap. If version and servermap are provided to me, I will validate that version exists in the servermap, and that the servermap was updated correctly. If version is not provided, but servermap is, I will validate the servermap and return the best recoverable version that I can find in the servermap. If the version is provided but the servermap isn't, I will obtain a servermap that has been updated in the correct mode and validate that version is found and recoverable. If neither servermap nor version are provided, I will obtain a servermap updated in the correct mode, and return the best recoverable version that I can find in there. """ # XXX: wording ^^^^ if servermap and servermap.get_last_update()[0] == mode: d = defer.succeed(servermap) else: d = self._get_servermap(mode) def _get_version(servermap, v): if v and v not in servermap.recoverable_versions(): v = None elif not v: v = servermap.best_recoverable_version() if not v: raise UnrecoverableFileError("no recoverable versions") return (servermap, v) return d.addCallback(_get_version, version) def download_best_version(self): """ I return a Deferred that fires with the contents of the best version of this mutable file. """ return self._do_serialized(self._download_best_version) def _download_best_version(self): """ I am the serialized sibling of download_best_version. """ d = self.get_best_readable_version() d.addCallback(self._record_size) d.addCallback(lambda version: version.download_to_data()) # It is possible that the download will fail because there # aren't enough shares to be had. If so, we will try again after # updating the servermap in MODE_WRITE, which may find more # shares than updating in MODE_READ, as we just did. We can do # this by getting the best mutable version and downloading from # that -- the best mutable version will be a MutableFileVersion # with a servermap that was last updated in MODE_WRITE, as we # want. If this fails, then we give up. def _maybe_retry(failure): failure.trap(NotEnoughSharesError) d = self.get_best_mutable_version() d.addCallback(self._record_size) d.addCallback(lambda version: version.download_to_data()) return d d.addErrback(_maybe_retry) return d def _record_size(self, mfv): """ I record the size of a mutable file version. """ self._most_recent_size = mfv.get_size() return mfv def get_size_of_best_version(self): """ I return the size of the best version of this mutable file. This is equivalent to calling get_size() on the result of get_best_readable_version(). 
""" d = self.get_best_readable_version() return d.addCallback(lambda mfv: mfv.get_size()) ################################# # IMutableFileNode def get_best_mutable_version(self, servermap=None): """ I return a Deferred that fires with a MutableFileVersion representing the best readable version of the file that I represent. I am like get_best_readable_version, except that I will try to make a writeable version if I can. """ return self.get_mutable_version(servermap=servermap) def get_mutable_version(self, servermap=None, version=None): """ I return a version of this mutable file. I return a Deferred that fires with a MutableFileVersion If version is provided, the Deferred will fire with a MutableFileVersion initailized with that version. Otherwise, it will fire with the best version that I can recover. If servermap is provided, I will use that to find versions instead of performing my own servermap update. """ if self.is_readonly(): return self.get_readable_version(servermap=servermap, version=version) # get_mutable_version => write intent, so we require that the # servermap is updated in MODE_WRITE d = self._get_version_from_servermap(MODE_WRITE, servermap, version) def _build_version(servermap_and_smap_version): # these should have been set by the servermap update. (servermap, smap_version) = servermap_and_smap_version assert self._secret_holder assert self._writekey mfv = MutableFileVersion(self, servermap, smap_version, self._storage_index, self._storage_broker, self._readkey, self._writekey, self._secret_holder, history=self._history) assert not mfv.is_readonly() mfv.set_downloader_hints(self._downloader_hints) return mfv return d.addCallback(_build_version) # XXX: I'm uncomfortable with the difference between upload and # overwrite, which, FWICT, is basically that you don't have to # do a servermap update before you overwrite. We split them up # that way anyway, so I guess there's no real difficulty in # offering both ways to callers, but it also makes the # public-facing API cluttery, and makes it hard to discern the # right way of doing things. # In general, we leave it to callers to ensure that they aren't # going to cause UncoordinatedWriteErrors when working with # MutableFileVersions. We know that the next three operations # (upload, overwrite, and modify) will all operate on the same # version, so we say that only one of them can be going on at once, # and serialize them to ensure that that actually happens, since as # the caller in this situation it is our job to do that. def overwrite(self, new_contents): """ I overwrite the contents of the best recoverable version of this mutable file with new_contents. This is equivalent to calling overwrite on the result of get_best_mutable_version with new_contents as an argument. I return a Deferred that eventually fires with the results of my replacement process. """ # TODO: Update downloader hints. return self._do_serialized(self._overwrite, new_contents) def _overwrite(self, new_contents): """ I am the serialized sibling of overwrite. """ d = self.get_best_mutable_version() d.addCallback(lambda mfv: mfv.overwrite(new_contents)) d.addCallback(self._did_upload, new_contents.get_size()) return d def upload(self, new_contents, servermap): """ I overwrite the contents of the best recoverable version of this mutable file with new_contents, using servermap instead of creating/updating our own servermap. I return a Deferred that fires with the results of my upload. 
""" # TODO: Update downloader hints return self._do_serialized(self._upload, new_contents, servermap) def modify(self, modifier, backoffer=None): """ I modify the contents of the best recoverable version of this mutable file with the modifier. This is equivalent to calling modify on the result of get_best_mutable_version. I return a Deferred that eventually fires with an UploadResults instance describing this process. """ # TODO: Update downloader hints. return self._do_serialized(self._modify, modifier, backoffer) def _modify(self, modifier, backoffer): """ I am the serialized sibling of modify. """ d = self.get_best_mutable_version() d.addCallback(lambda mfv: mfv.modify(modifier, backoffer)) return d def download_version(self, servermap, version, fetch_privkey=False): """ Download the specified version of this mutable file. I return a Deferred that fires with the contents of the specified version as a bytestring, or errbacks if the file is not recoverable. """ d = self.get_readable_version(servermap, version) return d.addCallback(lambda mfv: mfv.download_to_data(fetch_privkey)) def get_servermap(self, mode): """ I return a servermap that has been updated in mode. mode should be one of MODE_READ, MODE_WRITE, MODE_CHECK or MODE_ANYTHING. See servermap.py for more on what these mean. """ return self._do_serialized(self._get_servermap, mode) def _get_servermap(self, mode): """ I am a serialized twin to get_servermap. """ servermap = ServerMap() d = self._update_servermap(servermap, mode) # The servermap will tell us about the most recent size of the # file, so we may as well set that so that callers might get # more data about us. if not self._most_recent_size: d.addCallback(self._get_size_from_servermap) return d def _get_size_from_servermap(self, servermap): """ I extract the size of the best version of this file and record it in self._most_recent_size. I return the servermap that I was given. """ if servermap.recoverable_versions(): v = servermap.best_recoverable_version() size = v[4] # verinfo[4] == size self._most_recent_size = size return servermap def _update_servermap(self, servermap, mode): u = ServermapUpdater(self, self._storage_broker, Monitor(), servermap, mode) if self._history: self._history.notify_mapupdate(u.get_status()) return u.update() #def set_version(self, version): # I can be set in two ways: # 1. When the node is created. # 2. (for an existing share) when the Servermap is updated # before I am read. # assert version in (MDMF_VERSION, SDMF_VERSION) # self._protocol_version = version def get_version(self): return self._protocol_version def _do_serialized(self, cb, *args, **kwargs): # note: to avoid deadlock, this callable is *not* allowed to invoke # other serialized methods within this (or any other) # MutableFileNode. The callable should be a bound method of this same # MFN instance. d = defer.Deferred() self._serializer.addCallback(lambda ignore: cb(*args, **kwargs)) # we need to put off d.callback until this Deferred is finished being # processed. Otherwise the caller's subsequent activities (like, # doing other things with this node) can cause reentrancy problems in # the Deferred code itself self._serializer.addBoth(lambda res: eventually(d.callback, res)) # add a log.err just in case something really weird happens, because # self._serializer stays around forever, therefore we won't see the # usual Unhandled Error in Deferred that would give us a hint. 
self._serializer.addErrback(log.err) return d def _upload(self, new_contents, servermap): """ A MutableFileNode still has to have some way of getting published initially, which is what I am here for. After that, all publishing, updating, modifying and so on happens through MutableFileVersions. """ assert self._pubkey, "update_servermap must be called before publish" # Define IPublishInvoker with a set_downloader_hints method? # Then have the publisher call that method when it's done publishing? p = Publish(self, self._storage_broker, servermap) if self._history: self._history.notify_publish(p.get_status(), new_contents.get_size()) d = p.publish(new_contents) d.addCallback(self._did_upload, new_contents.get_size()) return d def set_downloader_hints(self, hints): self._downloader_hints = hints def _did_upload(self, res, size): self._most_recent_size = size return res @implementer(IMutableFileVersion, IWriteable) class MutableFileVersion(object): """ I represent a specific version (most likely the best version) of a mutable file. Since I implement IReadable, instances which hold a reference to an instance of me are guaranteed the ability (absent connection difficulties or unrecoverable versions) to read the file that I represent. Depending on whether I was initialized with a write capability or not, I may also provide callers the ability to overwrite or modify the contents of the mutable file that I reference. """ def __init__(self, node, servermap, version, storage_index, storage_broker, readcap, writekey=None, write_secrets=None, history=None): self._node = node self._servermap = servermap self._version = version self._storage_index = storage_index self._write_secrets = write_secrets self._history = history self._storage_broker = storage_broker #assert isinstance(readcap, IURI) self._readcap = readcap self._writekey = writekey self._serializer = defer.succeed(None) def get_sequence_number(self): """ Get the sequence number of the mutable version that I represent. """ return self._version[0] # verinfo[0] == the sequence number # TODO: Terminology? def get_writekey(self): """ I return a writekey or None if I don't have a writekey. """ return self._writekey def set_downloader_hints(self, hints): """ I set the downloader hints. """ assert isinstance(hints, dict) self._downloader_hints = hints def get_downloader_hints(self): """ I return the downloader hints. """ return self._downloader_hints def overwrite(self, new_contents): """ I overwrite the contents of this mutable file version with the data in new_contents. """ assert not self.is_readonly() return self._do_serialized(self._overwrite, new_contents) def _overwrite(self, new_contents): assert IMutableUploadable.providedBy(new_contents) assert self._servermap.get_last_update()[0] == MODE_WRITE return self._upload(new_contents) def modify(self, modifier, backoffer=None): """I use a modifier callback to apply a change to the mutable file. I implement the following pseudocode:: obtain_mutable_filenode_lock() first_time = True while True: update_servermap(MODE_WRITE) old = retrieve_best_version() new = modifier(old, servermap, first_time) first_time = False if new == old: break try: publish(new) except UncoordinatedWriteError, e: backoffer(e) continue break release_mutable_filenode_lock() The idea is that your modifier function can apply a delta of some sort, and it will be re-run as necessary until it succeeds. 
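        For illustration, a hypothetical modifier that idempotently appends
        a marker (assuming ``mfv`` is a writeable MutableFileVersion) could
        look like::

            def _append_marker(old_contents, servermap, first_time):
                marker = b"MARKER"
                if old_contents.endswith(marker):
                    # delta already applied: return the contents unmodified
                    return old_contents
                return old_contents + marker

            d = mfv.modify(_append_marker)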
The modifier must inspect the old version to see whether its delta has already been applied: if so it should return the contents unmodified. Note that the modifier is required to run synchronously, and must not invoke any methods on this MutableFileNode instance. The backoff-er is a callable that is responsible for inserting a random delay between subsequent attempts, to help competing updates from colliding forever. It is also allowed to give up after a while. The backoffer is given two arguments: this MutableFileNode, and the Failure object that contains the UncoordinatedWriteError. It should return a Deferred that will fire when the next attempt should be made, or return the Failure if the loop should give up. If backoffer=None, a default one is provided which will perform exponential backoff, and give up after 4 tries. Note that the backoffer should not invoke any methods on this MutableFileNode instance, and it needs to be highly conscious of deadlock issues. """ assert not self.is_readonly() return self._do_serialized(self._modify, modifier, backoffer) def _modify(self, modifier, backoffer): if backoffer is None: backoffer = BackoffAgent().delay return self._modify_and_retry(modifier, backoffer, True) def _modify_and_retry(self, modifier, backoffer, first_time): """ I try to apply modifier to the contents of this version of the mutable file. If I succeed, I return an UploadResults instance describing my success. If I fail, I try again after waiting for a little bit. """ log.msg("doing modify") if first_time: d = self._update_servermap() else: # We ran into trouble; do MODE_CHECK so we're a little more # careful on subsequent tries. d = self._update_servermap(mode=MODE_CHECK) d.addCallback(lambda ignored: self._modify_once(modifier, first_time)) def _retry(f): f.trap(UncoordinatedWriteError) # Uh oh, it broke. We're allowed to trust the servermap for our # first try, but after that we need to update it. It's # possible that we've failed due to a race with another # uploader, and if the race is to converge correctly, we # need to know about that upload. d2 = defer.maybeDeferred(backoffer, self, f) d2.addCallback(lambda ignored: self._modify_and_retry(modifier, backoffer, False)) return d2 d.addErrback(_retry) return d def _modify_once(self, modifier, first_time): """ I attempt to apply a modifier to the contents of the mutable file. """ assert self._servermap.get_last_update()[0] != MODE_READ # download_to_data is serialized, so we have to call this to # avoid deadlock. d = self._try_to_download_data() def _apply(old_contents): new_contents = modifier(old_contents, self._servermap, first_time) precondition((isinstance(new_contents, bytes) or new_contents is None), "Modifier function must return bytes " "or None") if new_contents is None or new_contents == old_contents: log.msg("no changes") # no changes need to be made if first_time: return # However, since Publish is not automatically doing a # recovery when it observes UCWE, we need to do a second # publish. See #551 for details. We'll basically loop until # we managed an uncontested publish. old_uploadable = MutableData(old_contents) new_contents = old_uploadable else: new_contents = MutableData(new_contents) return self._upload(new_contents) d.addCallback(_apply) return d def is_readonly(self): """ I return True if this MutableFileVersion provides no write access to the file that it encapsulates, and False if it provides the ability to modify the file. 
""" return self._writekey is None def is_mutable(self): """ I return True, since mutable files are always mutable by somebody. """ return True def get_storage_index(self): """ I return the storage index of the reference that I encapsulate. """ return self._storage_index def get_size(self): """ I return the length, in bytes, of this readable object. """ return self._servermap.size_of_version(self._version) def download_to_data(self, fetch_privkey=False): # type: ignore # fixme """ I return a Deferred that fires with the contents of this readable object as a byte string. """ c = consumer.MemoryConsumer() d = self.read(c, fetch_privkey=fetch_privkey) d.addCallback(lambda mc: b"".join(mc.chunks)) return d def _try_to_download_data(self): """ I am an unserialized cousin of download_to_data; I am called from the children of modify() to download the data associated with this mutable version. """ c = consumer.MemoryConsumer() # modify will almost certainly write, so we need the privkey. d = self._read(c, fetch_privkey=True) d.addCallback(lambda mc: b"".join(mc.chunks)) return d def read(self, consumer, offset=0, size=None, fetch_privkey=False): """ I read a portion (possibly all) of the mutable file that I reference into consumer. """ return self._do_serialized(self._read, consumer, offset, size, fetch_privkey) def _read(self, consumer, offset=0, size=None, fetch_privkey=False): """ I am the serialized companion of read. """ r = Retrieve(self._node, self._storage_broker, self._servermap, self._version, fetch_privkey) if self._history: self._history.notify_retrieve(r.get_status()) d = r.download(consumer, offset, size) return d def _do_serialized(self, cb, *args, **kwargs): # note: to avoid deadlock, this callable is *not* allowed to invoke # other serialized methods within this (or any other) # MutableFileNode. The callable should be a bound method of this same # MFN instance. d = defer.Deferred() self._serializer.addCallback(lambda ignore: cb(*args, **kwargs)) # we need to put off d.callback until this Deferred is finished being # processed. Otherwise the caller's subsequent activities (like, # doing other things with this node) can cause reentrancy problems in # the Deferred code itself self._serializer.addBoth(lambda res: eventually(d.callback, res)) # add a log.err just in case something really weird happens, because # self._serializer stays around forever, therefore we won't see the # usual Unhandled Error in Deferred that would give us a hint. self._serializer.addErrback(log.err) return d def _upload(self, new_contents): #assert self._pubkey, "update_servermap must be called before publish" p = Publish(self._node, self._storage_broker, self._servermap) if self._history: self._history.notify_publish(p.get_status(), new_contents.get_size()) d = p.publish(new_contents) d.addCallback(self._did_upload, new_contents.get_size()) return d def _did_upload(self, res, size): self._most_recent_size = size return res def update(self, data, offset): """ Do an update of this mutable file version by inserting data at offset within the file. If offset is the EOF, this is an append operation. I return a Deferred that fires with the results of the update operation when it has completed. In cases where update does not append any data, or where it does not append so many blocks that the block count crosses a power-of-two boundary, this operation will use roughly O(data.get_size()) memory/bandwidth/CPU to perform the update. 
Otherwise, it must download, re-encode, and upload the entire file again, which will use O(filesize) resources. """ return self._do_serialized(self._update, data, offset) def _update(self, data, offset): """ I update the mutable file version represented by this particular IMutableVersion by inserting the data in data at the offset offset. I return a Deferred that fires when this has been completed. """ new_size = data.get_size() + offset old_size = self.get_size() segment_size = self._version[3] num_old_segments = mathutil.div_ceil(old_size, segment_size) num_new_segments = mathutil.div_ceil(new_size, segment_size) log.msg("got %d old segments, %d new segments" % \ (num_old_segments, num_new_segments)) # We do a whole file re-encode if the file is an SDMF file. if self._version[2]: # version[2] == SDMF salt, which MDMF lacks log.msg("doing re-encode instead of in-place update") return self._do_modify_update(data, offset) # Otherwise, we can replace just the parts that are changing. log.msg("updating in place") d = self._do_update_update(data, offset) d.addCallback(self._decode_and_decrypt_segments, data, offset) d.addCallback(self._build_uploadable_and_finish, data, offset) return d def _do_modify_update(self, data, offset): """ I perform a file update by modifying the contents of the file after downloading it, then reuploading it. I am less efficient than _do_update_update, but am necessary for certain updates. """ def m(old, servermap, first_time): start = offset rest = offset + data.get_size() new = old[:start] new += b"".join(data.read(data.get_size())) new += old[rest:] return new return self._modify(m, None) def _do_update_update(self, data, offset): """ I start the Servermap update that gets us the data we need to continue the update process. I return a Deferred that fires when the servermap update is done. """ assert IMutableUploadable.providedBy(data) assert self.is_mutable() # offset == self.get_size() is valid and means that we are # appending data to the file. assert offset <= self.get_size() segsize = self._version[3] # We'll need the segment that the data starts in, regardless of # what we'll do later. start_segment = offset // segsize # We only need the end segment if the data we append does not go # beyond the current end-of-file. end_segment = start_segment if offset + data.get_size() < self.get_size(): end_data = offset + data.get_size() # The last byte we touch is the end_data'th byte, which is actually # byte end_data - 1 because bytes are zero-indexed. end_data -= 1 end_segment = end_data // segsize self._start_segment = start_segment self._end_segment = end_segment # Now ask for the servermap to be updated in MODE_WRITE with # this update range. return self._update_servermap(update_range=(start_segment, end_segment)) def _decode_and_decrypt_segments(self, ignored, data, offset): """ After the servermap update, I take the encrypted and encoded data that the servermap fetched while doing its update and transform it into decoded-and-decrypted plaintext that can be used by the new uploadable. I return a Deferred that fires with the segments. """ r = Retrieve(self._node, self._storage_broker, self._servermap, self._version) # decode: takes in our blocks and salts from the servermap, # returns a Deferred that fires with the corresponding plaintext # segments. Does not download -- simply takes advantage of # existing infrastructure within the Retrieve class to avoid # duplicating code. 
sm = self._servermap # XXX: If the methods in the servermap don't work as # abstractions, you should rewrite them instead of going around # them. update_data = sm.update_data start_segments = {} # shnum -> start segment end_segments = {} # shnum -> end segment blockhashes = {} # shnum -> blockhash tree for (shnum, original_data) in list(update_data.items()): data = [d[1] for d in original_data if d[0] == self._version] # data is [(blockhashes,start,end)..] # Every data entry in our list should now be share shnum for # a particular version of the mutable file, so all of the # entries should be identical. datum = data[0] assert [x for x in data if x != datum] == [] # datum is (blockhashes,start,end) blockhashes[shnum] = datum[0] start_segments[shnum] = datum[1] # (block,salt) bytestrings end_segments[shnum] = datum[2] d1 = r.decode(start_segments, self._start_segment) d2 = r.decode(end_segments, self._end_segment) d3 = defer.succeed(blockhashes) return deferredutil.gatherResults([d1, d2, d3]) def _build_uploadable_and_finish(self, segments_and_bht, data, offset): """ After the process has the plaintext segments, I build the TransformingUploadable that the publisher will eventually re-upload to the grid. I then invoke the publisher with that uploadable, and return a Deferred when the publish operation has completed without issue. """ u = TransformingUploadable(data, offset, self._version[3], segments_and_bht[0], segments_and_bht[1]) p = Publish(self._node, self._storage_broker, self._servermap) return p.update(u, offset, segments_and_bht[2], self._version) def _update_servermap(self, mode=MODE_WRITE, update_range=None): """ I update the servermap. I return a Deferred that fires when the servermap update is done. """ if update_range: u = ServermapUpdater(self._node, self._storage_broker, Monitor(), self._servermap, mode=mode, update_range=update_range) else: u = ServermapUpdater(self._node, self._storage_broker, Monitor(), self._servermap, mode=mode) return u.update() # https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3562 def get_servermap(self): raise NotImplementedError tahoe_lafs-1.20.0/src/allmydata/mutable/layout.py0000644000000000000000000021313413615410400016702 0ustar00""" Ported to Python 3. """ import struct from allmydata.mutable.common import NeedMoreDataError, UnknownVersionError, \ BadShareError from allmydata.interfaces import HASH_SIZE, SALT_SIZE, SDMF_VERSION, \ MDMF_VERSION, IMutableSlotWriter from allmydata.util import mathutil from twisted.python import failure from twisted.internet import defer from zope.interface import implementer # These strings describe the format of the packed structs they help process. # Here's what they mean: # # PREFIX: # >: Big-endian byte order; the most significant byte is first (leftmost). # B: The container version information; stored as an unsigned 8-bit integer. # This is currently either SDMF_VERSION or MDMF_VERSION. # Q: The sequence number; this is sort of like a revision history for # mutable files; they start at 1 and increase as they are changed after # being uploaded. Stored as an unsigned 64-bit integer. # 32s: The root hash of the share hash tree. We use sha-256d, so we use 32 # bytes to store the value. # 16s: The salt for the readkey. This is a 16-byte random value. # # SIGNED_PREFIX additions, things that are covered by the signature: # B: The "k" encoding parameter. We store this as an unsigned 8-bit # integer, since our erasure coding scheme cannot encode to more than # 255 pieces. # B: The "N" encoding parameter. 
Stored as an unsigned 8-bit integer for # the same reason as above. # Q: The segment size of the uploaded file. This is an unsigned 64-bit # integer, to allow handling large segments and files. For SDMF the # segment size is the data length plus padding; for MDMF it can be # smaller. # Q: The data length of the uploaded file. Like the segment size field, # it is an unsigned 64-bit integer. # # HEADER additions: # L: The offset of the signature. An unsigned 32-bit integer. # L: The offset of the share hash chain. An unsigned 32-bit integer. # L: The offset of the block hash tree. An unsigned 32-bit integer. # L: The offset of the share data. An unsigned 32-bit integer. # Q: The offset of the encrypted private key. An unsigned 64-bit integer, # to account for the possibility of a lot of share data. # Q: The offset of the EOF. An unsigned 64-bit integer, to account for # the possibility of a lot of share data. # # After all of these, we have the following: # - The verification key: Occupies the space between the end of the header # and the start of the signature (i.e.: data[HEADER_LENGTH:o['signature']]. # - The signature, which goes from the signature offset to the share hash # chain offset. # - The share hash chain, which goes from the share hash chain offset to # the block hash tree offset. # - The share data, which goes from the share data offset to the encrypted # private key offset. # - The encrypted private key offset, which goes until the end of the file. # # The block hash tree in this encoding has only one share, so the offset of # the share data will be 32 bits more than the offset of the block hash tree. # Given this, we may need to check to see how many bytes a reasonably sized # block hash tree will take up. PREFIX = ">BQ32s16s" # each version may have a different prefix SIGNED_PREFIX = ">BQ32s16s BBQQ" # this is covered by the signature SIGNED_PREFIX_LENGTH = struct.calcsize(SIGNED_PREFIX) HEADER = ">BQ32s16s BBQQ LLLLQQ" # includes offsets HEADER_LENGTH = struct.calcsize(HEADER) OFFSETS = ">LLLLQQ" OFFSETS_LENGTH = struct.calcsize(OFFSETS) MAX_MUTABLE_SHARE_SIZE = 69105*1000*1000*1000*1000 # 69105 TB, kind of arbitrary # These are still used for some tests of SDMF files. 
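# Illustrative sketch: a small, hypothetical helper (not used elsewhere in
# this module) showing how the format strings above determine the packed
# sizes referenced throughout this file.  It relies only on names already
# defined above (struct, PREFIX, SIGNED_PREFIX, HEADER, SIGNED_PREFIX_LENGTH,
# HEADER_LENGTH).

def _show_sdmf_header_sizes():
    """
    Return the packed sizes implied by the SDMF format strings: 57 bytes
    for PREFIX, 75 for SIGNED_PREFIX, and 107 for HEADER.
    """
    sizes = {
        "PREFIX": struct.calcsize(PREFIX),               # 1 + 8 + 32 + 16 = 57
        "SIGNED_PREFIX": struct.calcsize(SIGNED_PREFIX), # 57 + 1 + 1 + 8 + 8 = 75
        "HEADER": struct.calcsize(HEADER),               # 75 + 4*4 + 2*8 = 107
    }
    assert sizes["SIGNED_PREFIX"] == SIGNED_PREFIX_LENGTH
    assert sizes["HEADER"] == HEADER_LENGTH
    return sizes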
def unpack_header(data): o = {} (version, seqnum, root_hash, IV, k, N, segsize, datalen, o['signature'], o['share_hash_chain'], o['block_hash_tree'], o['share_data'], o['enc_privkey'], o['EOF']) = struct.unpack(HEADER, data[:HEADER_LENGTH]) return (version, seqnum, root_hash, IV, k, N, segsize, datalen, o) def unpack_share(data): assert len(data) >= HEADER_LENGTH o = {} (version, seqnum, root_hash, IV, k, N, segsize, datalen, o['signature'], o['share_hash_chain'], o['block_hash_tree'], o['share_data'], o['enc_privkey'], o['EOF']) = struct.unpack(HEADER, data[:HEADER_LENGTH]) if version != 0: raise UnknownVersionError("got mutable share version %d, but I only understand version 0" % version) if len(data) < o['EOF']: raise NeedMoreDataError(o['EOF'], o['enc_privkey'], o['EOF']-o['enc_privkey']) pubkey = data[HEADER_LENGTH:o['signature']] signature = data[o['signature']:o['share_hash_chain']] share_hash_chain_s = data[o['share_hash_chain']:o['block_hash_tree']] share_hash_format = ">H32s" hsize = struct.calcsize(share_hash_format) if len(share_hash_chain_s) % hsize != 0: raise BadShareError("hash chain is %d bytes, not multiple of %d" % (len(share_hash_chain_s), hsize)) share_hash_chain = [] for i in range(0, len(share_hash_chain_s), hsize): chunk = share_hash_chain_s[i:i+hsize] (hid, h) = struct.unpack(share_hash_format, chunk) share_hash_chain.append( (hid, h) ) share_hash_chain = dict(share_hash_chain) block_hash_tree_s = data[o['block_hash_tree']:o['share_data']] if len(block_hash_tree_s) % 32 != 0: raise BadShareError("block_hash_tree is %d bytes, not multiple of %d" % (len(block_hash_tree_s), 32)) block_hash_tree = [] for i in range(0, len(block_hash_tree_s), 32): block_hash_tree.append(block_hash_tree_s[i:i+32]) share_data = data[o['share_data']:o['enc_privkey']] enc_privkey = data[o['enc_privkey']:o['EOF']] return (seqnum, root_hash, IV, k, N, segsize, datalen, pubkey, signature, share_hash_chain, block_hash_tree, share_data, enc_privkey) def get_version_from_checkstring(checkstring): (t, ) = struct.unpack(">B", checkstring[:1]) return t def unpack_sdmf_checkstring(checkstring): cs_len = struct.calcsize(PREFIX) version, seqnum, root_hash, IV = struct.unpack(PREFIX, checkstring[:cs_len]) assert version == SDMF_VERSION, version return (seqnum, root_hash, IV) def unpack_mdmf_checkstring(checkstring): cs_len = struct.calcsize(MDMFCHECKSTRING) version, seqnum, root_hash = struct.unpack(MDMFCHECKSTRING, checkstring[:cs_len]) assert version == MDMF_VERSION, version return (seqnum, root_hash) def pack_offsets(verification_key_length, signature_length, share_hash_chain_length, block_hash_tree_length, share_data_length, encprivkey_length): post_offset = HEADER_LENGTH offsets = {} o1 = offsets['signature'] = post_offset + verification_key_length o2 = offsets['share_hash_chain'] = o1 + signature_length o3 = offsets['block_hash_tree'] = o2 + share_hash_chain_length o4 = offsets['share_data'] = o3 + block_hash_tree_length o5 = offsets['enc_privkey'] = o4 + share_data_length offsets['EOF'] = o5 + encprivkey_length return struct.pack(">LLLLQQ", offsets['signature'], offsets['share_hash_chain'], offsets['block_hash_tree'], offsets['share_data'], offsets['enc_privkey'], offsets['EOF']) def pack_share(prefix, verification_key, signature, share_hash_chain, block_hash_tree, share_data, encprivkey): share_hash_chain_s = b"".join([struct.pack(">H32s", i, share_hash_chain[i]) for i in sorted(share_hash_chain.keys())]) for h in block_hash_tree: assert len(h) == 32 block_hash_tree_s = b"".join(block_hash_tree) 
offsets = pack_offsets(len(verification_key), len(signature), len(share_hash_chain_s), len(block_hash_tree_s), len(share_data), len(encprivkey)) final_share = b"".join([prefix, offsets, verification_key, signature, share_hash_chain_s, block_hash_tree_s, share_data, encprivkey]) return final_share def pack_prefix(seqnum, root_hash, IV, required_shares, total_shares, segment_size, data_length): prefix = struct.pack(SIGNED_PREFIX, 0, # version, seqnum, root_hash, IV, required_shares, total_shares, segment_size, data_length, ) return prefix @implementer(IMutableSlotWriter) class SDMFSlotWriteProxy(object): """ I represent a remote write slot for an SDMF mutable file. I build a share in memory, and then write it in one piece to the remote server. This mimics how SDMF shares were built before MDMF (and the new MDMF uploader), but provides that functionality in a way that allows the MDMF uploader to be built without much special-casing for file format, which makes the uploader code more readable. """ def __init__(self, shnum, storage_server, # an IStorageServer storage_index, secrets, # (write_enabler, renew_secret, cancel_secret) seqnum, # the sequence number of the mutable file required_shares, total_shares, segment_size, data_length): # the length of the original file self.shnum = shnum self._storage_server = storage_server self._storage_index = storage_index self._secrets = secrets self._seqnum = seqnum self._required_shares = required_shares self._total_shares = total_shares self._segment_size = segment_size self._data_length = data_length # This is an SDMF file, so it should have only one segment, so, # modulo padding of the data length, the segment size and the # data length should be the same. expected_segment_size = mathutil.next_multiple(data_length, self._required_shares) assert expected_segment_size == segment_size self._block_size = self._segment_size // self._required_shares # This is meant to mimic how SDMF files were built before MDMF # entered the picture: we generate each share in its entirety, # then push it off to the storage server in one write. When # callers call set_*, they are just populating this dict. # finish_publishing will stitch these pieces together into a # coherent share, and then write the coherent share to the # storage server. self._share_pieces = {} # This tells the write logic what checkstring to use when # writing remote shares. self._testvs = [] self._readvs = [(0, struct.calcsize(PREFIX))] def set_checkstring(self, checkstring_or_seqnum, root_hash=None, salt=None): """ Set the checkstring that I will pass to the remote server when writing. @param checkstring_or_seqnum: A packed checkstring to use, or a sequence number. I will treat this as a checkstr Note that implementations can differ in which semantics they wish to support for set_checkstring -- they can, for example, build the checkstring themselves from its constituents, or some other thing. """ if root_hash and salt: checkstring = struct.pack(PREFIX, 0, checkstring_or_seqnum, root_hash, salt) else: checkstring = checkstring_or_seqnum self._testvs = [(0, len(checkstring), checkstring)] def get_checkstring(self): """ Get the checkstring that I think currently exists on the remote server. """ if self._testvs: return self._testvs[0][2] return b"" def put_block(self, data, segnum, salt): """ Add a block and salt to the share. 
""" # SDMF files have only one segment assert segnum == 0 assert len(data) == self._block_size assert len(salt) == SALT_SIZE self._share_pieces['sharedata'] = data self._share_pieces['salt'] = salt # TODO: Figure out something intelligent to return. return defer.succeed(None) def put_encprivkey(self, encprivkey): """ Add the encrypted private key to the share. """ self._share_pieces['encprivkey'] = encprivkey return defer.succeed(None) def put_blockhashes(self, blockhashes): """ Add the block hash tree to the share. """ assert isinstance(blockhashes, list) for h in blockhashes: assert len(h) == HASH_SIZE # serialize the blockhashes, then set them. blockhashes_s = b"".join(blockhashes) self._share_pieces['block_hash_tree'] = blockhashes_s return defer.succeed(None) def put_sharehashes(self, sharehashes): """ Add the share hash chain to the share. """ assert isinstance(sharehashes, dict) for h in sharehashes.values(): assert len(h) == HASH_SIZE # serialize the sharehashes, then set them. sharehashes_s = b"".join([struct.pack(">H32s", i, sharehashes[i]) for i in sorted(sharehashes.keys())]) self._share_pieces['share_hash_chain'] = sharehashes_s return defer.succeed(None) def put_root_hash(self, root_hash): """ Add the root hash to the share. """ assert len(root_hash) == HASH_SIZE self._share_pieces['root_hash'] = root_hash return defer.succeed(None) def put_salt(self, salt): """ Add a salt to an empty SDMF file. """ assert len(salt) == SALT_SIZE self._share_pieces['salt'] = salt self._share_pieces['sharedata'] = b"" def get_signable(self): """ Return the part of the share that needs to be signed. SDMF writers need to sign the packed representation of the first eight fields of the remote share, that is: - version number (0) - sequence number - root of the share hash tree - salt - k - n - segsize - datalen This method is responsible for returning that to callers. """ return struct.pack(SIGNED_PREFIX, 0, self._seqnum, self._share_pieces['root_hash'], self._share_pieces['salt'], self._required_shares, self._total_shares, self._segment_size, self._data_length) def put_signature(self, signature): """ Add the signature to the share. """ self._share_pieces['signature'] = signature return defer.succeed(None) def put_verification_key(self, verification_key): """ Add the verification key to the share. """ self._share_pieces['verification_key'] = verification_key return defer.succeed(None) def get_verinfo(self): """ I return my verinfo tuple. This is used by the ServermapUpdater to keep track of versions of mutable files. The verinfo tuple for MDMF files contains: - seqnum - root hash - a blank (nothing) - segsize - datalen - k - n - prefix (the thing that you sign) - a tuple of offsets We include the nonce in MDMF to simplify processing of version information tuples. The verinfo tuple for SDMF files is the same, but contains a 16-byte IV instead of a hash of salts. 
""" return (self._seqnum, self._share_pieces['root_hash'], self._share_pieces['salt'], self._segment_size, self._data_length, self._required_shares, self._total_shares, self.get_signable(), self._get_offsets_tuple()) def _get_offsets_dict(self): post_offset = HEADER_LENGTH offsets = {} verification_key_length = len(self._share_pieces['verification_key']) o1 = offsets['signature'] = post_offset + verification_key_length signature_length = len(self._share_pieces['signature']) o2 = offsets['share_hash_chain'] = o1 + signature_length share_hash_chain_length = len(self._share_pieces['share_hash_chain']) o3 = offsets['block_hash_tree'] = o2 + share_hash_chain_length block_hash_tree_length = len(self._share_pieces['block_hash_tree']) o4 = offsets['share_data'] = o3 + block_hash_tree_length share_data_length = len(self._share_pieces['sharedata']) o5 = offsets['enc_privkey'] = o4 + share_data_length encprivkey_length = len(self._share_pieces['encprivkey']) offsets['EOF'] = o5 + encprivkey_length return offsets def _get_offsets_tuple(self): offsets = self._get_offsets_dict() return tuple([(key, value) for key, value in offsets.items()]) def _pack_offsets(self): offsets = self._get_offsets_dict() return struct.pack(">LLLLQQ", offsets['signature'], offsets['share_hash_chain'], offsets['block_hash_tree'], offsets['share_data'], offsets['enc_privkey'], offsets['EOF']) def finish_publishing(self): """ Do anything necessary to finish writing the share to a remote server. I require that no further publishing needs to take place after this method has been called. """ for k in ["sharedata", "encprivkey", "signature", "verification_key", "share_hash_chain", "block_hash_tree"]: assert k in self._share_pieces, (self.shnum, k, self._share_pieces.keys()) # This is the only method that actually writes something to the # remote server. # First, we need to pack the share into data that we can write # to the remote server in one write. offsets = self._pack_offsets() prefix = self.get_signable() final_share = b"".join([prefix, offsets, self._share_pieces['verification_key'], self._share_pieces['signature'], self._share_pieces['share_hash_chain'], self._share_pieces['block_hash_tree'], self._share_pieces['sharedata'], self._share_pieces['encprivkey']]) # Our only data vector is going to be writing the final share, # in its entirely. datavs = [(0, final_share)] if not self._testvs: # Our caller has not provided us with another checkstring # yet, so we assume that we are writing a new share, and set # a test vector that will only allow a new share to be written. self._testvs = [] self._testvs.append(tuple([0, 1, b""])) tw_vectors = {} tw_vectors[self.shnum] = (self._testvs, datavs, None) return self._storage_server.slot_testv_and_readv_and_writev( self._storage_index, self._secrets, tw_vectors, # TODO is it useful to read something? self._readvs, ) MDMFHEADER = ">BQ32sBBQQ QQQQQQQQ" MDMFHEADERWITHOUTOFFSETS = ">BQ32sBBQQ" MDMFHEADERSIZE = struct.calcsize(MDMFHEADER) MDMFHEADERWITHOUTOFFSETSSIZE = struct.calcsize(MDMFHEADERWITHOUTOFFSETS) MDMFCHECKSTRING = ">BQ32s" MDMFSIGNABLEHEADER = ">BQ32sBBQQ" MDMFOFFSETS = ">QQQQQQQQ" MDMFOFFSETS_LENGTH = struct.calcsize(MDMFOFFSETS) PRIVATE_KEY_SIZE = 1220 SIGNATURE_SIZE = 260 VERIFICATION_KEY_SIZE = 292 # We know we won't have more than 256 shares, and we know that we won't need # to store more than ln2(256) hash-chain nodes to validate, so that's our # bound. Each node requires 2 bytes of node-number plus 32 bytes of hash. 
SHARE_HASH_CHAIN_SIZE = (2+HASH_SIZE)*mathutil.log_ceil(256, 2)

@implementer(IMutableSlotWriter)
class MDMFSlotWriteProxy(object):
    """
    I represent a remote write slot for an MDMF mutable file.

    I abstract away from my caller the details of block and salt
    management, and the implementation of the on-disk format for MDMF
    shares.
    """
    # Expected layout, MDMF:
    # offset:   size:   name:
    #-- signed part --
    # 0         1       version number (01)
    # 1         8       sequence number
    # 9         32      share tree root hash
    # 41        1       The "k" encoding parameter
    # 42        1       The "N" encoding parameter
    # 43        8       The segment size of the uploaded file
    # 51        8       The data length of the original plaintext
    #-- end signed part --
    # 59        8       The offset of the encrypted private key
    # 67        8       The offset of the share hash chain
    # 75        8       The offset of the signature
    # 83        8       The offset of the verification key
    # 91        8       The offset of the end of the verification key
    # 99        8       The offset of the share data
    # 107       8       The offset of the block hash tree
    # 115       8       The offset of EOF
    # 123       var     encrypted private key
    # var       var     share hash chain
    # var       var     signature
    # var       var     verification key
    # var       large   share data
    # var       var     block hash tree
    #
    # We order the fields that way to make smart downloaders -- downloaders
    # which preemptively read a big part of the share -- possible.
    #
    # The checkstring is the first three fields -- the version number,
    # sequence number, and root hash. This is consistent in meaning with
    # what we have for SDMF files, except that now, instead of using the
    # literal salt, we use a value derived from all of the salts -- the
    # share hash root.
    #
    # The salt is stored before the block for each segment. The block
    # hash tree is computed over the combination of block and salt for
    # each segment. In this way, we get integrity checking for both
    # block and salt with the current block hash tree arrangement.
    #
    # The ordering of the offsets is different to reflect the dependencies
    # that we'll run into with an MDMF file. The expected write flow is
    # something like this:
    #
    # 0: Initialize with the sequence number, encoding parameters and
    #    data length. From this, we can deduce the number of segments,
    #    and where they should go. We can also figure out where the
    #    encrypted private key should go, because we can figure out how
    #    big the share data will be.
    #
    # 1: Encrypt, encode, and upload the file in chunks. Do something
    #    like
    #
    #      put_block(data, segnum, salt)
    #
    #    to write a block and a salt to the disk. We can do both of
    #    these operations now because we have enough of the offsets to
    #    know where to put them.
    #
    # 2: Put the encrypted private key. Use:
    #
    #      put_encprivkey(encprivkey)
    #
    #    Now that we know the length of the private key, we can fill
    #    in the offset for the block hash tree.
    #
    # 3: We're now in a position to upload the block hash tree for
    #    a share. Put that using something like:
    #
    #      put_blockhashes(block_hash_tree)
    #
    #    Note that block_hash_tree is a list of hashes -- we'll take
    #    care of the details of serializing that appropriately. When
    #    we get the block hash tree, we are also in a position to
    #    calculate the offset for the share hash chain, and fill that
    #    into the offsets table.
    #
    # 4: We're now in a position to upload the share hash chain for
    #    a share. Do that with something like:
    #
    #      put_sharehashes(share_hash_chain)
    #
    #    share_hash_chain should be a dictionary mapping shnums to
    #    32-byte hashes -- the wrapper handles serialization.
    #    We'll know where to put the signature at this point, also.
    #    The root of this tree will be put explicitly in the next
    #    step.
    #
    # 5: Before putting the signature, we must first put the
    #    root_hash. Do this with:
    #
    #      put_root_hash(root_hash).
    #
    #    In terms of knowing where to put this value, it was always
    #    possible to place it, but it makes sense semantically to
    #    place it after the share hash tree, so that's why you do it
    #    in this order.
    #
    # 6: With the root hash put, we can now sign the header. Use:
    #
    #      get_signable()
    #
    #    to get the part of the header that you want to sign, and use:
    #
    #      put_signature(signature)
    #
    #    to write your signature to the remote server.
    #
    # 7: Add the verification key, and finish. Do:
    #
    #      put_verification_key(key)
    #
    #    and
    #
    #      finish_publishing()
    #
    # Checkstring management:
    #
    # To write to a mutable slot, we have to provide test vectors to ensure
    # that we are writing to the same data that we think we are. These
    # vectors allow us to detect uncoordinated writes; that is, writes
    # where both we and some other shareholder are writing to the
    # mutable slot, and to report those back to the parts of the program
    # doing the writing.
    #
    # With SDMF, this was easy -- all of the share data was written in
    # one go, so it was easy to detect uncoordinated writes, and we only
    # had to do it once. With MDMF, not all of the file is written at
    # once.
    #
    # If a share is new, we write out as much of the header as we can
    # before writing out anything else. This gives other writers a
    # canary that they can use to detect uncoordinated writes, and, if
    # they do the same thing, gives us the same canary. We then update
    # the share. We won't be able to write out two fields of the header
    # -- the share tree hash and the salt hash -- until we finish
    # writing out the share. We only require the writer to provide the
    # initial checkstring, and keep track of what it should be after
    # updates ourselves.
    #
    # If we haven't written anything yet, then on the first write (which
    # will probably be a block + salt of a share), we'll also write out
    # the header. On subsequent passes, we'll expect to see the header.
    # This changes in two places:
    #
    #   - When we write out the salt hash
    #   - When we write out the root of the share hash tree
    #
    # since these values will change the header. It is possible that we
    # can just make those be written in one operation to minimize
    # disruption.
    def __init__(self,
                 shnum,
                 storage_server, # a remote reference to a storage server
                 storage_index,
                 secrets, # (write_enabler, renew_secret, cancel_secret)
                 seqnum, # the sequence number of the mutable file
                 required_shares,
                 total_shares,
                 segment_size,
                 data_length): # the length of the original file
        self.shnum = shnum
        self._storage_server = storage_server
        self._storage_index = storage_index
        self._seqnum = seqnum
        self._required_shares = required_shares
        assert self.shnum >= 0 and self.shnum < total_shares
        self._total_shares = total_shares
        # We build up the offset table as we write things. It is the
        # last thing we write to the remote server.
        self._offsets = {}
        self._testvs = []
        # This is a list of write vectors that will be sent to our
        # remote server once we are directed to write things there.
        self._writevs = []
        self._secrets = secrets
        # The segment size needs to be a multiple of the k parameter --
        # any padding should have been carried out by the publisher
        # already.
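        # A rough illustration of the padding this relies on (a sketch, not
        # executed here): with the default 128 KiB MDMF segment size and
        # k == 3, the publisher rounds the segment size up, e.g.
        #
        #   mathutil.next_multiple(131072, 3)   # == 131073
        #
        # so the assertion below holds and each block is exactly
        # segment_size // k bytes long.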
        assert segment_size % required_shares == 0
        self._segment_size = segment_size
        self._data_length = data_length

        # These are set later -- we define them here so that we can
        # check for their existence easily.

        # This is the root of the share hash tree -- the Merkle tree
        # over the roots of the block hash trees computed for shares in
        # this upload.
        self._root_hash = None

        # We haven't yet written anything to the remote bucket. By
        # setting this, we tell the _write method as much. The write
        # method will then know that it also needs to add a write vector
        # for the checkstring (or what we have of it) to the first write
        # request. We'll then record that value for future use. If
        # we're expecting something to be there already, we need to call
        # set_checkstring before we write anything to tell the first
        # write about that.
        self._written = False

        # When writing data to the storage servers, we get a read vector
        # for free. We'll read the checkstring, which will help us
        # figure out what's gone wrong if a write fails.
        self._readv = [(0, struct.calcsize(MDMFCHECKSTRING))]

        # We calculate the number of segments because it tells us
        # where the salt part of the file ends/share segment begins,
        # and also because it provides a useful amount of bounds checking.
        self._num_segments = mathutil.div_ceil(self._data_length,
                                               self._segment_size)
        self._block_size = self._segment_size // self._required_shares
        # We also calculate the share size, to help us with block
        # constraints later.
        tail_size = self._data_length % self._segment_size
        if not tail_size:
            self._tail_block_size = self._block_size
        else:
            self._tail_block_size = mathutil.next_multiple(tail_size,
                                                           self._required_shares)
            self._tail_block_size = self._tail_block_size // self._required_shares

        # We already know where the sharedata starts; right after the end
        # of the header (which is defined as the signable part + the offsets).
        # We can also calculate where the encrypted private key begins
        # from what we now know.
        self._actual_block_size = self._block_size + SALT_SIZE
        data_size = self._actual_block_size * (self._num_segments - 1)
        data_size += self._tail_block_size
        data_size += SALT_SIZE
        self._offsets['enc_privkey'] = MDMFHEADERSIZE

        # We don't define offsets for these because we want them to be
        # tightly packed -- this allows us to ignore the responsibility
        # of padding individual values, and of removing that padding
        # later. So nonconstant_start is where we start writing
        # nonconstant data.
        nonconstant_start = self._offsets['enc_privkey']
        nonconstant_start += PRIVATE_KEY_SIZE
        nonconstant_start += SIGNATURE_SIZE
        nonconstant_start += VERIFICATION_KEY_SIZE
        nonconstant_start += SHARE_HASH_CHAIN_SIZE
        self._offsets['share_data'] = nonconstant_start

        # Finally, we know how big the share data will be, so we can
        # figure out where the block hash tree needs to go.
        # XXX: But this will go away if Zooko wants to make it so that
        # you don't need to know the size of the file before you start
        # uploading it.
        self._offsets['block_hash_tree'] = self._offsets['share_data'] + \
                                           data_size

        # Done. We can now start writing.

    def set_checkstring(self,
                        seqnum_or_checkstring,
                        root_hash=None,
                        salt=None):
        """
        Set the checkstring for the given shnum.

        This can be invoked in one of two ways.

        With one argument, I assume that you are giving me a literal
        checkstring -- e.g., the output of get_checkstring. I will then
        set that checkstring as it is. This form is used by unit tests.

        With two arguments, I assume that you are giving me a sequence
        number and root hash to make a checkstring from.
        In that case, I will build a checkstring and set it for you.
        This form is used by the publisher.

        By default, I assume that I am writing new shares to the grid.
        If you don't explicitly set your own checkstring, I will use
        one that requires that the remote share not exist. You will
        want to use this method if you are updating a share in-place;
        otherwise, writes will fail.
        """
        # You're allowed to overwrite checkstrings with this method;
        # I assume that users know what they are doing when they call
        # it.
        if root_hash:
            checkstring = struct.pack(MDMFCHECKSTRING,
                                      1,
                                      seqnum_or_checkstring,
                                      root_hash)
        else:
            checkstring = seqnum_or_checkstring
        if checkstring == b"":
            # We special-case this, since len("") = 0, but we need
            # length of 1 for the case of an empty share to work on the
            # storage server, which is what a checkstring that is the
            # empty string means.
            self._testvs = []
        else:
            self._testvs = []
            self._testvs.append((0, len(checkstring), checkstring))

    def __repr__(self):
        return "MDMFSlotWriteProxy for share %d" % self.shnum

    def get_checkstring(self):
        """
        Given a share number, I return a representation of what the
        checkstring for that share on the server will look like.

        I am mostly used for tests.
        """
        if self._root_hash:
            roothash = self._root_hash
        else:
            roothash = b"\x00" * 32
        return struct.pack(MDMFCHECKSTRING,
                           1,
                           self._seqnum,
                           roothash)

    def put_block(self, data, segnum, salt):
        """
        I queue a write vector for the data, salt, and segment number
        provided to me.

        I return None, as I do not actually cause anything to be
        written yet.
        """
        if segnum >= self._num_segments:
            raise LayoutInvalid("I won't overwrite the block hash tree")
        if len(salt) != SALT_SIZE:
            raise LayoutInvalid("I was given a salt of size %d, but "
                                "I wanted a salt of size %d" %
                                (len(salt), SALT_SIZE))
        if segnum + 1 == self._num_segments:
            if len(data) != self._tail_block_size:
                raise LayoutInvalid("I was given the wrong size block to write")
        elif len(data) != self._block_size:
            raise LayoutInvalid("I was given the wrong size block to write")

        # We want to write at len(MDMFHEADER) + segnum * block_size.
        offset = self._offsets['share_data'] + \
            (self._actual_block_size * segnum)
        data = salt + data

        self._writevs.append(tuple([offset, data]))

    def put_encprivkey(self, encprivkey):
        """
        I queue a write vector for the encrypted private key provided to
        me.
        """
        assert self._offsets
        assert self._offsets['enc_privkey']
        # You shouldn't re-write the encprivkey after the share hash
        # chain has been put, since that could cause the private key to
        # run into the fields that follow it. put_sharehashes() records
        # the offset of the signature before queueing its write, so the
        # presence of a signature offset is a good indicator of whether
        # or not the share hash chain has been written.
        if "signature" in self._offsets:
            raise LayoutInvalid("You can't put the encrypted private key "
                                "after putting the share hash chain")

        self._offsets['share_hash_chain'] = self._offsets['enc_privkey'] + \
            len(encprivkey)

        self._writevs.append(tuple([self._offsets['enc_privkey'],
                                    encprivkey]))

    def put_blockhashes(self, blockhashes):
        """
        I queue a write vector to put the block hash tree in blockhashes
        onto the remote server.

        The encrypted private key must be queued before the block hash
        tree, since we need to know how large it is to know where the
        block hash tree should go. The block hash tree must be put
        before the share hash chain, since its size determines the
        offset of the share hash chain.
""" assert self._offsets assert "block_hash_tree" in self._offsets assert isinstance(blockhashes, list) blockhashes_s = b"".join(blockhashes) self._offsets['EOF'] = self._offsets['block_hash_tree'] + len(blockhashes_s) self._writevs.append(tuple([self._offsets['block_hash_tree'], blockhashes_s])) def put_sharehashes(self, sharehashes): """ I queue a write vector to put the share hash chain in my argument onto the remote server. The block hash tree must be queued before the share hash chain, since we need to know where the block hash tree ends before we can know where the share hash chain starts. The share hash chain must be put before the signature, since the length of the packed share hash chain determines the offset of the signature. Also, semantically, you must know what the root of the block hash tree is before you can generate a valid signature. """ assert isinstance(sharehashes, dict) assert self._offsets if "share_hash_chain" not in self._offsets: raise LayoutInvalid("You must put the block hash tree before " "putting the share hash chain") # The signature comes after the share hash chain. If the # signature has already been written, we must not write another # share hash chain. The signature writes the verification key # offset when it gets sent to the remote server, so we look for # that. if "verification_key" in self._offsets: raise LayoutInvalid("You must write the share hash chain " "before you write the signature") sharehashes_s = b"".join([struct.pack(">H32s", i, sharehashes[i]) for i in sorted(sharehashes.keys())]) self._offsets['signature'] = self._offsets['share_hash_chain'] + \ len(sharehashes_s) self._writevs.append(tuple([self._offsets['share_hash_chain'], sharehashes_s])) def put_root_hash(self, roothash): """ Put the root hash (the root of the share hash tree) in the remote slot. """ # It does not make sense to be able to put the root # hash without first putting the share hashes, since you need # the share hashes to generate the root hash. # # Signature is defined by the routine that places the share hash # chain, so it's a good thing to look for in finding out whether # or not the share hash chain exists on the remote server. if len(roothash) != HASH_SIZE: raise LayoutInvalid("hashes and salts must be exactly %d bytes" % HASH_SIZE) self._root_hash = roothash # To write both of these values, we update the checkstring on # the remote server, which includes them checkstring = self.get_checkstring() self._writevs.append(tuple([0, checkstring])) # This write, if successful, changes the checkstring, so we need # to update our internal checkstring to be consistent with the # one on the server. def get_signable(self): """ Get the first seven fields of the mutable file; the parts that are signed. """ if not self._root_hash: raise LayoutInvalid("You need to set the root hash " "before getting something to " "sign") return struct.pack(MDMFSIGNABLEHEADER, 1, self._seqnum, self._root_hash, self._required_shares, self._total_shares, self._segment_size, self._data_length) def put_signature(self, signature): """ I queue a write vector for the signature of the MDMF share. I require that the root hash and share hash chain have been put to the grid before I will write the signature to the grid. """ if "signature" not in self._offsets: raise LayoutInvalid("You must put the share hash chain " # It does not make sense to put a signature without first # putting the root hash and the salt hash (since otherwise # the signature would be incomplete), so we don't allow that. 
"before putting the signature") if not self._root_hash: raise LayoutInvalid("You must complete the signed prefix " "before computing a signature") # If we put the signature after we put the verification key, we # could end up running into the verification key, and will # probably screw up the offsets as well. So we don't allow that. if "verification_key_end" in self._offsets: raise LayoutInvalid("You can't put the signature after the " "verification key") # The method that writes the verification key defines the EOF # offset before writing the verification key, so look for that. self._offsets['verification_key'] = self._offsets['signature'] +\ len(signature) self._writevs.append(tuple([self._offsets['signature'], signature])) def put_verification_key(self, verification_key): """ I queue a write vector for the verification key. I require that the signature have been written to the storage server before I allow the verification key to be written to the remote server. """ if "verification_key" not in self._offsets: raise LayoutInvalid("You must put the signature before you " "can put the verification key") self._offsets['verification_key_end'] = \ self._offsets['verification_key'] + len(verification_key) assert self._offsets['verification_key_end'] <= self._offsets['share_data'] self._writevs.append(tuple([self._offsets['verification_key'], verification_key])) def _get_offsets_tuple(self): return tuple([(key, value) for key, value in self._offsets.items()]) def get_verinfo(self): return (self._seqnum, self._root_hash, None, self._segment_size, self._data_length, self._required_shares, self._total_shares, self.get_signable(), self._get_offsets_tuple()) def finish_publishing(self): """ I add a write vector for the offsets table, and then cause all of the write vectors that I've dealt with so far to be published to the remote server, ending the write process. """ if "verification_key_end" not in self._offsets: raise LayoutInvalid("You must put the verification key before " "you can publish the offsets") offsets_offset = struct.calcsize(MDMFHEADERWITHOUTOFFSETS) offsets = struct.pack(MDMFOFFSETS, self._offsets['enc_privkey'], self._offsets['share_hash_chain'], self._offsets['signature'], self._offsets['verification_key'], self._offsets['verification_key_end'], self._offsets['share_data'], self._offsets['block_hash_tree'], self._offsets['EOF']) self._writevs.append(tuple([offsets_offset, offsets])) encoding_parameters_offset = struct.calcsize(MDMFCHECKSTRING) params = struct.pack(">BBQQ", self._required_shares, self._total_shares, self._segment_size, self._data_length) self._writevs.append(tuple([encoding_parameters_offset, params])) return self._write(self._writevs) def _write(self, datavs, on_failure=None, on_success=None): """I write the data vectors in datavs to the remote slot.""" tw_vectors = {} if not self._testvs: # Make sure we will only successfully write if the share didn't # previously exist. self._testvs = [] self._testvs.append(tuple([0, 1, b""])) if not self._written: # Write a new checkstring to the share when we write it, so # that we have something to check later. 
new_checkstring = self.get_checkstring() datavs.append((0, new_checkstring)) def _first_write(): self._written = True self._testvs = [(0, len(new_checkstring), new_checkstring)] on_success = _first_write tw_vectors[self.shnum] = (self._testvs, datavs, None) d = self._storage_server.slot_testv_and_readv_and_writev( self._storage_index, self._secrets, tw_vectors, self._readv, ) def _result(results): if isinstance(results, failure.Failure) or not results[0]: # Do nothing; the write was unsuccessful. if on_failure: on_failure() else: if on_success: on_success() return results d.addBoth(_result) return d def _handle_bad_struct(f): # struct.unpack errors mean the server didn't give us enough data, so # this share is bad f.trap(struct.error) raise BadShareError(f.value.args[0]) class MDMFSlotReadProxy(object): """ I read from a mutable slot filled with data written in the MDMF data format (which is described above). I can be initialized with some amount of data, which I will use (if it is valid) to eliminate some of the need to fetch it from servers. """ def __init__(self, storage_server, storage_index, shnum, data=b"", data_is_everything=False): # Start the initialization process. self._storage_server = storage_server self._storage_index = storage_index self.shnum = shnum # Before doing anything, the reader is probably going to want to # verify that the signature is correct. To do that, they'll need # the verification key, and the signature. To get those, we'll # need the offset table. So fetch the offset table on the # assumption that that will be the first thing that a reader is # going to do. # The fact that these encoding parameters are None tells us # that we haven't yet fetched them from the remote share, so we # should. We could just not set them, but the checks will be # easier to read if we don't have to use hasattr. self._version_number = None self._sequence_number = None self._root_hash = None # Filled in if we're dealing with an SDMF file. Unused # otherwise. self._salt = None self._required_shares = None self._total_shares = None self._segment_size = None self._data_length = None self._offsets = None # If the user has chosen to initialize us with some data, we'll # try to satisfy subsequent data requests with that data before # asking the storage server for it. self._data = data # If the provided data is known to be complete, then we know there's # nothing to be gained by querying the server, so we should just # partially satisfy requests with what we have. self._data_is_everything = data_is_everything # The way callers interact with cache in the filenode returns # None if there isn't any cached data, but the way we index the # cached data requires a string, so convert None to "". if self._data == None: self._data = b"" def _maybe_fetch_offsets_and_header(self, force_remote=False): """ I fetch the offset table and the header from the remote slot if I don't already have them. If I do have them, I do nothing and return an empty Deferred. """ if self._offsets: return defer.succeed(None) # At this point, we may be either SDMF or MDMF. Fetching 107 # bytes will be enough to get header and offsets for both SDMF and # MDMF, though we'll be left with 4 more bytes than we # need if this ends up being MDMF. This is probably less # expensive than the cost of a second roundtrip. 
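        # For reference, the size arithmetic behind the read below (derived
        # from the format strings defined earlier in this module, so treat
        # it as a cross-check): an MDMF header plus offset table is
        # MDMFHEADERSIZE == 123 bytes (59-byte signed header + 64-byte
        # offset table), while SDMF needs only SIGNED_PREFIX_LENGTH +
        # OFFSETS_LENGTH == 75 + 32 == 107 bytes, so a single 123-byte read
        # covers both cases.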
readvs = [(0, 123)] d = self._read(readvs, force_remote) d.addCallback(self._process_encoding_parameters) d.addCallback(self._process_offsets) d.addErrback(_handle_bad_struct) return d def _process_encoding_parameters(self, encoding_parameters): if self.shnum not in encoding_parameters: raise BadShareError("no data for shnum %d" % self.shnum) encoding_parameters = encoding_parameters[self.shnum][0] # The first byte is the version number. It will tell us what # to do next. (verno,) = struct.unpack(">B", encoding_parameters[:1]) if verno == MDMF_VERSION: read_size = MDMFHEADERWITHOUTOFFSETSSIZE (verno, seqnum, root_hash, k, n, segsize, datalen) = struct.unpack(MDMFHEADERWITHOUTOFFSETS, encoding_parameters[:read_size]) if segsize == 0 and datalen == 0: # Empty file, no segments. self._num_segments = 0 else: self._num_segments = mathutil.div_ceil(datalen, segsize) elif verno == SDMF_VERSION: read_size = SIGNED_PREFIX_LENGTH (verno, seqnum, root_hash, salt, k, n, segsize, datalen) = struct.unpack(">BQ32s16s BBQQ", encoding_parameters[:SIGNED_PREFIX_LENGTH]) self._salt = salt if segsize == 0 and datalen == 0: # empty file self._num_segments = 0 else: # non-empty SDMF files have one segment. self._num_segments = 1 else: raise UnknownVersionError("You asked me to read mutable file " "version %d, but I only understand " "%d and %d" % (verno, SDMF_VERSION, MDMF_VERSION)) self._version_number = verno self._sequence_number = seqnum self._root_hash = root_hash self._required_shares = k self._total_shares = n self._segment_size = segsize self._data_length = datalen self._block_size = self._segment_size // self._required_shares # We can upload empty files, and need to account for this fact # so as to avoid zero-division and zero-modulo errors. if datalen > 0: tail_size = self._data_length % self._segment_size else: tail_size = 0 if not tail_size: self._tail_block_size = self._block_size else: self._tail_block_size = mathutil.next_multiple(tail_size, self._required_shares) self._tail_block_size = self._tail_block_size // self._required_shares return encoding_parameters def _process_offsets(self, offsets): if self._version_number == 0: read_size = OFFSETS_LENGTH read_offset = SIGNED_PREFIX_LENGTH end = read_size + read_offset (signature, share_hash_chain, block_hash_tree, share_data, enc_privkey, EOF) = struct.unpack(">LLLLQQ", offsets[read_offset:end]) self._offsets = {} self._offsets['signature'] = signature self._offsets['share_data'] = share_data self._offsets['block_hash_tree'] = block_hash_tree self._offsets['share_hash_chain'] = share_hash_chain self._offsets['enc_privkey'] = enc_privkey self._offsets['EOF'] = EOF elif self._version_number == 1: read_offset = MDMFHEADERWITHOUTOFFSETSSIZE read_length = MDMFOFFSETS_LENGTH end = read_offset + read_length (encprivkey, sharehashes, signature, verification_key, verification_key_end, sharedata, blockhashes, eof) = struct.unpack(MDMFOFFSETS, offsets[read_offset:end]) self._offsets = {} self._offsets['enc_privkey'] = encprivkey self._offsets['block_hash_tree'] = blockhashes self._offsets['share_hash_chain'] = sharehashes self._offsets['signature'] = signature self._offsets['verification_key'] = verification_key self._offsets['verification_key_end']= \ verification_key_end self._offsets['EOF'] = eof self._offsets['share_data'] = sharedata def get_block_and_salt(self, segnum): """ I return (block, salt), where block is the block data and salt is the salt used to encrypt that segment. 
""" d = self._maybe_fetch_offsets_and_header() def _then(ignored): base_share_offset = self._offsets['share_data'] if segnum + 1 > self._num_segments: raise LayoutInvalid("Not a valid segment number") if self._version_number == 0: share_offset = base_share_offset + self._block_size * segnum else: share_offset = base_share_offset + (self._block_size + \ SALT_SIZE) * segnum if segnum + 1 == self._num_segments: data = self._tail_block_size else: data = self._block_size if self._version_number == 1: data += SALT_SIZE readvs = [(share_offset, data)] return readvs d.addCallback(_then) d.addCallback(lambda readvs: self._read(readvs)) def _process_results(results): if self.shnum not in results: raise BadShareError("no data for shnum %d" % self.shnum) if self._version_number == 0: # We only read the share data, but we know the salt from # when we fetched the header data = results[self.shnum] if not data: data = b"" else: if len(data) != 1: raise BadShareError("got %d vectors, not 1" % len(data)) data = data[0] salt = self._salt else: data = results[self.shnum] if not data: salt = data = b"" else: salt_and_data = results[self.shnum][0] salt = salt_and_data[:SALT_SIZE] data = salt_and_data[SALT_SIZE:] return data, salt d.addCallback(_process_results) return d def get_blockhashes(self, needed=None, force_remote=False): """ I return the block hash tree I take an optional argument, needed, which is a set of indices correspond to hashes that I should fetch. If this argument is missing, I will fetch the entire block hash tree; otherwise, I may attempt to fetch fewer hashes, based on what needed says that I should do. Note that I may fetch as many hashes as I want, so long as the set of hashes that I do fetch is a superset of the ones that I am asked for, so callers should be prepared to tolerate additional hashes. """ # TODO: Return only the parts of the block hash tree necessary # to validate the blocknum provided? # This is a good idea, but it is hard to implement correctly. It # is bad to fetch any one block hash more than once, so we # probably just want to fetch the whole thing at once and then # serve it. if needed == set([]): return defer.succeed([]) d = self._maybe_fetch_offsets_and_header() def _then(ignored): blockhashes_offset = self._offsets['block_hash_tree'] if self._version_number == 1: blockhashes_length = self._offsets['EOF'] - blockhashes_offset else: blockhashes_length = self._offsets['share_data'] - blockhashes_offset readvs = [(blockhashes_offset, blockhashes_length)] return readvs d.addCallback(_then) d.addCallback(lambda readvs: self._read(readvs, force_remote=force_remote)) def _build_block_hash_tree(results): if self.shnum not in results: raise BadShareError("no data for shnum %d" % self.shnum) rawhashes = results[self.shnum][0] results = [rawhashes[i:i+HASH_SIZE] for i in range(0, len(rawhashes), HASH_SIZE)] return results d.addCallback(_build_block_hash_tree) return d def get_sharehashes(self, needed=None, force_remote=False): """ I return the part of the share hash chain placed to validate this share. I take an optional argument, needed. Needed is a set of indices that correspond to the hashes that I should fetch. If needed is not present, I will fetch and return the entire share hash chain. Otherwise, I may fetch and return any part of the share hash chain that is a superset of the part that I am asked to fetch. Callers should be prepared to deal with more hashes than they've asked for. 
""" if needed == set([]): return defer.succeed([]) d = self._maybe_fetch_offsets_and_header() def _make_readvs(ignored): sharehashes_offset = self._offsets['share_hash_chain'] if self._version_number == 0: sharehashes_length = self._offsets['block_hash_tree'] - sharehashes_offset else: sharehashes_length = self._offsets['signature'] - sharehashes_offset readvs = [(sharehashes_offset, sharehashes_length)] return readvs d.addCallback(_make_readvs) d.addCallback(lambda readvs: self._read(readvs, force_remote=force_remote)) def _build_share_hash_chain(results): if self.shnum not in results: raise BadShareError("no data for shnum %d" % self.shnum) sharehashes = results[self.shnum][0] results = [sharehashes[i:i+(HASH_SIZE + 2)] for i in range(0, len(sharehashes), HASH_SIZE + 2)] results = dict([struct.unpack(">H32s", data) for data in results]) return results d.addCallback(_build_share_hash_chain) d.addErrback(_handle_bad_struct) return d def get_encprivkey(self): """ I return the encrypted private key. """ d = self._maybe_fetch_offsets_and_header() def _make_readvs(ignored): privkey_offset = self._offsets['enc_privkey'] if self._version_number == 0: privkey_length = self._offsets['EOF'] - privkey_offset else: privkey_length = self._offsets['share_hash_chain'] - privkey_offset readvs = [(privkey_offset, privkey_length)] return readvs d.addCallback(_make_readvs) d.addCallback(lambda readvs: self._read(readvs)) def _process_results(results): if self.shnum not in results: raise BadShareError("no data for shnum %d" % self.shnum) privkey = results[self.shnum][0] return privkey d.addCallback(_process_results) return d def get_signature(self): """ I return the signature of my share. """ d = self._maybe_fetch_offsets_and_header() def _make_readvs(ignored): signature_offset = self._offsets['signature'] if self._version_number == 1: signature_length = self._offsets['verification_key'] - signature_offset else: signature_length = self._offsets['share_hash_chain'] - signature_offset readvs = [(signature_offset, signature_length)] return readvs d.addCallback(_make_readvs) d.addCallback(lambda readvs: self._read(readvs)) def _process_results(results): if self.shnum not in results: raise BadShareError("no data for shnum %d" % self.shnum) signature = results[self.shnum][0] return signature d.addCallback(_process_results) return d def get_verification_key(self): """ I return the verification key. """ d = self._maybe_fetch_offsets_and_header() def _make_readvs(ignored): if self._version_number == 1: vk_offset = self._offsets['verification_key'] vk_length = self._offsets['verification_key_end'] - vk_offset else: vk_offset = struct.calcsize(">BQ32s16sBBQQLLLLQQ") vk_length = self._offsets['signature'] - vk_offset readvs = [(vk_offset, vk_length)] return readvs d.addCallback(_make_readvs) d.addCallback(lambda readvs: self._read(readvs)) def _process_results(results): if self.shnum not in results: raise BadShareError("no data for shnum %d" % self.shnum) verification_key = results[self.shnum][0] return verification_key d.addCallback(_process_results) return d def get_encoding_parameters(self): """ I return (k, n, segsize, datalen) """ d = self._maybe_fetch_offsets_and_header() d.addCallback(lambda ignored: (self._required_shares, self._total_shares, self._segment_size, self._data_length)) return d def get_seqnum(self): """ I return the sequence number for this share. 
""" d = self._maybe_fetch_offsets_and_header() d.addCallback(lambda ignored: self._sequence_number) return d def get_root_hash(self): """ I return the root of the block hash tree """ d = self._maybe_fetch_offsets_and_header() d.addCallback(lambda ignored: self._root_hash) return d def get_checkstring(self): """ I return the packed representation of the following: - version number - sequence number - root hash - salt hash which my users use as a checkstring to detect other writers. """ d = self._maybe_fetch_offsets_and_header() def _build_checkstring(ignored): if self._salt: checkstring = struct.pack(PREFIX, self._version_number, self._sequence_number, self._root_hash, self._salt) else: checkstring = struct.pack(MDMFCHECKSTRING, self._version_number, self._sequence_number, self._root_hash) return checkstring d.addCallback(_build_checkstring) return d def get_prefix(self, force_remote): d = self._maybe_fetch_offsets_and_header(force_remote) d.addCallback(lambda ignored: self._build_prefix()) return d def _build_prefix(self): # The prefix is another name for the part of the remote share # that gets signed. It consists of everything up to and # including the datalength, packed by struct. if self._version_number == SDMF_VERSION: return struct.pack(SIGNED_PREFIX, self._version_number, self._sequence_number, self._root_hash, self._salt, self._required_shares, self._total_shares, self._segment_size, self._data_length) else: return struct.pack(MDMFSIGNABLEHEADER, self._version_number, self._sequence_number, self._root_hash, self._required_shares, self._total_shares, self._segment_size, self._data_length) def _get_offsets_tuple(self): # The offsets tuple is another component of the version # information tuple. It is basically our offsets dictionary, # itemized and in a tuple. return self._offsets.copy() def get_verinfo(self): """ I return my verinfo tuple. This is used by the ServermapUpdater to keep track of versions of mutable files. The verinfo tuple for MDMF files contains: - seqnum - root hash - a blank (nothing) - segsize - datalen - k - n - prefix (the thing that you sign) - a tuple of offsets We include the nonce in MDMF to simplify processing of version information tuples. The verinfo tuple for SDMF files is the same, but contains a 16-byte IV instead of a hash of salts. """ d = self._maybe_fetch_offsets_and_header() def _build_verinfo(ignored): if self._version_number == SDMF_VERSION: salt_to_use = self._salt else: salt_to_use = None return (self._sequence_number, self._root_hash, salt_to_use, self._segment_size, self._data_length, self._required_shares, self._total_shares, self._build_prefix(), self._get_offsets_tuple()) d.addCallback(_build_verinfo) return d def _read(self, readvs, force_remote=False): unsatisfiable = [x for x in readvs if x[0] + x[1] > len(self._data)] # TODO: It's entirely possible to tweak this so that it just # fulfills the requests that it can, and not demand that all # requests are satisfiable before running it. 
if not unsatisfiable or self._data_is_everything: results = [self._data[offset:offset+length] for (offset, length) in readvs] results = {self.shnum: results} return defer.succeed(results) else: return self._storage_server.slot_readv( self._storage_index, [self.shnum], readvs, ) def is_sdmf(self): """I tell my caller whether or not my remote file is SDMF or MDMF """ d = self._maybe_fetch_offsets_and_header() d.addCallback(lambda ignored: self._version_number == 0) return d class LayoutInvalid(BadShareError): """ This isn't a valid MDMF mutable file """ tahoe_lafs-1.20.0/src/allmydata/mutable/publish.py0000644000000000000000000015722513615410400017043 0ustar00""" Ported to Python 3. """ import os, time from io import BytesIO from itertools import count from zope.interface import implementer from twisted.internet import defer from twisted.python import failure from allmydata.crypto import aes from allmydata.crypto import rsa from allmydata.interfaces import IPublishStatus, SDMF_VERSION, MDMF_VERSION, \ IMutableUploadable from allmydata.util import base32, hashutil, mathutil, log from allmydata.util.dictutil import DictOfSets from allmydata.util.deferredutil import async_to_deferred from allmydata.util.cputhreadpool import defer_to_thread from allmydata import hashtree, codec from allmydata.storage.server import si_b2a from foolscap.api import eventually, fireEventually from allmydata.mutable.common import MODE_WRITE, MODE_CHECK, MODE_REPAIR, \ UncoordinatedWriteError, NotEnoughServersError from allmydata.mutable.servermap import ServerMap from allmydata.mutable.layout import get_version_from_checkstring,\ unpack_mdmf_checkstring, \ unpack_sdmf_checkstring, \ MDMFSlotWriteProxy, \ SDMFSlotWriteProxy from eliot import ( Message, start_action, ) KiB = 1024 DEFAULT_MUTABLE_MAX_SEGMENT_SIZE = 128 * KiB PUSHING_BLOCKS_STATE = 0 PUSHING_EVERYTHING_ELSE_STATE = 1 DONE_STATE = 2 @implementer(IPublishStatus) class PublishStatus(object): statusid_counter = count(0) def __init__(self): self.timings = {} self.timings["send_per_server"] = {} self.timings["encrypt"] = 0.0 self.timings["encode"] = 0.0 self.servermap = None self._problems = {} self.active = True self.storage_index = None self.helper = False self.encoding = ("?", "?") self.size = None self.status = "Not started" self.progress = 0.0 self.counter = next(self.statusid_counter) self.started = time.time() def add_per_server_time(self, server, elapsed): if server not in self.timings["send_per_server"]: self.timings["send_per_server"][server] = [] self.timings["send_per_server"][server].append(elapsed) def accumulate_encode_time(self, elapsed): self.timings["encode"] += elapsed def accumulate_encrypt_time(self, elapsed): self.timings["encrypt"] += elapsed def get_started(self): return self.started def get_storage_index(self): return self.storage_index def get_encoding(self): return self.encoding def using_helper(self): return self.helper def get_servermap(self): return self.servermap def get_size(self): return self.size def get_status(self): return self.status def get_progress(self): return self.progress def get_active(self): return self.active def get_counter(self): return self.counter def get_problems(self): return self._problems def set_storage_index(self, si): self.storage_index = si def set_helper(self, helper): self.helper = helper def set_servermap(self, servermap): self.servermap = servermap def set_encoding(self, k, n): self.encoding = (k, n) def set_size(self, size): self.size = size def set_status(self, status): self.status = status def 
set_progress(self, value): self.progress = value def set_active(self, value): self.active = value class LoopLimitExceededError(Exception): pass class Publish(object): """I represent a single act of publishing the mutable file to the grid. I will only publish my data if the servermap I am using still represents the current state of the world. To make the initial publish, set servermap to None. """ def __init__(self, filenode, storage_broker, servermap): self._node = filenode self._storage_broker = storage_broker self._servermap = servermap self._storage_index = self._node.get_storage_index() self._log_prefix = prefix = si_b2a(self._storage_index)[:5] num = self.log("Publish(%r): starting" % prefix, parent=None) self._log_number = num self._running = True self._first_write_error = None self._last_failure = None self._status = PublishStatus() self._status.set_storage_index(self._storage_index) self._status.set_helper(False) self._status.set_progress(0.0) self._status.set_active(True) self._version = self._node.get_version() assert self._version in (SDMF_VERSION, MDMF_VERSION) def get_status(self): return self._status def log(self, *args, **kwargs): if 'parent' not in kwargs: kwargs['parent'] = self._log_number if "facility" not in kwargs: kwargs["facility"] = "tahoe.mutable.publish" return log.msg(*args, **kwargs) def update(self, data, offset, blockhashes, version): """ I replace the contents of this file with the contents of data, starting at offset. I return a Deferred that fires with None when the replacement has been completed, or with an error if something went wrong during the process. Note that this process will not upload new shares. If the file being updated is in need of repair, callers will have to repair it on their own. """ # How this works: # 1: Make server assignments. We'll assign each share that we know # about on the grid to that server that currently holds that # share, and will not place any new shares. # 2: Setup encoding parameters. Most of these will stay the same # -- datalength will change, as will some of the offsets. # 3. Upload the new segments. # 4. Be done. assert IMutableUploadable.providedBy(data) self.data = data # XXX: Use the MutableFileVersion instead. self.datalength = self._node.get_size() if data.get_size() > self.datalength: self.datalength = data.get_size() self.log("starting update") self.log("adding new data of length %d at offset %d" % \ (data.get_size(), offset)) self.log("new data length is %d" % self.datalength) self._status.set_size(self.datalength) self._status.set_status("Started") self._started = time.time() self.done_deferred = defer.Deferred() self._writekey = self._node.get_writekey() assert self._writekey, "need write capability to publish" # first, which servers will we publish to? We require that the # servermap was updated in MODE_WRITE, so we can depend upon the # serverlist computed by that process instead of computing our own. assert self._servermap assert self._servermap.get_last_update()[0] in (MODE_WRITE, MODE_CHECK, MODE_REPAIR) # we will push a version that is one larger than anything present # in the grid, according to the servermap. self._new_seqnum = self._servermap.highest_seqnum() + 1 self._status.set_servermap(self._servermap) self.log(format="new seqnum will be %(seqnum)d", seqnum=self._new_seqnum, level=log.NOISY) # We're updating an existing file, so all of the following # should be available. 
self.readkey = self._node.get_readkey() self.required_shares = self._node.get_required_shares() assert self.required_shares is not None self.total_shares = self._node.get_total_shares() assert self.total_shares is not None self._status.set_encoding(self.required_shares, self.total_shares) self._pubkey = self._node.get_pubkey() assert self._pubkey self._privkey = self._node.get_privkey() assert self._privkey self._encprivkey = self._node.get_encprivkey() sb = self._storage_broker full_serverlist = list(sb.get_servers_for_psi(self._storage_index)) self.full_serverlist = full_serverlist # for use later, immutable self.bad_servers = set() # servers who have errbacked/refused requests # This will set self.segment_size, self.num_segments, and # self.fec. TODO: Does it know how to do the offset? Probably # not. So do that part next. self.setup_encoding_parameters(offset=offset) # if we experience any surprises (writes which were rejected because # our test vector did not match, or shares which we didn't expect to # see), we set this flag and report an UncoordinatedWriteError at the # end of the publish process. self.surprised = False # we keep track of three tables. The first is our goal: which share # we want to see on which servers. This is initially populated by the # existing servermap. self.goal = set() # pairs of (server, shnum) tuples # the number of outstanding queries: those that are in flight and # may or may not be delivered, accepted, or acknowledged. This is # incremented when a query is sent, and decremented when the response # returns or errbacks. self.num_outstanding = 0 # the third is a table of successes: share which have actually been # placed. These are populated when responses come back with success. # When self.placed == self.goal, we're done. self.placed = set() # (server, shnum) tuples self.bad_share_checkstrings = {} # This is set at the last step of the publishing process. self.versioninfo = "" # we use the servermap to populate the initial goal: this way we will # try to update each existing share in place. Since we're # updating, we ignore damaged and missing shares -- callers must # do a repair to repair and recreate these. self.goal = set(self._servermap.get_known_shares()) # shnum -> set of IMutableSlotWriter self.writers = DictOfSets() # SDMF files are updated differently. self._version = MDMF_VERSION writer_class = MDMFSlotWriteProxy # For each (server, shnum) in self.goal, we make a # write proxy for that server. We'll use this to write # shares to the server. for (server,shnum) in self.goal: write_enabler = self._node.get_write_enabler(server) renew_secret = self._node.get_renewal_secret(server) cancel_secret = self._node.get_cancel_secret(server) secrets = (write_enabler, renew_secret, cancel_secret) writer = writer_class(shnum, server.get_storage_server(), self._storage_index, secrets, self._new_seqnum, self.required_shares, self.total_shares, self.segment_size, self.datalength) self.writers.add(shnum, writer) writer.server = server known_shares = self._servermap.get_known_shares() assert (server, shnum) in known_shares old_versionid, old_timestamp = known_shares[(server,shnum)] (old_seqnum, old_root_hash, old_salt, old_segsize, old_datalength, old_k, old_N, old_prefix, old_offsets_tuple) = old_versionid writer.set_checkstring(old_seqnum, old_root_hash, old_salt) # Our remote shares will not have a complete checkstring until # after we are done writing share data and have started to write # blocks. 
In the meantime, we need to know what to look for when # writing, so that we can detect UncoordinatedWriteErrors. self._checkstring = self._get_some_writer().get_checkstring() # Now, we start pushing shares. self._status.timings["setup"] = time.time() - self._started # First, we encrypt, encode, and publish the shares that we need # to encrypt, encode, and publish. # Our update process fetched these for us. We need to update # them in place as publishing happens. self.blockhashes = {} # (shnum, [blochashes]) for (i, bht) in list(blockhashes.items()): # We need to extract the leaves from our old hash tree. old_segcount = mathutil.div_ceil(version[4], version[3]) h = hashtree.IncompleteHashTree(old_segcount) bht = dict(enumerate(bht)) h.set_hashes(bht) leaves = h[h.get_leaf_index(0):] for j in range(self.num_segments - len(leaves)): leaves.append(None) assert len(leaves) >= self.num_segments self.blockhashes[i] = leaves # This list will now be the leaves that were set during the # initial upload + enough empty hashes to make it a # power-of-two. If we exceed a power of two boundary, we # should be encoding the file over again, and should not be # here. So, we have #assert len(self.blockhashes[i]) == \ # hashtree.roundup_pow2(self.num_segments), \ # len(self.blockhashes[i]) # XXX: Except this doesn't work. Figure out why. # These are filled in later, after we've modified the block hash # tree suitably. self.sharehash_leaves = None # eventually [sharehashes] self.sharehashes = {} # shnum -> [sharehash leaves necessary to # validate the share] self.log("Starting push") self._state = PUSHING_BLOCKS_STATE self._push() return self.done_deferred def publish(self, newdata): """Publish the filenode's current contents. Returns a Deferred that fires (with None) when the publish has done as much work as it's ever going to do, or errbacks with ConsistencyError if it detects a simultaneous write. """ # 0. Setup encoding parameters, encoder, and other such things. # 1. Encrypt, encode, and publish segments. assert IMutableUploadable.providedBy(newdata) self.data = newdata self.datalength = newdata.get_size() #if self.datalength >= DEFAULT_MUTABLE_MAX_SEGMENT_SIZE: # self._version = MDMF_VERSION #else: # self._version = SDMF_VERSION self.log("starting publish, datalen is %s" % self.datalength) self._status.set_size(self.datalength) self._status.set_status("Started") self._started = time.time() self.done_deferred = defer.Deferred() self._writekey = self._node.get_writekey() assert self._writekey, "need write capability to publish" # first, which servers will we publish to? We require that the # servermap was updated in MODE_WRITE, so we can depend upon the # serverlist computed by that process instead of computing our own. if self._servermap: assert self._servermap.get_last_update()[0] in (MODE_WRITE, MODE_CHECK, MODE_REPAIR) # we will push a version that is one larger than anything present # in the grid, according to the servermap. 
self._new_seqnum = self._servermap.highest_seqnum() + 1 else: # If we don't have a servermap, that's because we're doing the # initial publish self._new_seqnum = 1 self._servermap = ServerMap() self._status.set_servermap(self._servermap) self.log(format="new seqnum will be %(seqnum)d", seqnum=self._new_seqnum, level=log.NOISY) # having an up-to-date servermap (or using a filenode that was just # created for the first time) also guarantees that the following # fields are available self.readkey = self._node.get_readkey() self.required_shares = self._node.get_required_shares() assert self.required_shares is not None self.total_shares = self._node.get_total_shares() assert self.total_shares is not None self._status.set_encoding(self.required_shares, self.total_shares) self._pubkey = self._node.get_pubkey() assert self._pubkey self._privkey = self._node.get_privkey() assert self._privkey self._encprivkey = self._node.get_encprivkey() sb = self._storage_broker full_serverlist = list(sb.get_servers_for_psi(self._storage_index)) self.full_serverlist = full_serverlist # for use later, immutable self.bad_servers = set() # servers who have errbacked/refused requests # This will set self.segment_size, self.num_segments, and # self.fec. self.setup_encoding_parameters() # if we experience any surprises (writes which were rejected because # our test vector did not match, or shares which we didn't expect to # see), we set this flag and report an UncoordinatedWriteError at the # end of the publish process. self.surprised = False # we keep track of three tables. The first is our goal: which share # we want to see on which servers. This is initially populated by the # existing servermap. self.goal = set() # pairs of (server, shnum) tuples # the number of outstanding queries: those that are in flight and # may or may not be delivered, accepted, or acknowledged. This is # incremented when a query is sent, and decremented when the response # returns or errbacks. self.num_outstanding = 0 # the third is a table of successes: share which have actually been # placed. These are populated when responses come back with success. # When self.placed == self.goal, we're done. self.placed = set() # (server, shnum) tuples self.bad_share_checkstrings = {} # This is set at the last step of the publishing process. self.versioninfo = "" # we use the servermap to populate the initial goal: this way we will # try to update each existing share in place. self.goal = set(self._servermap.get_known_shares()) # then we add in all the shares that were bad (corrupted, bad # signatures, etc). We want to replace these. for key, old_checkstring in list(self._servermap.get_bad_shares().items()): (server, shnum) = key self.goal.add( (server,shnum) ) self.bad_share_checkstrings[(server,shnum)] = old_checkstring # TODO: Make this part do server selection. self.update_goal() # shnum -> set of IMutableSlotWriter self.writers = DictOfSets() if self._version == MDMF_VERSION: writer_class = MDMFSlotWriteProxy else: writer_class = SDMFSlotWriteProxy # For each (server, shnum) in self.goal, we make a # write proxy for that server. We'll use this to write # shares to the server. 
for (server,shnum) in self.goal: write_enabler = self._node.get_write_enabler(server) renew_secret = self._node.get_renewal_secret(server) cancel_secret = self._node.get_cancel_secret(server) secrets = (write_enabler, renew_secret, cancel_secret) writer = writer_class(shnum, server.get_storage_server(), self._storage_index, secrets, self._new_seqnum, self.required_shares, self.total_shares, self.segment_size, self.datalength) self.writers.add(shnum, writer) writer.server = server known_shares = self._servermap.get_known_shares() if (server, shnum) in known_shares: old_versionid, old_timestamp = known_shares[(server,shnum)] (old_seqnum, old_root_hash, old_salt, old_segsize, old_datalength, old_k, old_N, old_prefix, old_offsets_tuple) = old_versionid writer.set_checkstring(old_seqnum, old_root_hash, old_salt) elif (server, shnum) in self.bad_share_checkstrings: old_checkstring = self.bad_share_checkstrings[(server, shnum)] writer.set_checkstring(old_checkstring) # Our remote shares will not have a complete checkstring until # after we are done writing share data and have started to write # blocks. In the meantime, we need to know what to look for when # writing, so that we can detect UncoordinatedWriteErrors. self._checkstring = self._get_some_writer().get_checkstring() # Now, we start pushing shares. self._status.timings["setup"] = time.time() - self._started # First, we encrypt, encode, and publish the shares that we need # to encrypt, encode, and publish. # This will eventually hold the block hash chain for each share # that we publish. We define it this way so that empty publishes # will still have something to write to the remote slot. self.blockhashes = dict([(i, []) for i in range(self.total_shares)]) for i in range(self.total_shares): blocks = self.blockhashes[i] for j in range(self.num_segments): blocks.append(None) self.sharehash_leaves = None # eventually [sharehashes] self.sharehashes = {} # shnum -> [sharehash leaves necessary to # validate the share] self.log("Starting push") self._state = PUSHING_BLOCKS_STATE self._push() return self.done_deferred def _get_some_writer(self): return list(list(self.writers.values())[0])[0] def _update_status(self): self._status.set_status("Sending Shares: %d placed out of %d, " "%d messages outstanding" % (len(self.placed), len(self.goal), self.num_outstanding)) self._status.set_progress(1.0 * len(self.placed) / len(self.goal)) def setup_encoding_parameters(self, offset=0): if self._version == MDMF_VERSION: segment_size = DEFAULT_MUTABLE_MAX_SEGMENT_SIZE # 128 KiB by default else: segment_size = self.datalength # SDMF is only one segment # this must be a multiple of self.required_shares segment_size = mathutil.next_multiple(segment_size, self.required_shares) self.segment_size = segment_size # Calculate the starting segment for the upload. if segment_size: # We use div_ceil instead of integer division here because # it is semantically correct. # If datalength isn't an even multiple of segment_size, but # is larger than segment_size, datalength // segment_size # will be the largest number such that num <= datalength and # num % segment_size == 0. But that's not what we want, # because it ignores the extra data. div_ceil will give us # the right number of segments for the data that we're # given. 
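# (Editor's illustration -- a standalone sketch; sizes are hypothetical.) Why the
# code below uses ceiling division for the segment count: plain floor division
# would silently drop the partial tail segment. div_ceil here mirrors
# allmydata.util.mathutil.div_ceil.
def div_ceil(n, d):
    """Smallest integer k such that k * d >= n."""
    return (n + d - 1) // d

datalength, segment_size = 300_000, 131_072
assert datalength // segment_size == 2            # misses the 37,856-byte tail
assert div_ceil(datalength, segment_size) == 3    # counts the partial tail segment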
self.num_segments = mathutil.div_ceil(self.datalength, segment_size) self.starting_segment = offset // segment_size else: self.num_segments = 0 self.starting_segment = 0 self.log("building encoding parameters for file") self.log("got segsize %d" % self.segment_size) self.log("got %d segments" % self.num_segments) if self._version == SDMF_VERSION: assert self.num_segments in (0, 1) # SDMF # calculate the tail segment size. if segment_size and self.datalength: self.tail_segment_size = self.datalength % segment_size self.log("got tail segment size %d" % self.tail_segment_size) else: self.tail_segment_size = 0 if self.tail_segment_size == 0 and segment_size: # The tail segment is the same size as the other segments. self.tail_segment_size = segment_size # Make FEC encoders fec = codec.CRSEncoder() fec.set_params(self.segment_size, self.required_shares, self.total_shares) self.piece_size = fec.get_block_size() self.fec = fec if self.tail_segment_size == self.segment_size: self.tail_fec = self.fec else: tail_fec = codec.CRSEncoder() tail_fec.set_params(self.tail_segment_size, self.required_shares, self.total_shares) self.tail_fec = tail_fec self._current_segment = self.starting_segment self.end_segment = self.num_segments - 1 # Now figure out where the last segment should be. if self.data.get_size() != self.datalength: # We're updating a few segments in the middle of a mutable # file, so we don't want to republish the whole thing. # (we don't have enough data to do that even if we wanted # to) end = self.data.get_size() self.end_segment = end // segment_size if end % segment_size == 0: self.end_segment -= 1 self.log("got start segment %d" % self.starting_segment) self.log("got end segment %d" % self.end_segment) def _push(self, ignored=None): """ I manage state transitions. In particular, I see that we still have a good enough number of writers to complete the upload successfully. """ # Can we still successfully publish this file? # TODO: Keep track of outstanding queries before aborting the # process. num_shnums = len(self.writers) if num_shnums < self.required_shares or self.surprised: return self._failure() # Figure out what we need to do next. Each of these needs to # return a deferred so that we don't block execution when this # is first called in the upload method. if self._state == PUSHING_BLOCKS_STATE: return self.push_segment(self._current_segment) elif self._state == PUSHING_EVERYTHING_ELSE_STATE: return self.push_everything_else() # If we make it to this point, we were successful in placing the # file. return self._done() def push_segment(self, segnum): if self.num_segments == 0 and self._version == SDMF_VERSION: self._add_dummy_salts() if segnum > self.end_segment: # We don't have any more segments to push. self._state = PUSHING_EVERYTHING_ELSE_STATE return self._push() d = self._encode_segment(segnum) d.addCallback(self._push_segment, segnum) def _increment_segnum(ign): self._current_segment += 1 # XXX: I don't think we need to do addBoth here -- any errBacks # should be handled within push_segment. d.addCallback(_increment_segnum) d.addCallback(self._turn_barrier) d.addCallback(self._push) d.addErrback(self._failure) def _turn_barrier(self, result): """ I help the publish process avoid the recursion limit issues described in #237. """ return fireEventually(result) def _add_dummy_salts(self): """ SDMF files need a salt even if they're empty, or the signature won't make sense. This method adds a dummy salt to each of our SDMF writers so that they can write the signature later. 
""" salt = os.urandom(16) assert self._version == SDMF_VERSION for shnum, writers in self.writers.items(): for writer in writers: writer.put_salt(salt) @async_to_deferred async def _encode_segment(self, segnum): """ I encrypt and encode the segment segnum. """ started = time.time() if segnum + 1 == self.num_segments: segsize = self.tail_segment_size else: segsize = self.segment_size self.log("Pushing segment %d of %d" % (segnum + 1, self.num_segments)) data = self.data.read(segsize) if not isinstance(data, bytes): # XXX: Why does this return a list? data = b"".join(data) assert len(data) == segsize, len(data) self._status.set_status("Encrypting") def encrypt(readkey): salt = os.urandom(16) key = hashutil.ssk_readkey_data_hash(salt, readkey) encryptor = aes.create_encryptor(key) crypttext = aes.encrypt_data(encryptor, data) assert len(crypttext) == len(data) return salt, crypttext salt, crypttext = await defer_to_thread(encrypt, self.readkey) now = time.time() self._status.accumulate_encrypt_time(now - started) started = now # now apply FEC if segnum + 1 == self.num_segments: fec = self.tail_fec else: fec = self.fec self._status.set_status("Encoding") crypttext_pieces = [None] * self.required_shares piece_size = fec.get_block_size() for i in range(len(crypttext_pieces)): offset = i * piece_size piece = crypttext[offset:offset+piece_size] piece = piece + b"\x00"*(piece_size - len(piece)) # padding crypttext_pieces[i] = piece assert len(piece) == piece_size res = await fec.encode(crypttext_pieces) elapsed = time.time() - started self._status.accumulate_encode_time(elapsed) return (res, salt) @async_to_deferred async def _push_segment(self, encoded_and_salt, segnum): """ I push (data, salt) as segment number segnum. """ results, salt = encoded_and_salt shares, shareids = results self._status.set_status("Pushing segment") for i in range(len(shares)): sharedata = shares[i] shareid = shareids[i] if self._version == MDMF_VERSION: hashed = salt + sharedata else: hashed = sharedata block_hash = await defer_to_thread(hashutil.block_hash, hashed) self.blockhashes[shareid][segnum] = block_hash # find the writer for this share writers = self.writers[shareid] for writer in writers: writer.put_block(sharedata, segnum, salt) def push_everything_else(self): """ I put everything else associated with a share. """ self._pack_started = time.time() self.push_encprivkey() self.push_blockhashes() self.push_sharehashes() self.push_toplevel_hashes_and_signature() d = self.finish_publishing() def _change_state(ignored): self._state = DONE_STATE d.addCallback(_change_state) d.addCallback(self._push) return d def push_encprivkey(self): encprivkey = self._encprivkey self._status.set_status("Pushing encrypted private key") for shnum, writers in self.writers.items(): for writer in writers: writer.put_encprivkey(encprivkey) def push_blockhashes(self): self.sharehash_leaves = [None] * len(self.blockhashes) self._status.set_status("Building and pushing block hash tree") for shnum, blockhashes in list(self.blockhashes.items()): t = hashtree.HashTree(blockhashes) self.blockhashes[shnum] = list(t) # set the leaf for future use. 
self.sharehash_leaves[shnum] = t[0] writers = self.writers[shnum] for writer in writers: writer.put_blockhashes(self.blockhashes[shnum]) def push_sharehashes(self): self._status.set_status("Building and pushing share hash chain") share_hash_tree = hashtree.HashTree(self.sharehash_leaves) for shnum in range(len(self.sharehash_leaves)): needed_indices = share_hash_tree.needed_hashes(shnum) self.sharehashes[shnum] = dict( [ (i, share_hash_tree[i]) for i in needed_indices] ) writers = self.writers[shnum] for writer in writers: writer.put_sharehashes(self.sharehashes[shnum]) self.root_hash = share_hash_tree[0] def push_toplevel_hashes_and_signature(self): # We need to to three things here: # - Push the root hash and salt hash # - Get the checkstring of the resulting layout; sign that. # - Push the signature self._status.set_status("Pushing root hashes and signature") for shnum in range(self.total_shares): writers = self.writers[shnum] for writer in writers: writer.put_root_hash(self.root_hash) self._update_checkstring() self._make_and_place_signature() def _update_checkstring(self): """ After putting the root hash, MDMF files will have the checkstring written to the storage server. This means that we can update our copy of the checkstring so we can detect uncoordinated writes. SDMF files will have the same checkstring, so we need not do anything. """ self._checkstring = self._get_some_writer().get_checkstring() def _make_and_place_signature(self): """ I create and place the signature. """ started = time.time() self._status.set_status("Signing prefix") signable = self._get_some_writer().get_signable() self.signature = rsa.sign_data(self._privkey, signable) for (shnum, writers) in self.writers.items(): for writer in writers: writer.put_signature(self.signature) self._status.timings['sign'] = time.time() - started def finish_publishing(self): # We're almost done -- we just need to put the verification key # and the offsets started = time.time() self._status.set_status("Pushing shares") self._started_pushing = started ds = [] verification_key = rsa.der_string_from_verifying_key(self._pubkey) for (shnum, writers) in list(self.writers.copy().items()): for writer in writers: writer.put_verification_key(verification_key) self.num_outstanding += 1 def _no_longer_outstanding(res): self.num_outstanding -= 1 return res d = writer.finish_publishing() d.addBoth(_no_longer_outstanding) d.addErrback(self._connection_problem, writer) d.addCallback(self._got_write_answer, writer, started) ds.append(d) self._record_verinfo() self._status.timings['pack'] = time.time() - started return defer.DeferredList(ds) def _record_verinfo(self): self.versioninfo = self._get_some_writer().get_verinfo() def _connection_problem(self, f, writer): """ We ran into a connection problem while working with writer, and need to deal with that. 
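# (Editor's sketch of the share-hash-chain construction above -- requires a
# Tahoe-LAFS environment; the leaf values are made up.) Every share stores the
# same root, plus just the uncle hashes needed to connect its own leaf to that
# root, which is what put_sharehashes() hands to each writer.
from allmydata import hashtree
from allmydata.util import hashutil

leaves = [hashutil.tagged_hash(b"demo", b"share-%d" % i) for i in range(4)]
tree = hashtree.HashTree(leaves)
root = tree[0]
for shnum in range(4):
    needed = tree.needed_hashes(shnum)                 # indices of the uncle hashes
    chain = dict((i, tree[i]) for i in needed)         # what this share carries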
""" self.log("found problem: %s" % str(f)) self._last_failure = f self.writers.discard(writer.shnum, writer) def log_goal(self, goal, message=""): logmsg = [message] for (shnum, server) in sorted([(s,p) for (p,s) in goal], key=lambda t: (id(t[0]), id(t[1]))): logmsg.append("sh%d to [%r]" % (shnum, server.get_name())) self.log("current goal: %s" % (", ".join(logmsg)), level=log.NOISY) self.log("we are planning to push new seqnum=#%d" % self._new_seqnum, level=log.NOISY) def update_goal(self): self.log_goal(self.goal, "before update: ") # first, remove any bad servers from our goal self.goal = set([ (server, shnum) for (server, shnum) in self.goal if server not in self.bad_servers ]) # find the homeless shares: homefull_shares = set([shnum for (server, shnum) in self.goal]) homeless_shares = set(range(self.total_shares)) - homefull_shares homeless_shares = sorted(list(homeless_shares)) # place them somewhere. We prefer unused servers at the beginning of # the available server list. if not homeless_shares: return # if an old share X is on a node, put the new share X there too. # TODO: 1: redistribute shares to achieve one-per-server, by copying # shares from existing servers to new (less-crowded) ones. The # old shares must still be updated. # TODO: 2: move those shares instead of copying them, to reduce future # update work # this is a bit CPU intensive but easy to analyze. We create a sort # order for each server. If the server is marked as bad, we don't # even put them in the list. Then we care about the number of shares # which have already been assigned to them. After that we care about # their permutation order. old_assignments = DictOfSets() for (server, shnum) in self.goal: old_assignments.add(server, shnum) serverlist = [] action = start_action( action_type=u"mutable:upload:update_goal", homeless_shares=len(homeless_shares), ) with action: for i, server in enumerate(self.full_serverlist): serverid = server.get_serverid() if server in self.bad_servers: Message.log( message_type=u"mutable:upload:bad-server", server_id=serverid, ) continue # if we have >= 1 grid-managers, this checks that we have # a valid certificate for this server if not server.upload_permitted(): Message.log( message_type=u"mutable:upload:no-gm-certs", server_id=serverid, ) continue entry = (len(old_assignments.get(server, [])), i, serverid, server) serverlist.append(entry) serverlist.sort() if not serverlist: raise NotEnoughServersError("Ran out of non-bad servers, " "first_error=%s" % str(self._first_write_error), self._first_write_error) # we then index this serverlist with an integer, because we may have # to wrap. We update the goal as we go. i = 0 for shnum in homeless_shares: (ignored1, ignored2, ignored3, server) = serverlist[i] # if we are forced to send a share to a server that already has # one, we may have two write requests in flight, and the # servermap (which was computed before either request was sent) # won't reflect the new shares, so the second response will be # surprising. There is code in _got_write_answer() to tolerate # this, otherwise it would cause the publish to fail with an # UncoordinatedWriteError. See #546 for details of the trouble # this used to cause. 
self.goal.add( (server, shnum) ) i += 1 if i >= len(serverlist): i = 0 self.log_goal(self.goal, "after update: ") def _got_write_answer(self, answer, writer, started): if not answer: # SDMF writers only pretend to write when readers set their # blocks, salts, and so on -- they actually just write once, # at the end of the upload process. In fake writes, they # return defer.succeed(None). If we see that, we shouldn't # bother checking it. return server = writer.server lp = self.log("_got_write_answer from %r, share %d" % (server.get_name(), writer.shnum)) now = time.time() elapsed = now - started self._status.add_per_server_time(server, elapsed) wrote, read_data = answer surprise_shares = set(read_data.keys()) - set([writer.shnum]) # We need to remove from surprise_shares any shares that we are # knowingly also writing to that server from other writers. # TODO: Precompute this. shares = [] for shnum, writers in self.writers.items(): shares.extend([x.shnum for x in writers if x.server == server]) known_shnums = set(shares) surprise_shares -= known_shnums self.log("found the following surprise shares: %s" % str(surprise_shares)) # Now surprise shares contains all of the shares that we did not # expect to be there. surprised = False for shnum in surprise_shares: # read_data is a dict mapping shnum to checkstring (SIGNED_PREFIX) checkstring = read_data[shnum][0] # What we want to do here is to see if their (seqnum, # roothash, salt) is the same as our (seqnum, roothash, # salt), or the equivalent for MDMF. The best way to do this # is to store a packed representation of our checkstring # somewhere, then not bother unpacking the other # checkstring. if checkstring == self._checkstring: # they have the right share, somehow if (server,shnum) in self.goal: # and we want them to have it, so we probably sent them a # copy in an earlier write. This is ok, and avoids the # #546 problem. continue # They aren't in our goal, but they are still for the right # version. Somebody else wrote them, and it's a convergent # uncoordinated write. Pretend this is ok (don't be # surprised), since I suspect there's a decent chance that # we'll hit this in normal operation. continue else: # the new shares are of a different version if server in self._servermap.get_reachable_servers(): # we asked them about their shares, so we had knowledge # of what they used to have. Any surprising shares must # have come from someone else, so UCW. surprised = True else: # we didn't ask them, and now we've discovered that they # have a share we didn't know about. This indicates that # mapupdate should have wokred harder and asked more # servers before concluding that it knew about them all. # signal UCW, but make sure to ask this server next time, # so we'll remember to update it if/when we retry. surprised = True # TODO: ask this server next time. I don't yet have a good # way to do this. Two insufficient possibilities are: # # self._servermap.add_new_share(server, shnum, verinfo, now) # but that requires fetching/validating/parsing the whole # version string, and all we have is the checkstring # self._servermap.mark_bad_share(server, shnum, checkstring) # that will make publish overwrite the share next time, # but it won't re-query the server, and it won't make # mapupdate search further # TODO later: when publish starts, do # servermap.get_best_version(), extract the seqnum, # subtract one, and store as highest-replaceable-seqnum. 
# Then, if this surprise-because-we-didn't-ask share is # of highest-replaceable-seqnum or lower, we're allowed # to replace it: send out a new writev (or rather add it # to self.goal and loop). surprised = True if surprised: self.log("they had shares %s that we didn't know about" % (list(surprise_shares),), parent=lp, level=log.WEIRD, umid="un9CSQ") self.surprised = True if not wrote: # TODO: there are two possibilities. The first is that the server # is full (or just doesn't want to give us any room), which means # we shouldn't ask them again, but is *not* an indication of an # uncoordinated write. The second is that our testv failed, which # *does* indicate an uncoordinated write. We currently don't have # a way to tell these two apart (in fact, the storage server code # doesn't have the option of refusing our share). # # If the server is full, mark the server as bad (so we don't ask # them again), but don't set self.surprised. The loop() will find # a new server. # # If the testv failed, log it, set self.surprised, but don't # bother adding to self.bad_servers . self.log("our testv failed, so the write did not happen", parent=lp, level=log.WEIRD, umid="8sc26g") self.surprised = True self.bad_servers.add(server) # don't ask them again # use the checkstring to add information to the log message unknown_format = False for (shnum,readv) in list(read_data.items()): checkstring = readv[0] version = get_version_from_checkstring(checkstring) if version == MDMF_VERSION: (other_seqnum, other_roothash) = unpack_mdmf_checkstring(checkstring) elif version == SDMF_VERSION: (other_seqnum, other_roothash, other_IV) = unpack_sdmf_checkstring(checkstring) else: unknown_format = True expected_version = self._servermap.version_on_server(server, shnum) if expected_version: (seqnum, root_hash, IV, segsize, datalength, k, N, prefix, offsets_tuple) = expected_version msg = ("somebody modified the share on us:" " shnum=%d: I thought they had #%d:R=%r," % (shnum, seqnum, base32.b2a(root_hash)[:4])) if unknown_format: msg += (" but I don't know how to read share" " format %d" % version) else: msg += " but testv reported #%d:R=%r" % \ (other_seqnum, base32.b2a(other_roothash)[:4]) self.log(msg, parent=lp, level=log.NOISY) # if expected_version==None, then we didn't expect to see a # share on that server, and the 'surprise_shares' clause # above will have logged it. return # and update the servermap # self.versioninfo is set during the last phase of publishing. # If we get there, we know that responses correspond to placed # shares, and can safely execute these statements. if self.versioninfo: self.log("wrote successfully: adding new share to servermap") self._servermap.add_new_share(server, writer.shnum, self.versioninfo, started) self.placed.add( (server, writer.shnum) ) self._update_status() # the next method in the deferred chain will check to see if # we're done and successful. return def _done(self): if not self._running: return self._running = False now = time.time() self._status.timings["total"] = now - self._started elapsed = now - self._started_pushing self._status.timings['push'] = elapsed self._status.set_active(False) self.log("Publish done, success") self._status.set_status("Finished") self._status.set_progress(1.0) # Get k and segsize, then give them to the caller. 
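# (Editor's note -- a purely conceptual sketch; the real checkstring is the packed
# SIGNED_PREFIX, not a tuple.) The surprise logic above boils down to comparing
# the (seqnum, roothash[, IV]) we wrote against what the server reported back;
# a mismatch from a reachable server is treated as an uncoordinated write.
ours   = (5, b"roothash-aaaa")          # hypothetical values
theirs = (5, b"roothash-bbbb")
surprised = theirs != ours              # True -> someone else wrote version 5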
hints = {} hints['segsize'] = self.segment_size hints['k'] = self.required_shares self._node.set_downloader_hints(hints) eventually(self.done_deferred.callback, None) def _failure(self, f=None): if f: self._last_failure = f if not self.surprised: # We ran out of servers msg = "Publish ran out of good servers" if self._last_failure: msg += ", last failure was: %s" % str(self._last_failure) self.log(msg) e = NotEnoughServersError(msg) else: # We ran into shares that we didn't recognize, which means # that we need to return an UncoordinatedWriteError. self.log("Publish failed with UncoordinatedWriteError") e = UncoordinatedWriteError() f = failure.Failure(e) eventually(self.done_deferred.callback, f) @implementer(IMutableUploadable) class MutableFileHandle(object): """ I am a mutable uploadable built around a filehandle-like object, usually either a BytesIO instance or a handle to an actual file. """ def __init__(self, filehandle): # The filehandle is defined as a generally file-like object that # has these two methods. We don't care beyond that. assert hasattr(filehandle, "read") assert hasattr(filehandle, "close") self._filehandle = filehandle # We must start reading at the beginning of the file, or we risk # encountering errors when the data read does not match the size # reported to the uploader. self._filehandle.seek(0) # We have not yet read anything, so our position is 0. self._marker = 0 def get_size(self): """ I return the amount of data in my filehandle. """ if not hasattr(self, "_size"): old_position = self._filehandle.tell() # Seek to the end of the file by seeking 0 bytes from the # file's end self._filehandle.seek(0, os.SEEK_END) self._size = self._filehandle.tell() # Restore the previous position, in case this was called # after a read. self._filehandle.seek(old_position) assert self._filehandle.tell() == old_position assert hasattr(self, "_size") return self._size def pos(self): """ I return the position of my read marker -- i.e., how much data I have already read and returned to callers. """ return self._marker def read(self, length): """ I return some data (up to length bytes) from my filehandle. In most cases, I return length bytes, but sometimes I won't -- for example, if I am asked to read beyond the end of a file, or an error occurs. """ results = self._filehandle.read(length) self._marker += len(results) return [results] def close(self): """ I close the underlying filehandle. Any further operations on the filehandle fail at this point. """ self._filehandle.close() class MutableData(MutableFileHandle): """ I am a mutable uploadable built around a string, which I then cast into a BytesIO and treat as a filehandle. """ def __init__(self, s): # Take a string and return a file-like uploadable. assert isinstance(s, bytes) MutableFileHandle.__init__(self, BytesIO(s)) @implementer(IMutableUploadable) class TransformingUploadable(object): """ I am an IMutableUploadable that wraps another IMutableUploadable, and some segments that are already on the grid. When I am called to read, I handle merging of boundary segments. 
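# (Editor's sketch of the uploadable wrappers defined in this module -- the calls
# mirror the classes above; running it requires Tahoe-LAFS on the path.)
from io import BytesIO
from allmydata.mutable.publish import MutableData, MutableFileHandle

u = MutableData(b"new mutable contents")
assert u.get_size() == 20
assert b"".join(u.read(4)) == b"new "     # read() returns a list of byte strings
assert u.pos() == 4

fh = MutableFileHandle(BytesIO(b"hello world"))
assert fh.get_size() == 11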
""" def __init__(self, data, offset, segment_size, start, end): assert IMutableUploadable.providedBy(data) self._newdata = data self._offset = offset self._segment_size = segment_size self._start = start self._end = end self._read_marker = 0 self._first_segment_offset = offset % segment_size num = self.log("TransformingUploadable: starting", parent=None) self._log_number = num self.log("got fso: %d" % self._first_segment_offset) self.log("got offset: %d" % self._offset) def log(self, *args, **kwargs): if 'parent' not in kwargs: kwargs['parent'] = self._log_number if "facility" not in kwargs: kwargs["facility"] = "tahoe.mutable.transforminguploadable" return log.msg(*args, **kwargs) def get_size(self): return self._offset + self._newdata.get_size() def read(self, length): # We can get data from 3 sources here. # 1. The first of the segments provided to us. # 2. The data that we're replacing things with. # 3. The last of the segments provided to us. # are we in state 0? self.log("reading %d bytes" % length) old_start_data = b"" old_data_length = self._first_segment_offset - self._read_marker if old_data_length > 0: if old_data_length > length: old_data_length = length self.log("returning %d bytes of old start data" % old_data_length) old_data_end = old_data_length + self._read_marker old_start_data = self._start[self._read_marker:old_data_end] length -= old_data_length else: # otherwise calculations later get screwed up. old_data_length = 0 # Is there enough new data to satisfy this read? If not, we need # to pad the end of the data with data from our last segment. old_end_length = length - \ (self._newdata.get_size() - self._newdata.pos()) old_end_data = b"" if old_end_length > 0: self.log("reading %d bytes of old end data" % old_end_length) # TODO: We're not explicitly checking for tail segment size # here. Is that a problem? old_data_offset = (length - old_end_length + \ old_data_length) % self._segment_size self.log("reading at offset %d" % old_data_offset) old_end = old_data_offset + old_end_length old_end_data = self._end[old_data_offset:old_end] length -= old_end_length assert length == self._newdata.get_size() - self._newdata.pos() self.log("reading %d bytes of new data" % length) new_data = self._newdata.read(length) new_data = b"".join(new_data) self._read_marker += len(old_start_data + new_data + old_end_data) return old_start_data + new_data + old_end_data def close(self): pass tahoe_lafs-1.20.0/src/allmydata/mutable/repairer.py0000644000000000000000000001363413615410400017201 0ustar00""" Ported to Python 3. 
""" from zope.interface import implementer from twisted.internet import defer from allmydata.interfaces import IRepairResults, ICheckResults from allmydata.mutable.publish import MutableData from allmydata.mutable.common import MODE_REPAIR from allmydata.mutable.servermap import ServerMap, ServermapUpdater @implementer(IRepairResults) class RepairResults(object): def __init__(self, smap): self.servermap = smap def set_successful(self, successful): self.successful = successful def get_successful(self): return self.successful def to_string(self): return "" class RepairRequiresWritecapError(Exception): """Repair currently requires a writecap.""" class MustForceRepairError(Exception): pass class Repairer(object): def __init__(self, node, check_results, storage_broker, history, monitor): self.node = node self.check_results = ICheckResults(check_results) assert check_results.get_storage_index() == node.get_storage_index() self._storage_broker = storage_broker self._history = history self._monitor = monitor def start(self, force=False): # download, then re-publish. If a server had a bad share, try to # replace it with a good one of the same shnum. # The normal repair operation should not be used to replace # application-specific merging of alternate versions: i.e if there # are multiple highest seqnums with different roothashes. In this # case, the application must use node.upload() (referencing the # servermap that indicates the multiple-heads condition), or # node.overwrite(). The repair() operation will refuse to run in # these conditions unless a force=True argument is provided. If # force=True is used, then the highest root hash will be reinforced. # Likewise, the presence of an unrecoverable latest version is an # unusual event, and should ideally be handled by retrying a couple # times (spaced out over hours or days) and hoping that new shares # will become available. If repair(force=True) is called, data will # be lost: a new seqnum will be generated with the same contents as # the most recent recoverable version, skipping over the lost # version. repair(force=False) will refuse to run in a situation like # this. # Repair is designed to fix the following injuries: # missing shares: add new ones to get at least N distinct ones # old shares: replace old shares with the latest version # bogus shares (bad sigs): replace the bad one with a good one # first, update the servermap in MODE_REPAIR, which files all shares # and makes sure we get the privkey. u = ServermapUpdater(self.node, self._storage_broker, self._monitor, ServerMap(), MODE_REPAIR) if self._history: self._history.notify_mapupdate(u.get_status()) d = u.update() d.addCallback(self._got_full_servermap, force) return d def _got_full_servermap(self, smap, force): best_version = smap.best_recoverable_version() if not best_version: # the file is damaged beyond repair rr = RepairResults(smap) rr.set_successful(False) return defer.succeed(rr) if smap.unrecoverable_newer_versions(): if not force: raise MustForceRepairError("There were unrecoverable newer " "versions, so force=True must be " "passed to the repair() operation") # continuing on means that node.upload() will pick a seqnum that # is higher than everything visible in the servermap, effectively # discarding the unrecoverable versions. 
if smap.needs_merge(): if not force: raise MustForceRepairError("There were multiple recoverable " "versions with identical seqnums, " "so force=True must be passed to " "the repair() operation") # continuing on means that smap.best_recoverable_version() will # pick the one with the highest roothash, and then node.upload() # will replace all shares with its contents # missing shares are handled during upload, which tries to find a # home for every share # old shares are handled during upload, which will replace any share # that was present in the servermap # bogus shares need to be managed here. We might notice a bogus share # during mapupdate (whether done for a filecheck or just before a # download) by virtue of it having an invalid signature. We might # also notice a bad hash in the share during verify or download. In # either case, the problem will be noted in the servermap, and the # bad share (along with its checkstring) will be recorded in # servermap.bad_shares . Publish knows that it should try and replace # these. # I chose to use the retrieve phase to ensure that the privkey is # available, to avoid the extra roundtrip that would occur if we, # say, added an smap.get_privkey() method. if not self.node.get_writekey(): raise RepairRequiresWritecapError("Sorry, repair currently requires a writecap, to set the write-enabler properly.") d = self.node.download_version(smap, best_version, fetch_privkey=True) d.addCallback(lambda data: MutableData(data)) d.addCallback(self.node.upload, smap) d.addCallback(self.get_results, smap) return d def get_results(self, res, smap): rr = RepairResults(smap) rr.set_successful(True) return rr tahoe_lafs-1.20.0/src/allmydata/mutable/retrieve.py0000644000000000000000000012463313615410400017217 0ustar00""" Ported to Python 3. 
""" from __future__ import annotations import time from itertools import count from zope.interface import implementer from twisted.internet import defer from twisted.python import failure from twisted.internet.interfaces import IPushProducer, IConsumer from foolscap.api import eventually, fireEventually, DeadReferenceError, \ RemoteException from allmydata.crypto import aes from allmydata.crypto import rsa from allmydata.interfaces import IRetrieveStatus, NotEnoughSharesError, \ DownloadStopped, MDMF_VERSION, SDMF_VERSION from allmydata.util.assertutil import _assert, precondition from allmydata.util import hashutil, log, mathutil, deferredutil from allmydata.util.dictutil import DictOfSets from allmydata.util.cputhreadpool import defer_to_thread from allmydata import hashtree, codec from allmydata.storage.server import si_b2a from allmydata.mutable.common import CorruptShareError, BadShareError, \ UncoordinatedWriteError, decrypt_privkey from allmydata.mutable.layout import MDMFSlotReadProxy @implementer(IRetrieveStatus) class RetrieveStatus(object): statusid_counter = count(0) def __init__(self): self.timings = {} self.timings["fetch_per_server"] = {} self.timings["decode"] = 0.0 self.timings["decrypt"] = 0.0 self.timings["cumulative_verify"] = 0.0 self._problems = {} self.active = True self.storage_index = None self.helper = False self.encoding = ("?","?") self.size = None self.status = "Not started" self.progress = 0.0 self.counter = next(self.statusid_counter) self.started = time.time() def get_started(self): return self.started def get_storage_index(self): return self.storage_index def get_encoding(self): return self.encoding def using_helper(self): return self.helper def get_size(self): return self.size def get_status(self): return self.status def get_progress(self): return self.progress def get_active(self): return self.active def get_counter(self): return self.counter def get_problems(self): return self._problems def add_fetch_timing(self, server, elapsed): if server not in self.timings["fetch_per_server"]: self.timings["fetch_per_server"][server] = [] self.timings["fetch_per_server"][server].append(elapsed) def accumulate_decode_time(self, elapsed): self.timings["decode"] += elapsed def accumulate_decrypt_time(self, elapsed): self.timings["decrypt"] += elapsed def set_storage_index(self, si): self.storage_index = si def set_helper(self, helper): self.helper = helper def set_encoding(self, k, n): self.encoding = (k, n) def set_size(self, size): self.size = size def set_status(self, status): self.status = status def set_progress(self, value): self.progress = value def set_active(self, value): self.active = value def add_problem(self, server, f): serverid = server.get_serverid() self._problems[serverid] = f class Marker(object): pass @implementer(IPushProducer) class Retrieve(object): # this class is currently single-use. Eventually (in MDMF) we will make # it multi-use, in which case you can call download(range) multiple # times, and each will have a separate response chain. However the # Retrieve object will remain tied to a specific version of the file, and # will use a single ServerMap instance. 
def __init__(self, filenode, storage_broker, servermap, verinfo, fetch_privkey=False, verify=False): self._node = filenode _assert(self._node.get_pubkey()) self._storage_broker = storage_broker self._storage_index = filenode.get_storage_index() _assert(self._node.get_readkey()) self._last_failure = None prefix = si_b2a(self._storage_index)[:5] self._log_number = log.msg("Retrieve(%r): starting" % prefix) self._running = True self._decoding = False self._bad_shares = set() self.servermap = servermap self.verinfo = verinfo # TODO: make it possible to use self.verinfo.datalength instead (seqnum, root_hash, IV, segsize, datalength, k, N, prefix, offsets_tuple) = self.verinfo self._data_length = datalength # during repair, we may be called upon to grab the private key, since # it wasn't picked up during a verify=False checker run, and we'll # need it for repair to generate a new version. self._need_privkey = verify or (fetch_privkey and not self._node.get_privkey()) if self._need_privkey: # TODO: Evaluate the need for this. We'll use it if we want # to limit how many queries are on the wire for the privkey # at once. self._privkey_query_markers = [] # one Marker for each time we've # tried to get the privkey. # verify means that we are using the downloader logic to verify all # of our shares. This tells the downloader a few things. # # 1. We need to download all of the shares. # 2. We don't need to decode or decrypt the shares, since our # caller doesn't care about the plaintext, only the # information about which shares are or are not valid. # 3. When we are validating readers, we need to validate the # signature on the prefix. Do we? We already do this in the # servermap update? self._verify = verify self._status = RetrieveStatus() self._status.set_storage_index(self._storage_index) self._status.set_helper(False) self._status.set_progress(0.0) self._status.set_active(True) self._status.set_size(datalength) self._status.set_encoding(k, N) self.readers = {} self._stopped = False self._pause_deferred = None self._offset = None self._read_length = None self.log("got seqnum %d" % self.verinfo[0]) def get_status(self): return self._status def log(self, *args, **kwargs): if "parent" not in kwargs: kwargs["parent"] = self._log_number if "facility" not in kwargs: kwargs["facility"] = "tahoe.mutable.retrieve" return log.msg(*args, **kwargs) def _set_current_status(self, state): seg = "%d/%d" % (self._current_segment, self._last_segment) self._status.set_status("segment %s (%s)" % (seg, state)) ################### # IPushProducer def pauseProducing(self): """ I am called by my download target if we have produced too much data for it to handle. I make the downloader stop producing new data until my resumeProducing method is called. """ if self._pause_deferred is not None: return # fired when the download is unpaused. self._old_status = self._status.get_status() self._set_current_status("paused") self._pause_deferred = defer.Deferred() def resumeProducing(self): """ I am called by my download target once it is ready to begin receiving data again. """ if self._pause_deferred is None: return p = self._pause_deferred self._pause_deferred = None self._status.set_status(self._old_status) eventually(p.callback, None) def stopProducing(self): self._stopped = True self.resumeProducing() def _check_for_paused(self, res): """ I am called just before a write to the consumer. I return a Deferred that eventually fires with the data that is to be written to the consumer. 
If the download has not been paused, the Deferred fires immediately. Otherwise, the Deferred fires when the downloader is unpaused. """ if self._pause_deferred is not None: d = defer.Deferred() self._pause_deferred.addCallback(lambda ignored: d.callback(res)) return d return res def _check_for_stopped(self, res): if self._stopped: raise DownloadStopped("our Consumer called stopProducing()") return res def download(self, consumer=None, offset=0, size=None): precondition(self._verify or IConsumer.providedBy(consumer)) if size is None: size = self._data_length - offset if self._verify: _assert(size == self._data_length, (size, self._data_length)) self.log("starting download") self._done_deferred = defer.Deferred() if consumer: self._consumer = consumer # we provide IPushProducer, so streaming=True, per IConsumer. self._consumer.registerProducer(self, streaming=True) self._started = time.time() self._started_fetching = time.time() if size == 0: # short-circuit the rest of the process self._done() else: self._start_download(consumer, offset, size) return self._done_deferred def _start_download(self, consumer, offset, size): precondition((0 <= offset < self._data_length) and (size > 0) and (offset+size <= self._data_length), (offset, size, self._data_length)) self._offset = offset self._read_length = size self._setup_encoding_parameters() self._setup_download() # The download process beyond this is a state machine. # _add_active_servers will select the servers that we want to use # for the download, and then attempt to start downloading. After # each segment, it will check for doneness, reacting to broken # servers and corrupt shares as necessary. If it runs out of good # servers before downloading all of the segments, _done_deferred # will errback. Otherwise, it will eventually callback with the # contents of the mutable file. self.loop() def loop(self): d = fireEventually(None) # avoid #237 recursion limit problem d.addCallback(lambda ign: self._activate_enough_servers()) d.addCallback(lambda ign: self._download_current_segment()) # when we're done, _download_current_segment will call _done. If we # aren't, it will call loop() again. d.addErrback(self._error) def _setup_download(self): self._status.set_status("Retrieving Shares") # how many shares do we need? (seqnum, root_hash, IV, segsize, datalength, k, N, prefix, offsets_tuple) = self.verinfo # first, which servers can we use? versionmap = self.servermap.make_versionmap() shares = versionmap[self.verinfo] # this sharemap is consumed as we decide to send requests self.remaining_sharemap = DictOfSets() for (shnum, server, timestamp) in shares: self.remaining_sharemap.add(shnum, server) # Reuse the SlotReader from the servermap. key = (self.verinfo, server.get_serverid(), self._storage_index, shnum) if key in self.servermap.proxies: reader = self.servermap.proxies[key] else: reader = MDMFSlotReadProxy(server.get_storage_server(), self._storage_index, shnum, None) reader.server = server self.readers[shnum] = reader if len(self.remaining_sharemap) < k: self._raise_notenoughshareserror() self.shares = {} # maps shnum to validated blocks self._active_readers = [] # list of active readers for this dl. self._block_hash_trees = {} # shnum => hashtree for i in range(self._total_shares): # So we don't have to do this later. 
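# (Editor's sketch -- requires Tahoe-LAFS; server names are hypothetical.)
# DictOfSets, used for remaining_sharemap above, is a dict whose values are
# sets, with add/discard helpers for building the shnum -> servers map.
from allmydata.util.dictutil import DictOfSets

remaining = DictOfSets()
remaining.add(0, "server-A")
remaining.add(0, "server-B")
remaining.add(1, "server-A")
assert remaining[0] == {"server-A", "server-B"}
remaining.discard(0, "server-B")
assert remaining[0] == {"server-A"}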
self._block_hash_trees[i] = hashtree.IncompleteHashTree(self._num_segments) # We need one share hash tree for the entire file; its leaves # are the roots of the block hash trees for the shares that # comprise it, and its root is in the verinfo. self.share_hash_tree = hashtree.IncompleteHashTree(N) self.share_hash_tree.set_hashes({0: root_hash}) def decode(self, blocks_and_salts, segnum): """ I am a helper method that the mutable file update process uses as a shortcut to decode and decrypt the segments that it needs to fetch in order to perform a file update. I take in a collection of blocks and salts, and pick some of those to make a segment with. I return the plaintext associated with that segment. """ # We don't need the block hash trees in this case. self._block_hash_trees = None self._offset = 0 self._read_length = self._data_length self._setup_encoding_parameters() # _decode_blocks() expects the output of a gatherResults that # contains the outputs of _validate_block() (each of which is a dict # mapping shnum to (block,salt) bytestrings). d = self._decode_blocks([blocks_and_salts], segnum) d.addCallback(self._decrypt_segment) return d def _setup_encoding_parameters(self): """ I set up the encoding parameters, including k, n, the number of segments associated with this file, and the segment decoders. """ (seqnum, root_hash, IV, segsize, datalength, k, n, known_prefix, offsets_tuple) = self.verinfo self._required_shares = k self._total_shares = n self._segment_size = segsize #self._data_length = datalength # set during __init__() if not IV: self._version = MDMF_VERSION else: self._version = SDMF_VERSION if datalength and segsize: self._num_segments = mathutil.div_ceil(datalength, segsize) self._tail_data_size = datalength % segsize else: self._num_segments = 0 self._tail_data_size = 0 self._segment_decoder = codec.CRSDecoder() self._segment_decoder.set_params(segsize, k, n) if not self._tail_data_size: self._tail_data_size = segsize self._tail_segment_size = mathutil.next_multiple(self._tail_data_size, self._required_shares) if self._tail_segment_size == self._segment_size: self._tail_decoder = self._segment_decoder else: self._tail_decoder = codec.CRSDecoder() self._tail_decoder.set_params(self._tail_segment_size, self._required_shares, self._total_shares) self.log("got encoding parameters: " "k: %d " "n: %d " "%d segments of %d bytes each (%d byte tail segment)" % \ (k, n, self._num_segments, self._segment_size, self._tail_segment_size)) # Our last task is to tell the downloader where to start and # where to stop. We use three parameters for that: # - self._start_segment: the segment that we need to start # downloading from. # - self._current_segment: the next segment that we need to # download. # - self._last_segment: The last segment that we were asked to # download. # # We say that the download is complete when # self._current_segment > self._last_segment. We use # self._start_segment and self._last_segment to know when to # strip things off of segments, and how much to strip. if self._offset: self.log("got offset: %d" % self._offset) # our start segment is the first segment containing the # offset we were given. 
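# (Editor's illustration -- standalone, hypothetical numbers.) Mapping a requested
# byte range onto segment indices, as the code below does: the first segment is
# the one containing 'offset', the last is the one containing the final byte.
segment_size = 131_072
offset, read_length = 100_000, 200_000
start_segment = offset // segment_size                      # 0
last_segment = (offset + read_length - 1) // segment_size   # 2 (byte 299,999 lives there)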
start = self._offset // self._segment_size _assert(start <= self._num_segments, start=start, num_segments=self._num_segments, offset=self._offset, segment_size=self._segment_size) self._start_segment = start self.log("got start segment: %d" % self._start_segment) else: self._start_segment = 0 # We might want to read only part of the file, and need to figure out # where to stop reading. Our end segment is the last segment # containing part of the segment that we were asked to read. _assert(self._read_length > 0, self._read_length) end_data = self._offset + self._read_length # We don't actually need to read the byte at end_data, but the one # before it. end = (end_data - 1) // self._segment_size _assert(0 <= end < self._num_segments, end=end, num_segments=self._num_segments, end_data=end_data, offset=self._offset, read_length=self._read_length, segment_size=self._segment_size) self._last_segment = end self.log("got end segment: %d" % self._last_segment) self._current_segment = self._start_segment def _activate_enough_servers(self): """ I populate self._active_readers with enough active readers to retrieve the contents of this mutable file. I am called before downloading starts, and (eventually) after each validation error, connection error, or other problem in the download. """ # TODO: It would be cool to investigate other heuristics for # reader selection. For instance, the cost (in time the user # spends waiting for their file) of selecting a really slow server # that happens to have a primary share is probably more than # selecting a really fast server that doesn't have a primary # share. Maybe the servermap could be extended to provide this # information; it could keep track of latency information while # it gathers more important data, and then this routine could # use that to select active readers. # # (these and other questions would be easier to answer with a # robust, configurable tahoe-lafs simulator, which modeled node # failures, differences in node speed, and other characteristics # that we expect storage servers to have. You could have # presets for really stable grids (like allmydata.com), # friendnets, make it easy to configure your own settings, and # then simulate the effect of big changes on these use cases # instead of just reasoning about what the effect might be. Out # of scope for MDMF, though.) # XXX: Why don't format= log messages work here? known_shnums = set(self.remaining_sharemap.keys()) used_shnums = set([r.shnum for r in self._active_readers]) unused_shnums = known_shnums - used_shnums if self._verify: new_shnums = unused_shnums # use them all elif len(self._active_readers) < self._required_shares: # need more shares more = self._required_shares - len(self._active_readers) # We favor lower numbered shares, since FEC is faster with # primary shares than with other shares, and lower-numbered # shares are more likely to be primary than higher numbered # shares. new_shnums = sorted(unused_shnums)[:more] if len(new_shnums) < more: # We don't have enough readers to retrieve the file; fail. self._raise_notenoughshareserror() else: new_shnums = [] self.log("adding %d new servers to the active list" % len(new_shnums)) for shnum in new_shnums: reader = self.readers[shnum] self._active_readers.append(reader) self.log("added reader for share %d" % shnum) # Each time we add a reader, we check to see if we need the # private key. If we do, we politely ask for it and then continue # computing. 
If we find that we haven't gotten it at the end of # segment decoding, then we'll take more drastic measures. if self._need_privkey and not self._node.is_readonly(): d = reader.get_encprivkey() d.addCallback(self._try_to_validate_privkey, reader, reader.server) # XXX: don't just drop the Deferred. We need error-reporting # but not flow-control here. def _try_to_validate_prefix(self, prefix, reader): """ I check that the prefix returned by a candidate server for retrieval matches the prefix that the servermap knows about (and, hence, the prefix that was validated earlier). If it does, I return True, which means that I approve of the use of the candidate server for segment retrieval. If it doesn't, I return False, which means that another server must be chosen. """ (seqnum, root_hash, IV, segsize, datalength, k, N, known_prefix, offsets_tuple) = self.verinfo if known_prefix != prefix: self.log("prefix from share %d doesn't match" % reader.shnum) raise UncoordinatedWriteError("Mismatched prefix -- this could " "indicate an uncoordinated write") # Otherwise, we're okay -- no issues. def _mark_bad_share(self, server, shnum, reader, f): """ I mark the given (server, shnum) as a bad share, which means that it will not be used anywhere else. There are several reasons to want to mark something as a bad share. These include: - A connection error to the server. - A mismatched prefix (that is, a prefix that does not match our local conception of the version information string). - A failing block hash, salt hash, share hash, or other integrity check. This method will ensure that readers that we wish to mark bad (for these reasons or other reasons) are not used for the rest of the download. Additionally, it will attempt to tell the remote server (with no guarantee of success) that its share is corrupt. """ self.log("marking share %d on server %r as bad" % \ (shnum, server.get_name())) prefix = self.verinfo[-2] self.servermap.mark_bad_share(server, shnum, prefix) self._bad_shares.add((server, shnum, f)) self._status.add_problem(server, f) self._last_failure = f # Remove the reader from _active_readers self._active_readers.remove(reader) for shnum in list(self.remaining_sharemap.keys()): self.remaining_sharemap.discard(shnum, reader.server) if f.check(BadShareError): self.notify_server_corruption(server, shnum, str(f.value)) def _download_current_segment(self): """ I download, validate, decode, decrypt, and assemble the segment that this Retrieve is currently responsible for downloading. """ if self._current_segment > self._last_segment: # No more segments to download, we're done. self.log("got plaintext, done") return self._done() elif self._verify and len(self._active_readers) == 0: self.log("no more good shares, no need to keep verifying") return self._done() self.log("on segment %d of %d" % (self._current_segment + 1, self._num_segments)) d = self._process_segment(self._current_segment) d.addCallback(lambda ign: self.loop()) return d def _process_segment(self, segnum): """ I download, validate, decode, and decrypt one segment of the file that this Retrieve is retrieving. This means coordinating the process of getting k blocks of that file, validating them, assembling them into one segment with the decoder, and then decrypting them. """ self.log("processing segment %d" % segnum) # TODO: The old code uses a marker. Should this code do that # too? What did the Marker do? # We need to ask each of our active readers for its block and # salt. We will then validate those. 
If validation is # successful, we will assemble the results into plaintext. ds = [] for reader in self._active_readers: started = time.time() d1 = reader.get_block_and_salt(segnum) d2,d3 = self._get_needed_hashes(reader, segnum) d = deferredutil.gatherResults([d1,d2,d3]) d.addCallback(self._validate_block, segnum, reader, reader.server, started) # _handle_bad_share takes care of recoverable errors (by dropping # that share and returning None). Any other errors (i.e. code # bugs) are passed through and cause the retrieve to fail. d.addErrback(self._handle_bad_share, [reader]) ds.append(d) dl = deferredutil.gatherResults(ds) if self._verify: dl.addCallback(lambda ignored: "") dl.addCallback(self._set_segment) else: dl.addCallback(self._maybe_decode_and_decrypt_segment, segnum) return dl def _maybe_decode_and_decrypt_segment(self, results, segnum): """ I take the results of fetching and validating the blocks from _process_segment. If validation and fetching succeeded without incident, I will proceed with decoding and decryption. Otherwise, I will do nothing. """ self.log("trying to decode and decrypt segment %d" % segnum) # 'results' is the output of a gatherResults set up in # _process_segment(). Each component Deferred will either contain the # non-Failure output of _validate_block() for a single block (i.e. # {segnum:(block,salt)}), or None if _validate_block threw an # exception and _validation_or_decoding_failed handled it (by # dropping that server). if None in results: self.log("some validation operations failed; not proceeding") return defer.succeed(None) self.log("everything looks ok, building segment %d" % segnum) d = self._decode_blocks(results, segnum) d.addCallback(self._decrypt_segment) # check to see whether we've been paused before writing # anything. d.addCallback(self._check_for_paused) d.addCallback(self._check_for_stopped) d.addCallback(self._set_segment) return d def _set_segment(self, segment): """ Given a plaintext segment, I register that segment with the target that is handling the file download. """ self.log("got plaintext for segment %d" % self._current_segment) if self._read_length == 0: self.log("on first+last segment, size=0, using 0 bytes") segment = b"" if self._current_segment == self._last_segment: # trim off the tail wanted = (self._offset + self._read_length) % self._segment_size if wanted != 0: self.log("on the last segment: using first %d bytes" % wanted) segment = segment[:wanted] else: self.log("on the last segment: using all %d bytes" % len(segment)) if self._current_segment == self._start_segment: # Trim off the head, if offset != 0. This should also work if # start==last, because we trim the tail first. skip = self._offset % self._segment_size self.log("on the first segment: skipping first %d bytes" % skip) segment = segment[skip:] if not self._verify: self._consumer.write(segment) else: # we don't care about the plaintext if we are doing a verify. segment = None self._current_segment += 1 def _handle_bad_share(self, f, readers): """ I am called when a block or a salt fails to correctly validate, or when the decryption or decoding operation fails for some reason. I react to this failure by notifying the remote server of corruption, and then removing the remote server from further activity. """ # these are the errors we can tolerate: by giving up on this share # and finding others to replace it. Any other errors (i.e. coding # bugs) are re-raised, causing the download to fail. 
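# (Editor's illustration -- standalone toy numbers.) How _set_segment above trims
# the first and last plaintext segments down to the requested byte range; the
# tail is trimmed before the head, so a read confined to a single segment still works.
segment_size = 10
offset, read_length = 3, 12                     # want bytes 3..14 of the file
segments = [b"0123456789", b"ABCDEFGHIJ"]

tail = segments[-1][: (offset + read_length) % segment_size]   # b"ABCDE"
head = segments[0][offset % segment_size:]                     # b"3456789"
assert head + tail == b"3456789ABCDE"                          # 12 bytes, as requested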
f.trap(DeadReferenceError, RemoteException, BadShareError) # DeadReferenceError happens when we try to fetch data from a server # that has gone away. RemoteException happens if the server had an # internal error. BadShareError encompasses: (UnknownVersionError, # LayoutInvalid, struct.error) which happen when we get obviously # wrong data, and CorruptShareError which happens later, when we # perform integrity checks on the data. precondition(isinstance(readers, list), readers) bad_shnums = [reader.shnum for reader in readers] self.log("validation or decoding failed on share(s) %s, server(s) %s " ", segment %d: %s" % \ (bad_shnums, readers, self._current_segment, str(f))) for reader in readers: self._mark_bad_share(reader.server, reader.shnum, reader, f) return None @deferredutil.async_to_deferred async def _validate_block(self, results, segnum, reader, server, started): """ I validate a block from one share on a remote server. """ # Grab the part of the block hash tree that is necessary to # validate this block, then generate the block hash root. self.log("validating share %d for segment %d" % (reader.shnum, segnum)) elapsed = time.time() - started self._status.add_fetch_timing(server, elapsed) self._set_current_status("validating blocks") block_and_salt, blockhashes, sharehashes = results block, salt = block_and_salt _assert(isinstance(block, bytes), (block, salt)) blockhashes = dict(enumerate(blockhashes)) self.log("the reader gave me the following blockhashes: %s" % \ list(blockhashes.keys())) self.log("the reader gave me the following sharehashes: %s" % \ list(sharehashes.keys())) bht = self._block_hash_trees[reader.shnum] if bht.needed_hashes(segnum, include_leaf=True): try: bht.set_hashes(blockhashes) except (hashtree.BadHashError, hashtree.NotEnoughHashesError, \ IndexError) as e: raise CorruptShareError(server, reader.shnum, "block hash tree failure: %s" % e) if self._version == MDMF_VERSION: blockhash = await defer_to_thread(hashutil.block_hash, salt + block) else: blockhash = await defer_to_thread(hashutil.block_hash, block) # If this works without an error, then validation is # successful. try: bht.set_hashes(leaves={segnum: blockhash}) except (hashtree.BadHashError, hashtree.NotEnoughHashesError, \ IndexError) as e: raise CorruptShareError(server, reader.shnum, "block hash tree failure: %s" % e) # Reaching this point means that we know that this segment # is correct. Now we need to check to see whether the share # hash chain is also correct. # SDMF wrote share hash chains that didn't contain the # leaves, which would be produced from the block hash tree. # So we need to validate the block hash tree first. If # successful, then bht[0] will contain the root for the # shnum, which will be a leaf in the share hash tree, which # will allow us to validate the rest of the tree. try: self.share_hash_tree.set_hashes(hashes=sharehashes, leaves={reader.shnum: bht[0]}) except (hashtree.BadHashError, hashtree.NotEnoughHashesError, \ IndexError) as e: raise CorruptShareError(server, reader.shnum, "corrupt hashes: %s" % e) self.log('share %d is valid for segment %d' % (reader.shnum, segnum)) return {reader.shnum: (block, salt)} def _get_needed_hashes(self, reader, segnum): """ I get the hashes needed to validate segnum from the reader, then return to my caller when this is done. """ bht = self._block_hash_trees[reader.shnum] needed = bht.needed_hashes(segnum, include_leaf=True) # The root of the block hash tree is also a leaf in the share # hash tree. 
So we don't need to fetch it from the remote # server. In the case of files with one segment, this means that # we won't fetch any block hash tree from the remote server, # since the hash of each share of the file is the entire block # hash tree, and is a leaf in the share hash tree. This is fine, # since any share corruption will be detected in the share hash # tree. #needed.discard(0) self.log("getting blockhashes for segment %d, share %d: %s" % \ (segnum, reader.shnum, str(needed))) # TODO is force_remote necessary here? d1 = reader.get_blockhashes(needed, force_remote=False) if self.share_hash_tree.needed_hashes(reader.shnum): need = self.share_hash_tree.needed_hashes(reader.shnum) self.log("also need sharehashes for share %d: %s" % (reader.shnum, str(need))) d2 = reader.get_sharehashes(need, force_remote=False) else: d2 = defer.succeed({}) # the logic in the next method # expects a dict return d1,d2 def _decode_blocks(self, results, segnum): """ I take a list of k blocks and salts, and decode that into a single encrypted segment. """ # 'results' is one or more dicts (each {shnum:(block,salt)}), and we # want to merge them all blocks_and_salts = {} for d in results: blocks_and_salts.update(d) # All of these blocks should have the same salt; in SDMF, it is # the file-wide IV, while in MDMF it is the per-segment salt. In # either case, we just need to get one of them and use it. # # d.items()[0] is like (shnum, (block, salt)) # d.items()[0][1] is like (block, salt) # d.items()[0][1][1] is the salt. salt = list(blocks_and_salts.items())[0][1][1] # Next, extract just the blocks from the dict. We'll use the # salt in the next step. share_and_shareids = [(k, v[0]) for k, v in blocks_and_salts.items()] d2 = dict(share_and_shareids) shareids = [] shares = [] for shareid, share in d2.items(): shareids.append(shareid) shares.append(share) self._set_current_status("decoding") started = time.time() _assert(len(shareids) >= self._required_shares, len(shareids)) # zfec really doesn't want extra shares shareids = shareids[:self._required_shares] shares = shares[:self._required_shares] self.log("decoding segment %d" % segnum) if segnum == self._num_segments - 1: d = self._tail_decoder.decode(shares, shareids) else: d = self._segment_decoder.decode(shares, shareids) # For larger shares, this can take a few milliseconds. As such, we want # to unblock the event loop. In newer Python b"".join() will release # the GIL: https://github.com/python/cpython/issues/80232 @deferredutil.async_to_deferred async def _got_buffers(buffers): return await defer_to_thread(lambda: b"".join(buffers)) d.addCallback(_got_buffers) def _process(segment): self.log(format="now decoding segment %(segnum)s of %(numsegs)s", segnum=segnum, numsegs=self._num_segments, level=log.NOISY) self.log(" joined length %d, datalength %d" % (len(segment), self._data_length)) if segnum == self._num_segments - 1: size_to_use = self._tail_data_size else: size_to_use = self._segment_size segment = segment[:size_to_use] self.log(" segment len=%d" % len(segment)) self._status.accumulate_decode_time(time.time() - started) return segment, salt d.addCallback(_process) return d @deferredutil.async_to_deferred async def _decrypt_segment(self, segment_and_salt): """ I take a single segment and its salt, and decrypt it. I return the plaintext of the segment that is in my argument. 
""" segment, salt = segment_and_salt self._set_current_status("decrypting") self.log("decrypting segment %d" % self._current_segment) started = time.time() readkey = self._node.get_readkey() def decrypt(): key = hashutil.ssk_readkey_data_hash(salt, readkey) decryptor = aes.create_decryptor(key) return aes.decrypt_data(decryptor, segment) plaintext = await defer_to_thread(decrypt) self._status.accumulate_decrypt_time(time.time() - started) return plaintext def notify_server_corruption(self, server, shnum, reason): if isinstance(reason, str): reason = reason.encode("utf-8") storage_server = server.get_storage_server() storage_server.advise_corrupt_share( b"mutable", self._storage_index, shnum, reason, ) @deferredutil.async_to_deferred async def _try_to_validate_privkey(self, enc_privkey, reader, server): node_writekey = self._node.get_writekey() def get_privkey(): alleged_privkey_s = decrypt_privkey(node_writekey, enc_privkey) alleged_writekey = hashutil.ssk_writekey_hash(alleged_privkey_s) if alleged_writekey != node_writekey: return None privkey, _ = rsa.create_signing_keypair_from_string(alleged_privkey_s) return privkey privkey = await defer_to_thread(get_privkey) if privkey is None: self.log("invalid privkey from %s shnum %d" % (reader, reader.shnum), level=log.WEIRD, umid="YIw4tA") if self._verify: self.servermap.mark_bad_share(server, reader.shnum, self.verinfo[-2]) e = CorruptShareError(server, reader.shnum, "invalid privkey") f = failure.Failure(e) self._bad_shares.add((server, reader.shnum, f)) return # it's good self.log("got valid privkey from shnum %d on reader %s" % (reader.shnum, reader)) self._node._populate_encprivkey(enc_privkey) self._node._populate_privkey(privkey) self._need_privkey = False def _done(self): """ I am called by _download_current_segment when the download process has finished successfully. After making some useful logging statements, I return the decrypted contents to the owner of this Retrieve object through self._done_deferred. """ self._running = False self._status.set_active(False) now = time.time() self._status.timings['total'] = now - self._started self._status.timings['fetch'] = now - self._started_fetching self._status.set_status("Finished") self._status.set_progress(1.0) # remember the encoding parameters, use them again next time (seqnum, root_hash, IV, segsize, datalength, k, N, prefix, offsets_tuple) = self.verinfo self._node._populate_required_shares(k) self._node._populate_total_shares(N) if self._verify: ret = self._bad_shares self.log("done verifying, found %d bad shares" % len(ret)) else: # TODO: upload status here? ret = self._consumer self._consumer.unregisterProducer() eventually(self._done_deferred.callback, ret) def _raise_notenoughshareserror(self): """ I am called when there are not enough active servers left to complete the download. After making some useful logging statements, I throw an exception to that effect to the caller of this Retrieve object through self._done_deferred. 
""" format = ("ran out of servers: " "have %(have)d of %(total)d segments; " "found %(bad)d bad shares; " "have %(remaining)d remaining shares of the right version; " "encoding %(k)d-of-%(n)d") args = {"have": self._current_segment, "total": self._num_segments, "need": self._last_segment, "k": self._required_shares, "n": self._total_shares, "bad": len(self._bad_shares), "remaining": len(self.remaining_sharemap), } raise NotEnoughSharesError("%s, last failure: %s" % (format % args, str(self._last_failure))) def _error(self, f): # all errors, including NotEnoughSharesError, land here self._running = False self._status.set_active(False) now = time.time() self._status.timings['total'] = now - self._started self._status.timings['fetch'] = now - self._started_fetching self._status.set_status("Failed") eventually(self._done_deferred.errback, f) tahoe_lafs-1.20.0/src/allmydata/mutable/servermap.py0000644000000000000000000015013513615410400017372 0ustar00""" Ported to Python 3. """ from __future__ import annotations from six import ensure_str import sys, time, copy from zope.interface import implementer from itertools import count from collections import defaultdict from twisted.internet import defer from twisted.python import failure from foolscap.api import DeadReferenceError, RemoteException, eventually, \ fireEventually from allmydata.crypto.error import BadSignature from allmydata.crypto import rsa from allmydata.util import base32, hashutil, log, deferredutil from allmydata.util.dictutil import DictOfSets from allmydata.storage.server import si_b2a from allmydata.interfaces import IServermapUpdaterStatus from allmydata.mutable.common import MODE_CHECK, MODE_ANYTHING, MODE_WRITE, \ MODE_READ, MODE_REPAIR, CorruptShareError, decrypt_privkey from allmydata.mutable.layout import SIGNED_PREFIX_LENGTH, MDMFSlotReadProxy @implementer(IServermapUpdaterStatus) class UpdateStatus(object): statusid_counter = count(0) def __init__(self): self.timings = {} self.timings["per_server"] = defaultdict(list) self.timings["cumulative_verify"] = 0.0 self.privkey_from = None self.problems = {} self.active = True self.storage_index = None self.mode = "?" self.status = "Not started" self.progress = 0.0 self.counter = next(self.statusid_counter) self.started = time.time() self.finished = None def add_per_server_time(self, server, op, sent, elapsed): assert op in ("query", "late", "privkey") self.timings["per_server"][server].append((op,sent,elapsed)) def get_started(self): return self.started def get_finished(self): return self.finished def get_storage_index(self): return self.storage_index def get_mode(self): return self.mode def get_servermap(self): return self.servermap def get_privkey_from(self): return self.privkey_from def using_helper(self): return False def get_size(self): return "-NA-" def get_status(self): return self.status def get_progress(self): return self.progress def get_active(self): return self.active def get_counter(self): return self.counter def set_storage_index(self, si): self.storage_index = si def set_mode(self, mode): self.mode = mode def set_privkey_from(self, server): self.privkey_from = server def set_status(self, status): self.status = status def set_progress(self, value): self.progress = value def set_active(self, value): self.active = value def set_finished(self, when): self.finished = when class ServerMap(object): """I record the placement of mutable shares. This object records which shares (of various versions) are located on which servers. 
One purpose I serve is to inform callers about which versions of the mutable file are recoverable and 'current'. A second purpose is to serve as a state marker for test-and-set operations. I am passed out of retrieval operations and back into publish operations, which means 'publish this new version, but only if nothing has changed since I last retrieved this data'. This reduces the chances of clobbering a simultaneous (uncoordinated) write. @var _known_shares: a dictionary, mapping a (server, shnum) tuple to a (versionid, timestamp) tuple. Each 'versionid' is a tuple of (seqnum, root_hash, IV, segsize, datalength, k, N, signed_prefix, offsets) @ivar _bad_shares: dict with keys of (server, shnum) tuples, describing shares that I should ignore (because a previous user of the servermap determined that they were invalid). The updater only locates a certain number of shares: if some of these turn out to have integrity problems and are unusable, the caller will need to mark those shares as bad, then re-update the servermap, then try again. The dict maps (server, shnum) tuple to old checkstring. """ def __init__(self): self._known_shares = {} self.unreachable_servers = set() # servers that didn't respond to queries self.reachable_servers = set() # servers that did respond to queries self._problems = [] # mostly for debugging self._bad_shares = {} # maps (server,shnum) to old checkstring self._last_update_mode = None self._last_update_time = 0 self.proxies = {} self.update_data = {} # shnum -> [(verinfo,(blockhashes,start,end)),..] # where blockhashes is a list of bytestrings (the result of # layout.MDMFSlotReadProxy.get_blockhashes), and start/end are both # (block,salt) tuple-of-bytestrings from get_block_and_salt() def copy(self): s = ServerMap() s._known_shares = self._known_shares.copy() # tuple->tuple s.unreachable_servers = set(self.unreachable_servers) s.reachable_servers = set(self.reachable_servers) s._problems = self._problems[:] s._bad_shares = self._bad_shares.copy() # tuple->str s._last_update_mode = self._last_update_mode s._last_update_time = self._last_update_time s.update_data = copy.deepcopy(self.update_data) return s def get_reachable_servers(self): return self.reachable_servers def mark_server_reachable(self, server): self.reachable_servers.add(server) def mark_server_unreachable(self, server): self.unreachable_servers.add(server) def mark_bad_share(self, server, shnum, checkstring): """This share was found to be bad, either in the checkstring or signature (detected during mapupdate), or deeper in the share (detected at retrieve time). Remove it from our list of useful shares, and remember that it is bad so we don't add it back again later. We record the share's old checkstring (which might be corrupted or badly signed) so that a repair operation can do the test-and-set using it as a reference. 
""" assert isinstance(checkstring, bytes) key = (server, shnum) # record checkstring self._bad_shares[key] = checkstring self._known_shares.pop(key, None) def get_bad_shares(self): # key=(server,shnum) -> checkstring return self._bad_shares def add_new_share(self, server, shnum, verinfo, timestamp): """We've written a new share out, replacing any that was there before.""" key = (server, shnum) self._bad_shares.pop(key, None) self._known_shares[key] = (verinfo, timestamp) def add_problem(self, f): self._problems.append(f) def get_problems(self): return self._problems def set_last_update(self, mode, when): self._last_update_mode = mode self._last_update_time = when def get_last_update(self): return (self._last_update_mode, self._last_update_time) def dump(self, out=sys.stdout): print("servermap:", file=out) for ( (server, shnum), (verinfo, timestamp) ) in list(self._known_shares.items()): (seqnum, root_hash, IV, segsize, datalength, k, N, prefix, offsets_tuple) = verinfo print("[%s]: sh#%d seq%d-%s %d-of-%d len%d" % (str(server.get_name(), "utf-8"), shnum, seqnum, str(base32.b2a(root_hash)[:4], "utf-8"), k, N, datalength), file=out) if self._problems: print("%d PROBLEMS" % len(self._problems), file=out) for f in self._problems: print(str(f), file=out) return out def all_servers(self): return set([server for (server, shnum) in self._known_shares]) def all_servers_for_version(self, verinfo): """Return a set of servers that hold shares for the given version.""" return set([server for ( (server, shnum), (verinfo2, timestamp) ) in self._known_shares.items() if verinfo == verinfo2]) def get_known_shares(self): # maps (server,shnum) to (versionid,timestamp) return self._known_shares def make_sharemap(self): """Return a dict that maps shnum to a set of servers that hold it.""" sharemap = DictOfSets() for (server, shnum) in self._known_shares: sharemap.add(shnum, server) return sharemap def make_versionmap(self): """Return a dict that maps versionid to sets of (shnum, server, timestamp) tuples.""" versionmap = DictOfSets() for ( (server, shnum), (verinfo, timestamp) ) in list(self._known_shares.items()): versionmap.add(verinfo, (shnum, server, timestamp)) return versionmap def debug_shares_on_server(self, server): # used by tests return set([shnum for (s, shnum) in self._known_shares if s == server]) def version_on_server(self, server, shnum): key = (server, shnum) if key in self._known_shares: (verinfo, timestamp) = self._known_shares[key] return verinfo return None def shares_available(self): """Return a dict that maps verinfo to tuples of (num_distinct_shares, k, N) tuples.""" versionmap = self.make_versionmap() all_shares = {} for verinfo, shares in list(versionmap.items()): s = set() for (shnum, server, timestamp) in shares: s.add(shnum) (seqnum, root_hash, IV, segsize, datalength, k, N, prefix, offsets_tuple) = verinfo all_shares[verinfo] = (len(s), k, N) return all_shares def highest_seqnum(self): available = self.shares_available() seqnums = [verinfo[0] for verinfo in available.keys()] seqnums.append(0) return max(seqnums) def summarize_version(self, verinfo): """Take a versionid, return a string that describes it.""" (seqnum, root_hash, IV, segsize, datalength, k, N, prefix, offsets_tuple) = verinfo return "seq%d-%s" % (seqnum, str(base32.b2a(root_hash)[:4], "utf-8")) def summarize_versions(self): """Return a string describing which versions we know about.""" versionmap = self.make_versionmap() bits = [] for (verinfo, shares) in list(versionmap.items()): vstr = 
self.summarize_version(verinfo) shnums = set([shnum for (shnum, server, timestamp) in shares]) bits.append("%d*%s" % (len(shnums), vstr)) return "/".join(bits) def recoverable_versions(self): """Return a set of versionids, one for each version that is currently recoverable.""" versionmap = self.make_versionmap() recoverable_versions = set() for (verinfo, shares) in list(versionmap.items()): (seqnum, root_hash, IV, segsize, datalength, k, N, prefix, offsets_tuple) = verinfo shnums = set([shnum for (shnum, server, timestamp) in shares]) if len(shnums) >= k: # this one is recoverable recoverable_versions.add(verinfo) return recoverable_versions def unrecoverable_versions(self): """Return a set of versionids, one for each version that is currently unrecoverable.""" versionmap = self.make_versionmap() unrecoverable_versions = set() for (verinfo, shares) in list(versionmap.items()): (seqnum, root_hash, IV, segsize, datalength, k, N, prefix, offsets_tuple) = verinfo shnums = set([shnum for (shnum, server, timestamp) in shares]) if len(shnums) < k: unrecoverable_versions.add(verinfo) return unrecoverable_versions def best_recoverable_version(self): """Return a single versionid, for the so-called 'best' recoverable version. Sequence number is the primary sort criteria, followed by root hash. Returns None if there are no recoverable versions.""" recoverable = list(self.recoverable_versions()) recoverable.sort() if recoverable: return recoverable[-1] return None def size_of_version(self, verinfo): """Given a versionid (perhaps returned by best_recoverable_version), return the size of the file in bytes.""" (seqnum, root_hash, IV, segsize, datalength, k, N, prefix, offsets_tuple) = verinfo return datalength def unrecoverable_newer_versions(self): # Return a dict of versionid -> health, for versions that are # unrecoverable and have later seqnums than any recoverable versions. # These indicate that a write will lose data. versionmap = self.make_versionmap() healths = {} # maps verinfo to (found,k) unrecoverable = set() highest_recoverable_seqnum = -1 for (verinfo, shares) in list(versionmap.items()): (seqnum, root_hash, IV, segsize, datalength, k, N, prefix, offsets_tuple) = verinfo shnums = set([shnum for (shnum, server, timestamp) in shares]) healths[verinfo] = (len(shnums),k) if len(shnums) < k: unrecoverable.add(verinfo) else: highest_recoverable_seqnum = max(seqnum, highest_recoverable_seqnum) newversions = {} for verinfo in unrecoverable: (seqnum, root_hash, IV, segsize, datalength, k, N, prefix, offsets_tuple) = verinfo if seqnum > highest_recoverable_seqnum: newversions[verinfo] = healths[verinfo] return newversions def needs_merge(self): # return True if there are multiple recoverable versions with the # same seqnum, meaning that MutableFileNode.read_best_version is not # giving you the whole story, and that using its data to do a # subsequent publish will lose information. recoverable_seqnums = [verinfo[0] for verinfo in self.recoverable_versions()] for seqnum in recoverable_seqnums: if recoverable_seqnums.count(seqnum) > 1: return True return False def get_update_data_for_share_and_verinfo(self, shnum, verinfo): """ I return the update data for the given shnum """ update_data = self.update_data[shnum] update_datum = [i[1] for i in update_data if i[0] == verinfo][0] return update_datum def set_update_data_for_share_and_verinfo(self, shnum, verinfo, data): """ I record the block hash tree for the given shnum. 
""" self.update_data.setdefault(shnum , []).append((verinfo, data)) class ServermapUpdater(object): def __init__(self, filenode, storage_broker, monitor, servermap, mode=MODE_READ, add_lease=False, update_range=None): """I update a servermap, locating a sufficient number of useful shares and remembering where they are located. """ self._node = filenode self._storage_broker = storage_broker self._monitor = monitor self._servermap = servermap self.mode = mode self._add_lease = add_lease self._running = True self._storage_index = filenode.get_storage_index() self._last_failure = None self._status = UpdateStatus() self._status.set_storage_index(self._storage_index) self._status.set_progress(0.0) self._status.set_mode(mode) self._servers_responded = set() # how much data should we read? # SDMF: # * if we only need the checkstring, then [0:75] # * if we need to validate the checkstring sig, then [543ish:799ish] # * if we need the verification key, then [107:436ish] # * the offset table at [75:107] tells us about the 'ish' # * if we need the encrypted private key, we want [-1216ish:] # * but we can't read from negative offsets # * the offset table tells us the 'ish', also the positive offset # MDMF: # * Checkstring? [0:72] # * If we want to validate the checkstring, then [0:72], [143:?] -- # the offset table will tell us for sure. # * If we need the verification key, we have to consult the offset # table as well. # At this point, we don't know which we are. Our filenode can # tell us, but it might be lying -- in some cases, we're # responsible for telling it which kind of file it is. self._read_size = 4000 if mode == MODE_CHECK: # we use unpack_prefix_and_signature, so we need 1k self._read_size = 1000 self._need_privkey = False if mode in (MODE_WRITE, MODE_REPAIR) and not self._node.get_privkey(): self._need_privkey = True # check+repair: repair requires the privkey, so if we didn't happen # to ask for it during the check, we'll have problems doing the # publish. self.fetch_update_data = False if mode == MODE_WRITE and update_range: # We're updating the servermap in preparation for an # in-place file update, so we need to fetch some additional # data from each share that we find. assert len(update_range) == 2 self.start_segment = update_range[0] self.end_segment = update_range[1] self.fetch_update_data = True prefix = si_b2a(self._storage_index)[:5] self._log_number = log.msg(format="SharemapUpdater(%(si)s): starting (%(mode)s)", si=prefix, mode=mode) def get_status(self): return self._status def log(self, *args, **kwargs): if "parent" not in kwargs: kwargs["parent"] = self._log_number if "facility" not in kwargs: kwargs["facility"] = "tahoe.mutable.mapupdate" return log.msg(*args, **kwargs) def update(self): """Update the servermap to reflect current conditions. Returns a Deferred that fires with the servermap once the update has finished.""" self._started = time.time() self._status.set_active(True) # self._valid_versions is a set of validated verinfo tuples. We just # use it to remember which versions had valid signatures, so we can # avoid re-checking the signatures for each share. self._valid_versions = set() self._done_deferred = defer.Deferred() # first, which servers should be talk to? Any that were in our old # servermap, plus "enough" others. self._queries_completed = 0 sb = self._storage_broker # All of the servers, permuted by the storage index, as usual. 
full_serverlist = list(sb.get_servers_for_psi(self._storage_index)) self.full_serverlist = full_serverlist # for use later, immutable self.extra_servers = full_serverlist[:] # servers are removed as we use them self._good_servers = set() # servers who had some shares self._servers_with_shares = set() #servers that we know have shares now self._empty_servers = set() # servers who don't have any shares self._bad_servers = set() # servers to whom our queries failed k = self._node.get_required_shares() # For what cases can these conditions work? if k is None: # make a guess k = 3 N = self._node.get_total_shares() if N is None: N = 10 self.EPSILON = k # we want to send queries to at least this many servers (although we # might not wait for all of their answers to come back) self.num_servers_to_query = k + self.EPSILON if self.mode in (MODE_CHECK, MODE_REPAIR): # We want to query all of the servers. initial_servers_to_query = list(full_serverlist) must_query = set(initial_servers_to_query) self.extra_servers = [] elif self.mode == MODE_WRITE: # we're planning to replace all the shares, so we want a good # chance of finding them all. We will keep searching until we've # seen epsilon that don't have a share. # We don't query all of the servers because that could take a while. self.num_servers_to_query = N + self.EPSILON initial_servers_to_query, must_query = self._build_initial_querylist() self.required_num_empty_servers = self.EPSILON # TODO: arrange to read lots of data from k-ish servers, to avoid # the extra round trip required to read large directories. This # might also avoid the round trip required to read the encrypted # private key. else: # MODE_READ, MODE_ANYTHING # 2*k servers is good enough. initial_servers_to_query, must_query = self._build_initial_querylist() # this is a set of servers that we are required to get responses # from: they are servers who used to have a share, so we need to know # where they currently stand, even if that means we have to wait for # a silently-lost TCP connection to time out. We remove servers from # this set as we get responses. self._must_query = set(must_query) # now initial_servers_to_query contains the servers that we should # ask, self.must_query contains the servers that we must have heard # from before we can consider ourselves finished, and # self.extra_servers contains the overflow (servers that we should # tap if we don't get enough responses) # I guess that self._must_query is a subset of # initial_servers_to_query? assert must_query.issubset(initial_servers_to_query) self._send_initial_requests(initial_servers_to_query) self._status.timings["initial_queries"] = time.time() - self._started return self._done_deferred def _build_initial_querylist(self): # we send queries to everyone who was already in the sharemap initial_servers_to_query = set(self._servermap.all_servers()) # and we must wait for responses from them must_query = set(initial_servers_to_query) while ((self.num_servers_to_query > len(initial_servers_to_query)) and self.extra_servers): initial_servers_to_query.add(self.extra_servers.pop(0)) return initial_servers_to_query, must_query def _send_initial_requests(self, serverlist): self._status.set_status("Sending %d initial queries" % len(serverlist)) self._queries_outstanding = set() for server in serverlist: self._queries_outstanding.add(server) self._do_query(server, self._storage_index, self._read_size) if not serverlist: # there is nobody to ask, so we need to short-circuit the state # machine. 
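# Illustrative sketch: the query-sizing rules applied above, collected into a
# hypothetical helper. The MODE_* values are stand-ins for the constants in
# allmydata.mutable.common, and the k=3 / N=10 fallbacks mirror the guesses
# made when the filenode does not yet know its encoding parameters.
MODE_CHECK, MODE_REPAIR, MODE_WRITE, MODE_READ = "check", "repair", "write", "read"

def num_servers_to_query(mode, k=None, N=None, total_servers=0):
    k = 3 if k is None else k
    N = 10 if N is None else N
    epsilon = k
    if mode in (MODE_CHECK, MODE_REPAIR):
        return total_servers          # query every known server
    if mode == MODE_WRITE:
        return N + epsilon            # want a good chance of finding all shares
    return k + epsilon                # MODE_READ / MODE_ANYTHING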
d = defer.maybeDeferred(self._check_for_done, None) d.addErrback(self._fatal_error) # control flow beyond this point: state machine. Receiving responses # from queries is the input. We might send out more queries, or we # might produce a result. return None def _do_query(self, server, storage_index, readsize): self.log(format="sending query to [%(name)s], readsize=%(readsize)d", name=server.get_name(), readsize=readsize, level=log.NOISY) started = time.time() self._queries_outstanding.add(server) d = self._do_read(server, storage_index, [], [(0, readsize)]) d.addCallback(self._got_results, server, readsize, storage_index, started) d.addErrback(self._query_failed, server) # errors that aren't handled by _query_failed (and errors caused by # _query_failed) get logged, but we still want to check for doneness. d.addErrback(log.err) d.addErrback(self._fatal_error) d.addCallback(self._check_for_done) return d def _do_read(self, server, storage_index, shnums, readv): """ If self._add_lease is true, a lease is added, and the result only fires once the least has also been added. """ ss = server.get_storage_server() if self._add_lease: # send an add-lease message in parallel. The results are handled # separately. renew_secret = self._node.get_renewal_secret(server) cancel_secret = self._node.get_cancel_secret(server) d2 = ss.add_lease( storage_index, renew_secret, cancel_secret, ) # we ignore success d2.addErrback(self._add_lease_failed, server, storage_index) else: d2 = defer.succeed(None) d = ss.slot_readv(storage_index, shnums, readv) def passthrough(result): # Wait for d2, but fire with result of slot_readv() regardless of # result of d2. return d2.addBoth(lambda _: result) d.addCallback(passthrough) return d def _got_corrupt_share(self, e, shnum, server, data, lp): """ I am called when a remote server returns a corrupt share in response to one of our queries. By corrupt, I mean a share without a valid signature. I then record the failure, notify the server of the corruption, and record the share as bad. """ f = failure.Failure(e) self.log(format="bad share: %(f_value)s", f_value=str(f), failure=f, parent=lp, level=log.WEIRD, umid="h5llHg") # Notify the server that its share is corrupt. self.notify_server_corruption(server, shnum, str(e)) # By flagging this as a bad server, we won't count any of # the other shares on that server as valid, though if we # happen to find a valid version string amongst those # shares, we'll keep track of it so that we don't need # to validate the signature on those again. self._bad_servers.add(server) self._last_failure = f # XXX: Use the reader for this? 
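# Illustrative sketch: the passthrough pattern used in _do_read above -- run a
# side operation (the add-lease) in parallel, but make the returned Deferred
# wait for it while carrying only the main read's result. The two arguments
# are assumed to be ordinary Twisted Deferreds; nothing else is required.
def read_with_side_effect(main_d, side_d):
    def passthrough(result):
        # Wait for side_d, then fire with the main result regardless of what
        # side_d produced (its errors are handled separately).
        return side_d.addBoth(lambda _ignored: result)
    main_d.addCallback(passthrough)
    return main_d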
checkstring = data[:SIGNED_PREFIX_LENGTH] self._servermap.mark_bad_share(server, shnum, checkstring) self._servermap.add_problem(f) def _got_results(self, datavs, server, readsize, storage_index, started): lp = self.log(format="got result from [%(name)s], %(numshares)d shares", name=server.get_name(), numshares=len(datavs)) ss = server.get_storage_server() now = time.time() elapsed = now - started def _done_processing(ignored=None): self._queries_outstanding.discard(server) self._servermap.mark_server_reachable(server) self._must_query.discard(server) self._queries_completed += 1 if not self._running: self.log("but we're not running, so we'll ignore it", parent=lp) _done_processing() self._status.add_per_server_time(server, "late", started, elapsed) return self._status.add_per_server_time(server, "query", started, elapsed) if datavs: self._good_servers.add(server) else: self._empty_servers.add(server) ds = [] for shnum,datav in list(datavs.items()): data = datav[0] reader = MDMFSlotReadProxy(ss, storage_index, shnum, data, data_is_everything=(len(data) < readsize)) # our goal, with each response, is to validate the version # information and share data as best we can at this point -- # we do this by validating the signature. To do this, we # need to do the following: # - If we don't already have the public key, fetch the # public key. We use this to validate the signature. if not self._node.get_pubkey(): # fetch and set the public key. d = reader.get_verification_key() d.addCallback(lambda results, shnum=shnum: self._try_to_set_pubkey(results, server, shnum, lp)) # XXX: Make self._pubkey_query_failed? d.addErrback(lambda error, shnum=shnum, data=data: self._got_corrupt_share(error, shnum, server, data, lp)) else: # we already have the public key. d = defer.succeed(None) # Neither of these two branches return anything of # consequence, so the first entry in our deferredlist will # be None. # - Next, we need the version information. We almost # certainly got this by reading the first thousand or so # bytes of the share on the storage server, so we # shouldn't need to fetch anything at this step. d2 = reader.get_verinfo() d2.addErrback(lambda error, shnum=shnum, data=data: self._got_corrupt_share(error, shnum, server, data, lp)) # - Next, we need the signature. For an SDMF share, it is # likely that we fetched this when doing our initial fetch # to get the version information. In MDMF, this lives at # the end of the share, so unless the file is quite small, # we'll need to do a remote fetch to get it. d3 = reader.get_signature() d3.addErrback(lambda error, shnum=shnum, data=data: self._got_corrupt_share(error, shnum, server, data, lp)) # Once we have all three of these responses, we can move on # to validating the signature # Does the node already have a privkey? If not, we'll try to # fetch it here. if self._need_privkey: d4 = reader.get_encprivkey() d4.addCallback(lambda results, shnum=shnum: self._try_to_validate_privkey(results, server, shnum, lp)) d4.addErrback(lambda error, shnum=shnum: self._privkey_query_failed(error, server, shnum, lp)) else: d4 = defer.succeed(None) if self.fetch_update_data: # fetch the block hash tree and first + last segment, as # configured earlier. # Then set them in wherever we happen to want to set # them. ds = [] # XXX: We do this above, too. Is there a good way to # make the two routines share the value without # introducing more roundtrips? 
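# Illustrative sketch, prompted by the XXX question above: one way to let two
# code paths share a single remote fetch is to cache the Deferred and fan its
# result out to each consumer. This is a hypothetical helper, not existing
# Tahoe-LAFS API; error fan-out is deliberately omitted to keep it short.
from twisted.internet import defer

class CachedFetch(object):
    """Wrap a zero-argument Deferred-returning callable so it runs at most once."""
    def __init__(self, fetch):
        self._fetch = fetch
        self._shared = None

    def get(self):
        if self._shared is None:
            self._shared = self._fetch()
        consumer = defer.Deferred()
        def fanout(result):
            consumer.callback(result)
            return result      # preserve the value for later consumers
        self._shared.addCallback(fanout)
        return consumer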
ds.append(reader.get_verinfo()) ds.append(reader.get_blockhashes()) ds.append(reader.get_block_and_salt(self.start_segment)) ds.append(reader.get_block_and_salt(self.end_segment)) d5 = deferredutil.gatherResults(ds) d5.addCallback(self._got_update_results_one_share, shnum) else: d5 = defer.succeed(None) dl = defer.DeferredList([d, d2, d3, d4, d5]) def _append_proxy(passthrough, shnum=shnum, reader=reader): # Store the proxy (with its cache) keyed by serverid and # version. _, (_,verinfo), _, _, _ = passthrough verinfo = self._make_verinfo_hashable(verinfo) self._servermap.proxies[(verinfo, server.get_serverid(), storage_index, shnum)] = reader return passthrough dl.addCallback(_append_proxy) dl.addBoth(self._turn_barrier) dl.addCallback(lambda results, shnum=shnum: self._got_signature_one_share(results, shnum, server, lp)) dl.addErrback(lambda error, shnum=shnum, data=data: self._got_corrupt_share(error, shnum, server, data, lp)) ds.append(dl) # dl is a deferred list that will fire when all of the shares # that we found on this server are done processing. When dl fires, # we know that processing is done, so we can decrement the # semaphore-like thing that we incremented earlier. dl = defer.DeferredList(ds, fireOnOneErrback=True) # Are we done? Done means that there are no more queries to # send, that there are no outstanding queries, and that we # haven't received any queries that are still processing. If we # are done, self._check_for_done will cause the done deferred # that we returned to our caller to fire, which tells them that # they have a complete servermap, and that we won't be touching # the servermap anymore. dl.addCallback(_done_processing) dl.addCallback(self._check_for_done) dl.addErrback(self._fatal_error) # all done! self.log("_got_results done", parent=lp, level=log.NOISY) return dl def _turn_barrier(self, result): """ I help the servermap updater avoid the recursion limit issues discussed in #237. """ return fireEventually(result) def _try_to_set_pubkey(self, pubkey_s, server, shnum, lp): if self._node.get_pubkey(): return # don't go through this again if we don't have to fingerprint = hashutil.ssk_pubkey_fingerprint_hash(pubkey_s) assert len(fingerprint) == 32 if fingerprint != self._node.get_fingerprint(): raise CorruptShareError(server, shnum, "pubkey doesn't match fingerprint") self._node._populate_pubkey(self._deserialize_pubkey(pubkey_s)) assert self._node.get_pubkey() def notify_server_corruption(self, server, shnum, reason): if isinstance(reason, str): reason = reason.encode("utf-8") ss = server.get_storage_server() ss.advise_corrupt_share( b"mutable", self._storage_index, shnum, reason, ) def _got_signature_one_share(self, results, shnum, server, lp): # It is our job to give versioninfo to our caller. We need to # raise CorruptShareError if the share is corrupt for any # reason, something that our caller will handle. self.log(format="_got_results: got shnum #%(shnum)d from serverid %(name)s", shnum=shnum, name=server.get_name(), level=log.NOISY, parent=lp) if not self._running: # We can't process the results, since we can't touch the # servermap anymore. self.log("but we're not running anymore.") return None _, verinfo, signature, __, ___ = results verinfo = self._make_verinfo_hashable(verinfo[1]) # This tuple uniquely identifies a share on the grid; we use it # to keep track of the ones that we've already seen. 
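# Illustrative sketch: the signature check performed just below, reduced to
# its caching behaviour -- a version is verified against the public key only
# the first time it is seen, and remembered in a set afterwards. The
# verify_signature/BadSignature parameters are injected stand-ins for
# allmydata.crypto.rsa.verify_signature and allmydata.crypto.error.BadSignature.
def check_version(verinfo, signature, prefix, pubkey,
                  valid_versions, verify_signature, BadSignature):
    if verinfo in valid_versions:
        return True               # already validated; skip the crypto
    try:
        verify_signature(pubkey, signature, prefix)
    except BadSignature:
        return False              # caller treats this as a corrupt share
    valid_versions.add(verinfo)
    return True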
(seqnum, root_hash, saltish, segsize, datalen, k, n, prefix, offsets_tuple) = verinfo if verinfo not in self._valid_versions: # This is a new version tuple, and we need to validate it # against the public key before keeping track of it. assert self._node.get_pubkey() try: rsa.verify_signature(self._node.get_pubkey(), signature[1], prefix) except BadSignature: raise CorruptShareError(server, shnum, "signature is invalid") # ok, it's a valid verinfo. Add it to the list of validated # versions. self.log(" found valid version %d-%s from %s-sh%d: %d-%d/%d/%d" % (seqnum, str(base32.b2a(root_hash)[:4], "utf-8"), ensure_str(server.get_name()), shnum, k, n, segsize, datalen), parent=lp) self._valid_versions.add(verinfo) # We now know that this is a valid candidate verinfo. Whether or # not this instance of it is valid is a matter for the next # statement; at this point, we just know that if we see this # version info again, that its signature checks out and that # we're okay to skip the signature-checking step. # (server, shnum) are bound in the method invocation. if (server, shnum) in self._servermap.get_bad_shares(): # we've been told that the rest of the data in this share is # unusable, so don't add it to the servermap. self.log("but we've been told this is a bad share", parent=lp, level=log.UNUSUAL) return verinfo # Add the info to our servermap. timestamp = time.time() self._servermap.add_new_share(server, shnum, verinfo, timestamp) self._servers_with_shares.add(server) return verinfo def _make_verinfo_hashable(self, verinfo): (seqnum, root_hash, saltish, segsize, datalen, k, n, prefix, offsets) = verinfo offsets_tuple = tuple( [(key,value) for key,value in offsets.items()] ) verinfo = (seqnum, root_hash, saltish, segsize, datalen, k, n, prefix, offsets_tuple) return verinfo def _got_update_results_one_share(self, results, share): """ I record the update results in results. """ assert len(results) == 4 verinfo, blockhashes, start, end = results verinfo = self._make_verinfo_hashable(verinfo) update_data = (blockhashes, start, end) self._servermap.set_update_data_for_share_and_verinfo(share, verinfo, update_data) def _deserialize_pubkey(self, pubkey_s): verifier = rsa.create_verifying_key_from_string(pubkey_s) return verifier def _try_to_validate_privkey(self, enc_privkey, server, shnum, lp): """ Given a writekey from a remote server, I validate it against the writekey stored in my node. If it is valid, then I set the privkey and encprivkey properties of the node. 
""" node_writekey = self._node.get_writekey() alleged_privkey_s = decrypt_privkey(node_writekey, enc_privkey) alleged_writekey = hashutil.ssk_writekey_hash(alleged_privkey_s) if alleged_writekey != node_writekey: self.log("invalid privkey from %r shnum %d" % (server.get_name(), shnum), parent=lp, level=log.WEIRD, umid="aJVccw") return # it's good self.log("got valid privkey from shnum %d on serverid %r" % (shnum, server.get_name()), parent=lp) privkey, _ = rsa.create_signing_keypair_from_string(alleged_privkey_s) self._node._populate_encprivkey(enc_privkey) self._node._populate_privkey(privkey) self._need_privkey = False self._status.set_privkey_from(server) def _add_lease_failed(self, f, server, storage_index): # Older versions of Tahoe didn't handle the add-lease message very # well: <=1.1.0 throws a NameError because it doesn't implement # remote_add_lease(), 1.2.0/1.3.0 throw IndexError on unknown buckets # (which is most of them, since we send add-lease to everybody, # before we know whether or not they have any shares for us), and # 1.2.0 throws KeyError even on known buckets due to an internal bug # in the latency-measuring code. # we want to ignore the known-harmless errors and log the others. In # particular we want to log any local errors caused by coding # problems. if f.check(DeadReferenceError): return if f.check(RemoteException): if f.value.failure.check(KeyError, IndexError, NameError): # this may ignore a bit too much, but that only hurts us # during debugging return self.log(format="error in add_lease from [%(name)s]: %(f_value)s", name=server.get_name(), f_value=str(f.value), failure=f, level=log.WEIRD, umid="iqg3mw") return # local errors are cause for alarm log.err(f, format="local error in add_lease to [%(name)s]: %(f_value)s", name=server.get_name(), f_value=str(f.value), level=log.WEIRD, umid="ZWh6HA") def _query_failed(self, f, server): if not self._running: return level = log.WEIRD if f.check(DeadReferenceError): level = log.UNUSUAL self.log(format="error during query: %(f_value)s", f_value=str(f.value), failure=f, level=level, umid="IHXuQg") self._must_query.discard(server) self._queries_outstanding.discard(server) self._bad_servers.add(server) self._servermap.add_problem(f) # a server could be in both ServerMap.reachable_servers and # .unreachable_servers if they responded to our query, but then an # exception was raised in _got_results. 
self._servermap.mark_server_unreachable(server) self._queries_completed += 1 self._last_failure = f def _privkey_query_failed(self, f, server, shnum, lp): self._queries_outstanding.discard(server) if not self._running: return level = log.WEIRD if f.check(DeadReferenceError): level = log.UNUSUAL self.log(format="error during privkey query: %(f_value)s", f_value=str(f.value), failure=f, parent=lp, level=level, umid="McoJ5w") self._servermap.add_problem(f) self._last_failure = f def _check_for_done(self, res): # exit paths: # return self._send_more_queries(outstanding) : send some more queries # return self._done() : all done # return : keep waiting, no new queries lp = self.log(format=("_check_for_done, mode is '%(mode)s', " "%(outstanding)d queries outstanding, " "%(extra)d extra servers available, " "%(must)d 'must query' servers left, " "need_privkey=%(need_privkey)s" ), mode=self.mode, outstanding=len(self._queries_outstanding), extra=len(self.extra_servers), must=len(self._must_query), need_privkey=self._need_privkey, level=log.NOISY, ) if not self._running: self.log("but we're not running", parent=lp, level=log.NOISY) return if self._must_query: # we are still waiting for responses from servers that used to have # a share, so we must continue to wait. No additional queries are # required at this time. self.log("%d 'must query' servers left" % len(self._must_query), level=log.NOISY, parent=lp) return if (not self._queries_outstanding and not self.extra_servers): # all queries have retired, and we have no servers left to ask. No # more progress can be made, therefore we are done. self.log("all queries are retired, no extra servers: done", parent=lp) return self._done() recoverable_versions = self._servermap.recoverable_versions() unrecoverable_versions = self._servermap.unrecoverable_versions() # what is our completion policy? how hard should we work? if self.mode == MODE_ANYTHING: if recoverable_versions: self.log("%d recoverable versions: done" % len(recoverable_versions), parent=lp) return self._done() if self.mode in (MODE_CHECK, MODE_REPAIR): # we used self._must_query, and we know there aren't any # responses still waiting, so that means we must be done self.log("done", parent=lp) return self._done() MAX_IN_FLIGHT = 5 if self.mode == MODE_READ: # if we've queried k+epsilon servers, and we see a recoverable # version, and we haven't seen any unrecoverable higher-seqnum'ed # versions, then we're done. if self._queries_completed < self.num_servers_to_query: self.log(format="%(completed)d completed, %(query)d to query: need more", completed=self._queries_completed, query=self.num_servers_to_query, level=log.NOISY, parent=lp) return self._send_more_queries(MAX_IN_FLIGHT) if not recoverable_versions: self.log("no recoverable versions: need more", level=log.NOISY, parent=lp) return self._send_more_queries(MAX_IN_FLIGHT) highest_recoverable = max(recoverable_versions) highest_recoverable_seqnum = highest_recoverable[0] for unrec_verinfo in unrecoverable_versions: if unrec_verinfo[0] > highest_recoverable_seqnum: # there is evidence of a higher-seqnum version, but we # don't yet see enough shares to recover it. Try harder. # TODO: consider sending more queries. # TODO: consider limiting the search distance self.log("evidence of higher seqnum: need more", level=log.UNUSUAL, parent=lp) return self._send_more_queries(MAX_IN_FLIGHT) # all the unrecoverable versions were old or concurrent with a # recoverable version. Good enough. 
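# Illustrative sketch: the MODE_READ completion test above, as a hypothetical
# pure function -- stop querying once enough servers have answered, something
# is recoverable, and no unrecoverable version has a higher sequence number.
def mode_read_done(queries_completed, num_servers_to_query,
                   recoverable_seqnums, unrecoverable_seqnums):
    if queries_completed < num_servers_to_query:
        return False
    if not recoverable_seqnums:
        return False
    highest_recoverable = max(recoverable_seqnums)
    return all(s <= highest_recoverable for s in unrecoverable_seqnums)

# Seeing an unrecoverable seqnum 5 while the best recoverable is 4 means
# "try harder": mode_read_done(8, 6, {3, 4}, {5}) is False.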
self.log("no higher-seqnum: done", parent=lp) return self._done() if self.mode == MODE_WRITE: # we want to keep querying until we've seen a few that don't have # any shares, to be sufficiently confident that we've seen all # the shares. This is still less work than MODE_CHECK, which asks # every server in the world. if not recoverable_versions: self.log("no recoverable versions: need more", parent=lp, level=log.NOISY) return self._send_more_queries(MAX_IN_FLIGHT) last_found = -1 last_not_responded = -1 num_not_responded = 0 num_not_found = 0 states = [] found_boundary = False for i,server in enumerate(self.full_serverlist): if server in self._bad_servers: # query failed states.append("x") #self.log("loop [%s]: x" % server.get_name() elif server in self._empty_servers: # no shares states.append("0") #self.log("loop [%s]: 0" % server.get_name() if last_found != -1: num_not_found += 1 if num_not_found >= self.EPSILON: self.log("found our boundary, %s" % "".join(states), parent=lp, level=log.NOISY) found_boundary = True break elif server in self._servers_with_shares: # yes shares states.append("1") #self.log("loop [%s]: 1" % server.get_name() last_found = i num_not_found = 0 else: # not responded yet states.append("?") #self.log("loop [%s]: ?" % server.get_name() last_not_responded = i num_not_responded += 1 if found_boundary: # we need to know that we've gotten answers from # everybody to the left of here if last_not_responded == -1: # we're done self.log("have all our answers", parent=lp, level=log.NOISY) # .. unless we're still waiting on the privkey if self._need_privkey: self.log("but we're still waiting for the privkey", parent=lp, level=log.NOISY) # if we found the boundary but we haven't yet found # the privkey, we may need to look further. If # somehow all the privkeys were corrupted (but the # shares were readable), then this is likely to do an # exhaustive search. return self._send_more_queries(MAX_IN_FLIGHT) return self._done() # still waiting for somebody return self._send_more_queries(num_not_responded) # if we hit here, we didn't find our boundary, so we're still # waiting for servers self.log("no boundary yet, %s" % "".join(states), parent=lp, level=log.NOISY) return self._send_more_queries(MAX_IN_FLIGHT) # otherwise, keep up to 5 queries in flight. 
TODO: this is pretty # arbitrary, really I want this to be something like k - # max(known_version_sharecounts) + some extra self.log("catchall: need more", parent=lp, level=log.NOISY) return self._send_more_queries(MAX_IN_FLIGHT) def _send_more_queries(self, num_outstanding): more_queries = [] while True: self.log(format=" there are %(outstanding)d queries outstanding", outstanding=len(self._queries_outstanding), level=log.NOISY) active_queries = len(self._queries_outstanding) + len(more_queries) if active_queries >= num_outstanding: break if not self.extra_servers: break more_queries.append(self.extra_servers.pop(0)) self.log(format="sending %(more)d more queries: %(who)s", more=len(more_queries), who=" ".join(["[%r]" % s.get_name() for s in more_queries]), level=log.NOISY) for server in more_queries: self._do_query(server, self._storage_index, self._read_size) # we'll retrigger when those queries come back def _done(self): if not self._running: self.log("not running; we're already done") return self._running = False now = time.time() elapsed = now - self._started self._status.set_finished(now) self._status.timings["total"] = elapsed self._status.set_progress(1.0) self._status.set_status("Finished") self._status.set_active(False) self._servermap.set_last_update(self.mode, self._started) # the servermap will not be touched after this self.log("servermap: %s" % self._servermap.summarize_versions()) eventually(self._done_deferred.callback, self._servermap) def _fatal_error(self, f): self.log("fatal error", failure=f, level=log.WEIRD, umid="1cNvlw") self._done_deferred.errback(f) tahoe_lafs-1.20.0/src/allmydata/scripts/__init__.py0000644000000000000000000000000013615410400017144 0ustar00tahoe_lafs-1.20.0/src/allmydata/scripts/admin.py0000644000000000000000000002041113615410400016505 0ustar00""" Ported to Python 3. """ from six import ensure_binary from twisted.python import usage from twisted.python.filepath import ( FilePath, ) from allmydata.scripts.common import ( BaseOptions, BasedirOptions, ) from allmydata.storage import ( crawler, expirer, ) from allmydata.scripts.types_ import SubCommands from allmydata.client import read_config from allmydata.grid_manager import ( parse_grid_manager_certificate, ) from allmydata.scripts.cli import _default_nodedir from allmydata.util.encodingutil import argv_to_abspath from allmydata.util import jsonbytes class GenerateKeypairOptions(BaseOptions): def getUsage(self, width=None): t = BaseOptions.getUsage(self, width) t += """ Generate a public/private keypair, dumped to stdout as two lines of ASCII.. """ return t def print_keypair(options): from allmydata.crypto import ed25519 out = options.stdout private_key, public_key = ed25519.create_signing_keypair() print("private:", str(ed25519.string_from_signing_key(private_key), "ascii"), file=out) print("public:", str(ed25519.string_from_verifying_key(public_key), "ascii"), file=out) class DerivePubkeyOptions(BaseOptions): def parseArgs(self, privkey): self.privkey = privkey def getSynopsis(self): return "Usage: tahoe [global-options] admin derive-pubkey PRIVKEY" def getUsage(self, width=None): t = BaseOptions.getUsage(self, width) t += """ Given a private (signing) key that was previously generated with generate-keypair, derive the public key and print it to stdout. 
""" return t def derive_pubkey(options): out = options.stdout from allmydata.crypto import ed25519 privkey_vs = options.privkey privkey_vs = ensure_binary(privkey_vs) private_key, public_key = ed25519.signing_keypair_from_string(privkey_vs) print("private:", str(ed25519.string_from_signing_key(private_key), "ascii"), file=out) print("public:", str(ed25519.string_from_verifying_key(public_key), "ascii"), file=out) return 0 class MigrateCrawlerOptions(BasedirOptions): def getSynopsis(self): return "Usage: tahoe [global-options] admin migrate-crawler" def getUsage(self, width=None): t = BasedirOptions.getUsage(self, width) t += ( "The crawler data is now stored as JSON to avoid" " potential security issues with pickle files.\n\nIf" " you are confident the state files in the 'storage/'" " subdirectory of your node are trustworthy, run this" " command to upgrade them to JSON.\n\nThe files are:" " lease_checker.history, lease_checker.state, and" " bucket_counter.state" ) return t class AddGridManagerCertOptions(BaseOptions): """ Options for add-grid-manager-cert """ optParameters = [ ['filename', 'f', None, "Filename of the certificate ('-', a dash, for stdin)"], ['name', 'n', None, "Name to give this certificate"], ] def getSynopsis(self): return "Usage: tahoe [global-options] admin add-grid-manager-cert [options]" def postOptions(self) -> None: assert self.parent is not None assert self.parent.parent is not None if self['name'] is None: raise usage.UsageError( "Must provide --name option" ) if self['filename'] is None: raise usage.UsageError( "Must provide --filename option" ) data: str if self['filename'] == '-': print("reading certificate from stdin", file=self.parent.parent.stderr) # type: ignore[attr-defined] data = self.parent.parent.stdin.read() # type: ignore[attr-defined] if len(data) == 0: raise usage.UsageError( "Reading certificate from stdin failed" ) else: with open(self['filename'], 'r') as f: data = f.read() try: self.certificate_data = parse_grid_manager_certificate(data) except ValueError as e: raise usage.UsageError( "Error parsing certificate: {}".format(e) ) def getUsage(self, width=None): t = BaseOptions.getUsage(self, width) t += ( "Adds a Grid Manager certificate to a Storage Server.\n\n" "The certificate will be copied into the base-dir and config\n" "will be added to 'tahoe.cfg', which will be re-written. A\n" "restart is required for changes to take effect.\n\n" "The human who operates a Grid Manager would produce such a\n" "certificate and communicate it securely to you.\n" ) return t def migrate_crawler(options): out = options.stdout storage = FilePath(options['basedir']).child("storage") conversions = [ (storage.child("lease_checker.state"), crawler._convert_pickle_state_to_json), (storage.child("bucket_counter.state"), crawler._convert_pickle_state_to_json), (storage.child("lease_checker.history"), expirer._convert_pickle_state_to_json), ] for fp, converter in conversions: existed = fp.exists() newfp = crawler._upgrade_pickle_to_json(fp, converter) if existed: print("Converted '{}' to '{}'".format(fp.path, newfp.path), file=out) else: if newfp.exists(): print("Already converted: '{}'".format(newfp.path), file=out) else: print("Not found: '{}'".format(fp.path), file=out) def add_grid_manager_cert(options): """ Add a new Grid Manager certificate to our config """ # XXX is there really not already a function for this? 
if options.parent.parent['node-directory']: nd = argv_to_abspath(options.parent.parent['node-directory']) else: nd = _default_nodedir config = read_config(nd, "portnum") cert_fname = "{}.cert".format(options['name']) cert_path = FilePath(config.get_config_path(cert_fname)) cert_bytes = jsonbytes.dumps_bytes(options.certificate_data, indent=4) + b'\n' cert_name = options['name'] if cert_path.exists(): msg = "Already have certificate for '{}' (at {})".format( options['name'], cert_path.path, ) print(msg, file=options.stderr) return 1 config.set_config("storage", "grid_management", "True") config.set_config("grid_manager_certificates", cert_name, cert_fname) # write all the data out with cert_path.open("wb") as f: f.write(cert_bytes) cert_count = len(config.enumerate_section("grid_manager_certificates")) print("There are now {} certificates".format(cert_count), file=options.stderr) return 0 class AdminCommand(BaseOptions): subCommands = [ ("generate-keypair", None, GenerateKeypairOptions, "Generate a public/private keypair, write to stdout."), ("derive-pubkey", None, DerivePubkeyOptions, "Derive a public key from a private key."), ("migrate-crawler", None, MigrateCrawlerOptions, "Write the crawler-history data as JSON."), ("add-grid-manager-cert", None, AddGridManagerCertOptions, "Add a Grid Manager-provided certificate to a storage " "server's config."), ] def postOptions(self): if not hasattr(self, 'subOptions'): raise usage.UsageError("must specify a subcommand") def getSynopsis(self): return "Usage: tahoe [global-options] admin SUBCOMMAND" def getUsage(self, width=None): t = BaseOptions.getUsage(self, width) t += """ Please run e.g. 'tahoe admin generate-keypair --help' for more details on each subcommand. """ return t subDispatch = { "generate-keypair": print_keypair, "derive-pubkey": derive_pubkey, "migrate-crawler": migrate_crawler, "add-grid-manager-cert": add_grid_manager_cert, } def do_admin(options): so = options.subOptions so.stdout = options.stdout so.stderr = options.stderr f = subDispatch[options.subCommand] return f(so) subCommands : SubCommands = [ ("admin", None, AdminCommand, "admin subcommands: use 'tahoe admin' for a list"), ] dispatch = { "admin": do_admin, } tahoe_lafs-1.20.0/src/allmydata/scripts/backupdb.py0000644000000000000000000003120713615410400017175 0ustar00""" Ported to Python 3. """ import os.path, sys, time, random, stat from allmydata.util.netstring import netstring from allmydata.util.hashutil import backupdb_dirhash from allmydata.util import base32 from allmydata.util.fileutil import abspath_expanduser_unicode from allmydata.util.encodingutil import to_bytes from allmydata.util.dbutil import get_db, DBError DAY = 24*60*60 MONTH = 30*DAY SCHEMA_v1 = """ CREATE TABLE version -- added in v1 ( version INTEGER -- contains one row, set to 2 ); CREATE TABLE local_files -- added in v1 ( path VARCHAR(1024) PRIMARY KEY, -- index, this is an absolute UTF-8-encoded local filename size INTEGER, -- os.stat(fn)[stat.ST_SIZE] mtime NUMBER, -- os.stat(fn)[stat.ST_MTIME] ctime NUMBER, -- os.stat(fn)[stat.ST_CTIME] fileid INTEGER ); CREATE TABLE caps -- added in v1 ( fileid INTEGER PRIMARY KEY AUTOINCREMENT, filecap VARCHAR(256) UNIQUE -- URI:CHK:... ); CREATE TABLE last_upload -- added in v1 ( fileid INTEGER PRIMARY KEY, last_uploaded TIMESTAMP, last_checked TIMESTAMP ); """ TABLE_DIRECTORY = """ CREATE TABLE directories -- added in v2 ( dirhash varchar(256) PRIMARY KEY, -- base32(dirhash) dircap varchar(256), -- URI:DIR2-CHK:... 
last_uploaded TIMESTAMP, last_checked TIMESTAMP ); """ SCHEMA_v2 = SCHEMA_v1 + TABLE_DIRECTORY UPDATE_v1_to_v2 = TABLE_DIRECTORY + """ UPDATE version SET version=2; """ UPDATERS = { 2: UPDATE_v1_to_v2, } def get_backupdb(dbfile, stderr=sys.stderr, create_version=(SCHEMA_v2, 2), just_create=False): # Open or create the given backupdb file. The parent directory must # exist. try: (sqlite3, db) = get_db(dbfile, stderr, create_version, updaters=UPDATERS, just_create=just_create, dbname="backupdb") return BackupDB_v2(sqlite3, db) except DBError as e: print(e, file=stderr) return None class FileResult(object): def __init__(self, bdb, filecap, should_check, path, mtime, ctime, size): self.bdb = bdb self.filecap = filecap self.should_check_p = should_check self.path = path self.mtime = mtime self.ctime = ctime self.size = size def was_uploaded(self): if self.filecap: return self.filecap return False def did_upload(self, filecap): self.bdb.did_upload_file(filecap, self.path, self.mtime, self.ctime, self.size) def should_check(self): return self.should_check_p def did_check_healthy(self, results): self.bdb.did_check_file_healthy(self.filecap, results) class DirectoryResult(object): def __init__(self, bdb, dirhash, dircap, should_check): self.bdb = bdb self.dircap = dircap self.should_check_p = should_check self.dirhash = dirhash def was_created(self): if self.dircap: return self.dircap return False def did_create(self, dircap): self.bdb.did_create_directory(dircap, self.dirhash) def should_check(self): return self.should_check_p def did_check_healthy(self, results): self.bdb.did_check_directory_healthy(self.dircap, results) class BackupDB_v2(object): VERSION = 2 NO_CHECK_BEFORE = 1*MONTH ALWAYS_CHECK_AFTER = 2*MONTH def __init__(self, sqlite_module, connection): self.sqlite_module = sqlite_module self.connection = connection self.cursor = connection.cursor() def check_file(self, path, use_timestamps=True): """I will tell you if a given local file needs to be uploaded or not, by looking in a database and seeing if I have a record of this file having been uploaded earlier. I return a FileResults object, synchronously. If r.was_uploaded() returns False, you should upload the file. When you are finished uploading it, call r.did_upload(filecap), so I can update my database. If was_uploaded() returns a filecap, you might be able to avoid an upload. Call r.should_check(), and if it says False, you can skip the upload and use the filecap returned by was_uploaded(). If should_check() returns True, you should perform a filecheck on the filecap returned by was_uploaded(). If the check indicates the file is healthy, please call r.did_check_healthy(checker_results) so I can update the database, using the de-JSONized response from the webapi t=check call for 'checker_results'. If the check indicates the file is not healthy, please upload the file and call r.did_upload(filecap) when you're done. If use_timestamps=True (the default), I will compare ctime and mtime of the local file against an entry in my database, and consider the file to be unchanged if ctime, mtime, and filesize are all the same as the earlier version. If use_timestamps=False, I will not trust the timestamps, so more files (perhaps all) will be marked as needing upload. A future version of this database may hash the file to make equality decisions, in which case use_timestamps=False will not always imply r.must_upload()==True. 'path' points to a local file on disk, possibly relative to the current working directory. 
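        A minimal caller sketch (illustrative only; upload(), filecheck() and
        looks_healthy() stand in for whatever upload/check machinery the
        caller actually uses):

            r = bdb.check_file(path)
            cap = r.was_uploaded()
            if not cap:
                r.did_upload(upload(path))
            elif r.should_check():
                results = filecheck(cap)       # de-JSONized webapi t=check response
                if looks_healthy(results):
                    r.did_check_healthy(results)
                else:
                    r.did_upload(upload(path))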
The database stores absolute pathnames. """ path = abspath_expanduser_unicode(path) # TODO: consider using get_pathinfo. s = os.stat(path) size = s[stat.ST_SIZE] ctime = s[stat.ST_CTIME] mtime = s[stat.ST_MTIME] now = time.time() c = self.cursor c.execute("SELECT size,mtime,ctime,fileid" " FROM local_files" " WHERE path=?", (path,)) row = self.cursor.fetchone() if not row: return FileResult(self, None, False, path, mtime, ctime, size) (last_size,last_mtime,last_ctime,last_fileid) = row c.execute("SELECT caps.filecap, last_upload.last_checked" " FROM caps,last_upload" " WHERE caps.fileid=? AND last_upload.fileid=?", (last_fileid, last_fileid)) row2 = c.fetchone() if ((last_size != size or not use_timestamps or last_mtime != mtime or last_ctime != ctime) # the file has been changed or (not row2) # we somehow forgot where we put the file last time ): c.execute("DELETE FROM local_files WHERE path=?", (path,)) self.connection.commit() return FileResult(self, None, False, path, mtime, ctime, size) # at this point, we're allowed to assume the file hasn't been changed (filecap, last_checked) = row2 age = now - last_checked probability = ((age - self.NO_CHECK_BEFORE) / (self.ALWAYS_CHECK_AFTER - self.NO_CHECK_BEFORE)) probability = min(max(probability, 0.0), 1.0) should_check = bool(random.random() < probability) return FileResult(self, to_bytes(filecap), should_check, path, mtime, ctime, size) def get_or_allocate_fileid_for_cap(self, filecap): # find an existing fileid for this filecap, or insert a new one. The # caller is required to commit() afterwards. # mysql has "INSERT ... ON DUPLICATE KEY UPDATE", but not sqlite # sqlite has "INSERT ON CONFLICT REPLACE", but not mysql # So we use INSERT, ignore any error, then a SELECT c = self.cursor try: c.execute("INSERT INTO caps (filecap) VALUES (?)", (filecap,)) except (self.sqlite_module.IntegrityError, self.sqlite_module.OperationalError): # sqlite3 on sid gives IntegrityError # pysqlite2 (which we don't use, so maybe no longer relevant) on dapper gives OperationalError pass c.execute("SELECT fileid FROM caps WHERE filecap=?", (filecap,)) foundrow = c.fetchone() assert foundrow fileid = foundrow[0] return fileid def did_upload_file(self, filecap, path, mtime, ctime, size): now = time.time() fileid = self.get_or_allocate_fileid_for_cap(filecap) try: self.cursor.execute("INSERT INTO last_upload VALUES (?,?,?)", (fileid, now, now)) except (self.sqlite_module.IntegrityError, self.sqlite_module.OperationalError): self.cursor.execute("UPDATE last_upload" " SET last_uploaded=?, last_checked=?" " WHERE fileid=?", (now, now, fileid)) try: self.cursor.execute("INSERT INTO local_files VALUES (?,?,?,?,?)", (path, size, mtime, ctime, fileid)) except (self.sqlite_module.IntegrityError, self.sqlite_module.OperationalError): self.cursor.execute("UPDATE local_files" " SET size=?, mtime=?, ctime=?, fileid=?" " WHERE path=?", (size, mtime, ctime, fileid, path)) self.connection.commit() def did_check_file_healthy(self, filecap, results): now = time.time() fileid = self.get_or_allocate_fileid_for_cap(filecap) self.cursor.execute("UPDATE last_upload" " SET last_checked=?" " WHERE fileid=?", (now, fileid)) self.connection.commit() def check_directory(self, contents): """I will tell you if a new directory needs to be created for a given set of directory contents, or if I know of an existing (immutable) directory that can be used instead. 'contents' should be a dictionary that maps from child name (a single unicode string) to immutable childcap (filecap or dircap). 
I return a DirectoryResult object, synchronously. If r.was_created() returns False, you should create the directory (with t=mkdir-immutable). When you are finished, call r.did_create(dircap) so I can update my database. If was_created() returns a dircap, you might be able to avoid the mkdir. Call r.should_check(), and if it says False, you can skip the mkdir and use the dircap returned by was_created(). If should_check() returns True, you should perform a check operation on the dircap returned by was_created(). If the check indicates the directory is healthy, please call r.did_check_healthy(checker_results) so I can update the database, using the de-JSONized response from the webapi t=check call for 'checker_results'. If the check indicates the directory is not healthy, please repair or re-create the directory and call r.did_create(dircap) when you're done. """ now = time.time() entries = [] for name in contents: entries.append( [name.encode("utf-8"), contents[name]] ) entries.sort() data = b"".join([netstring(name_utf8)+netstring(cap) for (name_utf8,cap) in entries]) dirhash = backupdb_dirhash(data) dirhash_s = base32.b2a(dirhash) c = self.cursor c.execute("SELECT dircap, last_checked" " FROM directories WHERE dirhash=?", (dirhash_s,)) row = c.fetchone() if not row: return DirectoryResult(self, dirhash_s, None, False) (dircap, last_checked) = row age = now - last_checked probability = ((age - self.NO_CHECK_BEFORE) / (self.ALWAYS_CHECK_AFTER - self.NO_CHECK_BEFORE)) probability = min(max(probability, 0.0), 1.0) should_check = bool(random.random() < probability) return DirectoryResult(self, dirhash_s, to_bytes(dircap), should_check) def did_create_directory(self, dircap, dirhash): now = time.time() # if the dirhash is already present (i.e. we've re-uploaded an # existing directory, possibly replacing the dircap with a new one), # update the record in place. Otherwise create a new record.) self.cursor.execute("REPLACE INTO directories VALUES (?,?,?,?)", (dirhash, dircap, now, now)) self.connection.commit() def did_check_directory_healthy(self, dircap, results): now = time.time() self.cursor.execute("UPDATE directories" " SET last_checked=?" " WHERE dircap=?", (now, dircap)) self.connection.commit() tahoe_lafs-1.20.0/src/allmydata/scripts/cli.py0000644000000000000000000005573213615410400016202 0ustar00""" Ported to Python 3. """ import os.path, re, fnmatch from allmydata.scripts.types_ import SubCommands, Parameters from twisted.python import usage from allmydata.scripts.common import get_aliases, get_default_nodedir, \ DEFAULT_ALIAS, BaseOptions from allmydata.util.encodingutil import argv_to_unicode, argv_to_abspath, quote_local_unicode_path from .tahoe_status import TahoeStatusCommand NODEURL_RE=re.compile("http(s?)://([^:]*)(:([1-9][0-9]*))?") _default_nodedir = get_default_nodedir() class FileStoreOptions(BaseOptions): optParameters : Parameters = [ ["node-url", "u", None, "Specify the URL of the Tahoe gateway node, such as " "'http://127.0.0.1:3456'. 
" "This overrides the URL found in the --node-directory ."], ["dir-cap", None, None, "Specify which dirnode URI should be used as the 'tahoe' alias."] ] def postOptions(self): self["quiet"] = self.parent["quiet"] if self.parent['node-directory']: self['node-directory'] = argv_to_abspath(self.parent['node-directory']) else: self['node-directory'] = _default_nodedir # compute a node-url from the existing options, put in self['node-url'] if self['node-url']: if (not isinstance(self['node-url'], (bytes, str)) or not NODEURL_RE.match(self['node-url'])): msg = ("--node-url is required to be a string and look like " "\"http://HOSTNAMEORADDR:PORT\", not: %r" % (self['node-url'],)) raise usage.UsageError(msg) else: node_url_file = os.path.join(self['node-directory'], "node.url") with open(node_url_file, "r") as f: self['node-url'] = f.read().strip() if self['node-url'][-1] != "/": self['node-url'] += "/" aliases = get_aliases(self['node-directory']) if self['dir-cap']: aliases[DEFAULT_ALIAS] = self['dir-cap'] self.aliases = aliases # maps alias name to dircap class MakeDirectoryOptions(FileStoreOptions): optParameters = [ ("format", None, None, "Create a directory with the given format: SDMF or MDMF (case-insensitive)"), ] def parseArgs(self, where=""): self.where = argv_to_unicode(where) if self['format']: if self['format'].upper() not in ("SDMF", "MDMF"): raise usage.UsageError("%s is an invalid format" % self['format']) synopsis = "[options] [REMOTE_DIR]" description = """Create a new directory, either unlinked or as a subdirectory.""" class AddAliasOptions(FileStoreOptions): def parseArgs(self, alias, cap): self.alias = argv_to_unicode(alias) if self.alias.endswith(u':'): self.alias = self.alias[:-1] self.cap = cap synopsis = "[options] ALIAS[:] DIRCAP" description = """Add a new alias for an existing directory.""" class CreateAliasOptions(FileStoreOptions): def parseArgs(self, alias): self.alias = argv_to_unicode(alias) if self.alias.endswith(u':'): self.alias = self.alias[:-1] synopsis = "[options] ALIAS[:]" description = """Create a new directory and add an alias for it.""" class ListAliasesOptions(FileStoreOptions): synopsis = "[options]" description = """Display a table of all configured aliases.""" optFlags = [ ("readonly-uri", None, "Show read-only dircaps instead of readwrite"), ("json", None, "Show JSON output"), ] class ListOptions(FileStoreOptions): optFlags = [ ("long", "l", "Use long format: show file sizes, and timestamps."), ("uri", None, "Show file/directory URIs."), ("readonly-uri", None, "Show read-only file/directory URIs."), ("classify", "F", "Append '/' to directory names, and '*' to mutable."), ("json", None, "Show the raw JSON output."), ] def parseArgs(self, where=""): self.where = argv_to_unicode(where) synopsis = "[options] [PATH]" description = """ List the contents of some portion of the grid. If PATH is omitted, "tahoe:" is assumed. When the -l or --long option is used, each line is shown in the following format: drwx where each of the letters on the left may be replaced by '-'. If 'd' is present, it indicates that the object is a directory. If the 'd' is replaced by a '?', the object type is unknown. 'rwx' is a Unix-like permissions mask: if the mask includes 'w', then the object is writeable through its link in this directory (note that the link might be replaceable even if the object is not writeable through the current link). The 'x' is a legacy of Unix filesystems. In Tahoe it is used only to indicate that the contents of a directory can be listed. 
Directories have no size, so their size field is shown as '-'. Otherwise the size of the file, when known, is given in bytes. The size of mutable files or unknown objects is shown as '?'. The date/time shows when this link in the Tahoe grid was last modified. """ class GetOptions(FileStoreOptions): def parseArgs(self, arg1, arg2=None): # tahoe get FOO |less # write to stdout # tahoe get tahoe:FOO |less # same # tahoe get FOO bar # write to local file # tahoe get tahoe:FOO bar # same if arg2 == "-": arg2 = None self.from_file = argv_to_unicode(arg1) self.to_file = None if arg2 is None else argv_to_abspath(arg2) synopsis = "[options] REMOTE_FILE LOCAL_FILE" description = """ Retrieve a file from the grid and write it to the local filesystem. If LOCAL_FILE is omitted or '-', the contents of the file will be written to stdout.""" description_unwrapped = """ Examples: % tahoe get FOO |less # write to stdout % tahoe get tahoe:FOO |less # same % tahoe get FOO bar # write to local file % tahoe get tahoe:FOO bar # same """ class PutOptions(FileStoreOptions): optFlags = [ ("mutable", "m", "Create a mutable file instead of an immutable one (like --format=SDMF)"), ] optParameters = [ ("format", None, None, "Create a file with the given format: SDMF and MDMF for mutable, CHK (default) for immutable. (case-insensitive)"), ("private-key-path", None, None, "***Warning*** " "It is possible to use this option to spoil the normal security properties of mutable objects. " "It is also possible to corrupt or destroy data with this option. " "Most users will not need this option and can ignore it. " "For mutables only, " "this gives a file containing a PEM-encoded 2048 bit RSA private key to use as the signature key for the mutable. " "The private key must be handled at least as strictly as the resulting capability string. " "A single private key must not be used for more than one mutable." ), ] def parseArgs(self, arg1=None, arg2=None): # see Examples below if arg1 == "-": arg1 = None self.from_file = None if arg1 is None else argv_to_abspath(arg1) self.to_file = None if arg2 is None else argv_to_unicode(arg2) if self['format']: if self['format'].upper() not in ("SDMF", "MDMF", "CHK"): raise usage.UsageError("%s is an invalid format" % self['format']) synopsis = "[options] LOCAL_FILE REMOTE_FILE" description = """ Put a file into the grid, copying its contents from the local filesystem. If REMOTE_FILE is missing, upload the file but do not link it into a directory; also print the new filecap to stdout. If LOCAL_FILE is missing or '-', data will be copied from stdin. REMOTE_FILE is assumed to start with tahoe: unless otherwise specified. If the destination file already exists and is mutable, it will be modified in-place, whether or not --mutable is specified. (--mutable only affects creation of new files.) 
""" description_unwrapped = """ Examples: % cat FILE | tahoe put # create unlinked file from stdin % cat FILE | tahoe put - # same % tahoe put bar # create unlinked file from local 'bar' % cat FILE | tahoe put - FOO # create tahoe:FOO from stdin % tahoe put bar FOO # copy local 'bar' to tahoe:FOO % tahoe put bar tahoe:FOO # same % tahoe put bar MUTABLE-FILE-WRITECAP # modify the mutable file in-place """ class CpOptions(FileStoreOptions): optFlags = [ ("recursive", "r", "Copy source directory recursively."), ("verbose", "v", "Be noisy about what is happening."), ("caps-only", None, "When copying to local files, write out filecaps instead of actual " "data (only useful for debugging and tree-comparison purposes)."), ] def parseArgs(self, *args): if len(args) < 2: raise usage.UsageError("cp requires at least two arguments") self.sources = [argv_to_unicode(arg) for arg in args[:-1]] self.destination = argv_to_unicode(args[-1]) synopsis = "[options] FROM.. TO" description = """ Use 'tahoe cp' to copy files between a local filesystem and a Tahoe grid. Any FROM/TO arguments that begin with an alias indicate Tahoe-side files or non-file arguments. Directories will be copied recursively. New Tahoe-side directories will be created when necessary. Assuming that you have previously set up an alias 'home' with 'tahoe create-alias home', here are some examples: tahoe cp ~/foo.txt home: # creates tahoe-side home:foo.txt tahoe cp ~/foo.txt /tmp/bar.txt home: # copies two files to home: tahoe cp ~/Pictures home:stuff/my-pictures # copies directory recursively You can also use a dircap as either FROM or TO target: tahoe cp URI:DIR2-RO:ixqhc4kdbjxc7o65xjnveoewym:5x6lwoxghrd5rxhwunzavft2qygfkt27oj3fbxlq4c6p45z5uneq/blog.html ./ # copy Zooko's wiki page to a local file This command still has some limitations: symlinks and special files (device nodes, named pipes) are not handled very well. Arguments should not have trailing slashes (they are ignored for directory arguments, but trigger errors for file arguments). When copying directories, it can be unclear whether you mean to copy the contents of a source directory, or the source directory itself (i.e. whether the output goes under the target directory, or one directory lower). Tahoe's rule is that source directories with names are referring to the directory as a whole, and source directories without names (e.g. a raw dircap) are referring to the contents. """ class UnlinkOptions(FileStoreOptions): def parseArgs(self, where): self.where = argv_to_unicode(where) synopsis = "[options] REMOTE_FILE" description = "Remove a named file from its parent directory." class MvOptions(FileStoreOptions): def parseArgs(self, frompath, topath): self.from_file = argv_to_unicode(frompath) self.to_file = argv_to_unicode(topath) synopsis = "[options] FROM TO" description = """ Use 'tahoe mv' to move files that are already on the grid elsewhere on the grid, e.g., 'tahoe mv alias:some_file alias:new_file'. If moving a remote file into a remote directory, you'll need to append a '/' to the name of the remote directory, e.g., 'tahoe mv tahoe:file1 tahoe:dir/', not 'tahoe mv tahoe:file1 tahoe:dir'. Note that it is not possible to use this command to move local files to the grid -- use 'tahoe cp' for that. 
""" class LnOptions(FileStoreOptions): def parseArgs(self, frompath, topath): self.from_file = argv_to_unicode(frompath) self.to_file = argv_to_unicode(topath) synopsis = "[options] FROM_LINK TO_LINK" description = """ Use 'tahoe ln' to duplicate a link (directory entry) already on the grid to elsewhere on the grid. For example 'tahoe ln alias:some_file alias:new_file'. causes 'alias:new_file' to point to the same object that 'alias:some_file' points to. (The argument order is the same as Unix ln. To remember the order, you can think of this command as copying a link, rather than copying a file as 'tahoe cp' does. Then the argument order is consistent with that of 'tahoe cp'.) When linking a remote file into a remote directory, you'll need to append a '/' to the name of the remote directory, e.g. 'tahoe ln tahoe:file1 tahoe:dir/' (which is shorthand for 'tahoe ln tahoe:file1 tahoe:dir/file1'). If you forget the '/', e.g. 'tahoe ln tahoe:file1 tahoe:dir', the 'ln' command will refuse to overwrite the 'tahoe:dir' directory, and will exit with an error. Note that it is not possible to use this command to create links between local and remote files. """ class BackupConfigurationError(Exception): pass class BackupOptions(FileStoreOptions): optFlags = [ ("verbose", "v", "Be noisy about what is happening."), ("ignore-timestamps", None, "Do not use backupdb timestamps to decide whether a local file is unchanged."), ] vcs_patterns = ('CVS', 'RCS', 'SCCS', '.git', '.gitignore', '.cvsignore', '.svn', '.arch-ids','{arch}', '=RELEASE-ID', '=meta-update', '=update', '.bzr', '.bzrignore', '.bzrtags', '.hg', '.hgignore', '_darcs') def __init__(self): super(BackupOptions, self).__init__() self['exclude'] = set() def parseArgs(self, localdir, topath): self.from_dir = argv_to_abspath(localdir) self.to_dir = argv_to_unicode(topath) synopsis = "[options] FROM ALIAS:TO" def opt_exclude(self, pattern): """Ignore files matching a glob pattern. You may give multiple '--exclude' options.""" g = argv_to_unicode(pattern).strip() if g: exclude = self['exclude'] exclude.add(g) def opt_exclude_from_utf_8(self, filepath): """Ignore file matching glob patterns listed in file, one per line. The file is assumed to be in the argv encoding.""" abs_filepath = argv_to_abspath(filepath) try: exclude_file = open(abs_filepath, "r", encoding="utf-8") except Exception as e: raise BackupConfigurationError('Error opening exclude file %s. (Error: %s)' % ( quote_local_unicode_path(abs_filepath), e)) try: for line in exclude_file: self.opt_exclude(line) finally: exclude_file.close() def opt_exclude_vcs(self): """Exclude files and directories used by following version control systems: CVS, RCS, SCCS, Git, SVN, Arch, Bazaar(bzr), Mercurial, Darcs.""" for pattern in self.vcs_patterns: self.opt_exclude(pattern) def filter_listdir(self, listdir): """Yields non-excluded childpaths in path.""" exclude = self['exclude'] exclude_regexps = [re.compile(fnmatch.translate(pat)) for pat in exclude] for filename in listdir: for regexp in exclude_regexps: if regexp.match(filename): break else: yield filename description = """ Add a versioned backup of the local FROM directory to a timestamped subdirectory of the TO/Archives directory on the grid, sharing as many files and directories as possible with earlier backups. Create TO/Latest as a reference to the latest backup. 
Behaves somewhat like 'rsync -a --link-dest=TO/Archives/(previous) FROM TO/Archives/(new); ln -sf TO/Archives/(new) TO/Latest'.""" class WebopenOptions(FileStoreOptions): optFlags = [ ("info", "i", "Open the t=info page for the file"), ] def parseArgs(self, where=''): self.where = argv_to_unicode(where) synopsis = "[options] [ALIAS:PATH]" description = """ Open a web browser to the contents of some file or directory on the grid. When run without arguments, open the Welcome page.""" class ManifestOptions(FileStoreOptions): optFlags = [ ("storage-index", "s", "Only print storage index strings, not pathname+cap."), ("verify-cap", None, "Only print verifycap, not pathname+cap."), ("repair-cap", None, "Only print repaircap, not pathname+cap."), ("raw", "r", "Display raw JSON data instead of parsed."), ] def parseArgs(self, where=''): self.where = argv_to_unicode(where) synopsis = "[options] [ALIAS:PATH]" description = """ Print a list of all files and directories reachable from the given starting point.""" class StatsOptions(FileStoreOptions): optFlags = [ ("raw", "r", "Display raw JSON data instead of parsed"), ] def parseArgs(self, where=''): self.where = argv_to_unicode(where) synopsis = "[options] [ALIAS:PATH]" description = """ Print statistics about of all files and directories reachable from the given starting point.""" class CheckOptions(FileStoreOptions): optFlags = [ ("raw", None, "Display raw JSON data instead of parsed."), ("verify", None, "Verify all hashes, instead of merely querying share presence."), ("repair", None, "Automatically repair any problems found."), ("add-lease", None, "Add/renew lease on all shares."), ] def parseArgs(self, *locations): self.locations = list(map(argv_to_unicode, locations)) synopsis = "[options] [ALIAS:PATH]" description = """ Check a single file or directory: count how many shares are available and verify their hashes. Optionally repair the file if any problems were found.""" class DeepCheckOptions(FileStoreOptions): optFlags = [ ("raw", None, "Display raw JSON data instead of parsed."), ("verify", None, "Verify all hashes, instead of merely querying share presence."), ("repair", None, "Automatically repair any problems found."), ("add-lease", None, "Add/renew lease on all shares."), ("verbose", "v", "Be noisy about what is happening."), ] def parseArgs(self, *locations): self.locations = list(map(argv_to_unicode, locations)) synopsis = "[options] [ALIAS:PATH]" description = """ Check all files and directories reachable from the given starting point (which must be a directory), like 'tahoe check' but for multiple files. 
Optionally repair any problems found.""" subCommands : SubCommands = [ ("mkdir", None, MakeDirectoryOptions, "Create a new directory."), ("add-alias", None, AddAliasOptions, "Add a new alias cap."), ("create-alias", None, CreateAliasOptions, "Create a new alias cap."), ("list-aliases", None, ListAliasesOptions, "List all alias caps."), ("ls", None, ListOptions, "List a directory."), ("get", None, GetOptions, "Retrieve a file from the grid."), ("put", None, PutOptions, "Upload a file into the grid."), ("cp", None, CpOptions, "Copy one or more files or directories."), ("unlink", None, UnlinkOptions, "Unlink a file or directory on the grid."), ("mv", None, MvOptions, "Move a file within the grid."), ("ln", None, LnOptions, "Make an additional link to an existing file or directory."), ("backup", None, BackupOptions, "Make target dir look like local dir."), ("webopen", None, WebopenOptions, "Open a web browser to a grid file or directory."), ("manifest", None, ManifestOptions, "List all files/directories in a subtree."), ("stats", None, StatsOptions, "Print statistics about all files/directories in a subtree."), ("check", None, CheckOptions, "Check a single file or directory."), ("deep-check", None, DeepCheckOptions, "Check all files/directories reachable from a starting point."), ("status", None, TahoeStatusCommand, "Various status information."), ] def mkdir(options): from allmydata.scripts import tahoe_mkdir rc = tahoe_mkdir.mkdir(options) return rc def add_alias(options): from allmydata.scripts import tahoe_add_alias rc = tahoe_add_alias.add_alias(options) return rc def create_alias(options): from allmydata.scripts import tahoe_add_alias rc = tahoe_add_alias.create_alias(options) return rc def list_aliases(options): from allmydata.scripts import tahoe_add_alias rc = tahoe_add_alias.list_aliases(options) return rc def list_(options): from allmydata.scripts import tahoe_ls rc = tahoe_ls.ls(options) return rc def get(options): from allmydata.scripts import tahoe_get rc = tahoe_get.get(options) if rc == 0: if options.to_file is None: # be quiet, since the file being written to stdout should be # proof enough that it worked, unless the user is unlucky # enough to have picked an empty file pass else: print("%s retrieved and written to %s" % \ (options.from_file, options.to_file), file=options.stderr) return rc def put(options): from allmydata.scripts import tahoe_put rc = tahoe_put.put(options) return rc def cp(options): from allmydata.scripts import tahoe_cp rc = tahoe_cp.copy(options) return rc def unlink(options, command="unlink"): from allmydata.scripts import tahoe_unlink rc = tahoe_unlink.unlink(options, command=command) return rc def rm(options): return unlink(options, command="rm") def mv(options): from allmydata.scripts import tahoe_mv rc = tahoe_mv.mv(options, mode="move") return rc def ln(options): from allmydata.scripts import tahoe_mv rc = tahoe_mv.mv(options, mode="link") return rc def backup(options): from allmydata.scripts import tahoe_backup rc = tahoe_backup.backup(options) return rc def webopen(options, opener=None): from allmydata.scripts import tahoe_webopen rc = tahoe_webopen.webopen(options, opener=opener) return rc def manifest(options): from allmydata.scripts import tahoe_manifest rc = tahoe_manifest.manifest(options) return rc def stats(options): from allmydata.scripts import tahoe_manifest rc = tahoe_manifest.stats(options) return rc def check(options): from allmydata.scripts import tahoe_check rc = tahoe_check.check(options) return rc def deepcheck(options): from 
allmydata.scripts import tahoe_check rc = tahoe_check.deepcheck(options) return rc def status(options): from allmydata.scripts import tahoe_status return tahoe_status.do_status(options) dispatch = { "mkdir": mkdir, "add-alias": add_alias, "create-alias": create_alias, "list-aliases": list_aliases, "ls": list_, "get": get, "put": put, "cp": cp, "unlink": unlink, "rm": rm, "mv": mv, "ln": ln, "backup": backup, "webopen": webopen, "manifest": manifest, "stats": stats, "check": check, "deep-check": deepcheck, "status": status, } tahoe_lafs-1.20.0/src/allmydata/scripts/common.py0000644000000000000000000002401613615410400016712 0ustar00# coding: utf-8 """ Ported to Python 3. """ from typing import Union, Optional import os, sys, textwrap import codecs from os.path import join import urllib.parse from yaml import ( safe_dump, ) from twisted.python import usage from allmydata.util.assertutil import precondition from allmydata.util.encodingutil import quote_output, \ quote_local_unicode_path, argv_to_abspath from allmydata.scripts.default_nodedir import _default_nodedir from .types_ import Parameters def get_default_nodedir(): return _default_nodedir def wrap_paragraphs(text, width): # like textwrap.wrap(), but preserve paragraphs (delimited by double # newlines) and leading whitespace, and remove internal whitespace. text = textwrap.dedent(text) if text.startswith("\n"): text = text[1:] return "\n\n".join([textwrap.fill(paragraph, width=width) for paragraph in text.split("\n\n")]) class BaseOptions(usage.Options): def __init__(self): super(BaseOptions, self).__init__() self.command_name = os.path.basename(sys.argv[0]) # Only allow "tahoe --version", not e.g. "tahoe --version" def opt_version(self): raise usage.UsageError("--version not allowed on subcommands") description : Optional[str] = None description_unwrapped = None # type: Optional[str] def __str__(self): width = int(os.environ.get('COLUMNS', '80')) s = (self.getSynopsis() + '\n' + "(use 'tahoe --help' to view global options)\n" + '\n' + self.getUsage()) if self.description: s += '\n' + wrap_paragraphs(self.description, width) + '\n' if self.description_unwrapped: du = textwrap.dedent(self.description_unwrapped) if du.startswith("\n"): du = du[1:] s += '\n' + du + '\n' return s class BasedirOptions(BaseOptions): default_nodedir = _default_nodedir optParameters : Parameters = [ ["basedir", "C", None, "Specify which Tahoe base directory should be used. [default: %s]" % quote_local_unicode_path(_default_nodedir)], ] def parseArgs(self, basedir=None): # This finds the node-directory option correctly even if we are in a subcommand. 
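        # (descriptive note) Global options such as --node-directory live on
        # the top-level "tahoe" Options object, so walk up the parent chain
        # to reach it before consulting root['node-directory'].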
root = self.parent while root.parent is not None: root = root.parent if root['node-directory'] and self['basedir']: raise usage.UsageError("The --node-directory (or -d) and --basedir (or -C) options cannot both be used.") if root['node-directory'] and basedir: raise usage.UsageError("The --node-directory (or -d) option and a basedir argument cannot both be used.") if self['basedir'] and basedir: raise usage.UsageError("The --basedir (or -C) option and a basedir argument cannot both be used.") if basedir: b = argv_to_abspath(basedir) elif self['basedir']: b = argv_to_abspath(self['basedir']) elif root['node-directory']: b = argv_to_abspath(root['node-directory']) elif self.default_nodedir: b = self.default_nodedir else: raise usage.UsageError("No default basedir available, you must provide one with --node-directory, --basedir, or a basedir argument") self['basedir'] = b self['node-directory'] = b def postOptions(self): if not self['basedir']: raise usage.UsageError("A base directory for the node must be provided.") class NoDefaultBasedirOptions(BasedirOptions): default_nodedir = None optParameters = [ ["basedir", "C", None, "Specify which Tahoe base directory should be used."], ] # type: Parameters # This is overridden in order to ensure we get a "Wrong number of arguments." # error when more than one argument is given. def parseArgs(self, basedir=None): BasedirOptions.parseArgs(self, basedir) def getSynopsis(self): return "Usage: %s [global-options] %s [options] NODEDIR" % (self.command_name, self.subcommand_name) DEFAULT_ALIAS = u"tahoe" def write_introducer(basedir, petname, furl): """ Overwrite the node's ``introducers.yaml`` with a file containing the given introducer information. """ if isinstance(furl, bytes): furl = furl.decode("utf-8") private = basedir.child(b"private") private.makedirs(ignoreExistingDirectory=True) private.child(b"introducers.yaml").setContent( safe_dump({ "introducers": { petname: { "furl": furl, }, }, }).encode("ascii"), ) def get_introducer_furl(nodedir, config): """ :return: the introducer FURL for the given node (no matter if it's a client-type node or an introducer itself) """ for petname, (furl, cache) in config.get_introducer_configuration().items(): return furl # We have no configured introducers. Maybe this is running *on* the # introducer? Let's guess, sure why not. 
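    # (descriptive note) An introducer node typically writes its own FURL to
    # private/introducer.furl, so fall back to reading that file before
    # giving up.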
try: with open(join(nodedir, "private", "introducer.furl"), "r") as f: return f.read().strip() except IOError: raise Exception( "Can't find introducer FURL in tahoe.cfg nor " "{}/private/introducer.furl".format(nodedir) ) def get_aliases(nodedir): aliases = {} aliasfile = os.path.join(nodedir, "private", "aliases") rootfile = os.path.join(nodedir, "private", "root_dir.cap") try: with open(rootfile, "r") as f: rootcap = f.read().strip() if rootcap: aliases[DEFAULT_ALIAS] = rootcap except EnvironmentError: pass try: with codecs.open(aliasfile, "r", "utf-8") as f: for line in f: line = line.strip() if line.startswith("#") or not line: continue name, cap = line.split(u":", 1) # normalize it: remove http: prefix, urldecode cap = cap.strip().encode('utf-8') aliases[name] = cap except EnvironmentError: pass return aliases class DefaultAliasMarker(object): pass pretend_platform_uses_lettercolon = False # for tests def platform_uses_lettercolon_drivename(): if ("win32" in sys.platform.lower() or "cygwin" in sys.platform.lower() or pretend_platform_uses_lettercolon): return True return False class TahoeError(Exception): def __init__(self, msg): Exception.__init__(self, msg) self.msg = msg def display(self, err): print(self.msg, file=err) class UnknownAliasError(TahoeError): def __init__(self, msg): TahoeError.__init__(self, "error: " + msg) def get_alias(aliases, path_unicode, default): """ Transform u"work:path/filename" into (aliases[u"work"], u"path/filename".encode('utf-8')). If default=None, then an empty alias is indicated by returning DefaultAliasMarker. We special-case strings with a recognized cap URI prefix, to make it easy to access specific files/directories by their caps. If the transformed alias is either not found in aliases, or is blank and default is not found in aliases, an UnknownAliasError is raised. """ precondition(isinstance(path_unicode, str), path_unicode) from allmydata import uri path = path_unicode.encode('utf-8').strip(b" ") if uri.has_uri_prefix(path): # We used to require "URI:blah:./foo" in order to get a subpath, # stripping out the ":./" sequence. We still allow that for compatibility, # but now also allow just "URI:blah/foo". sep = path.find(b":./") if sep != -1: return path[:sep], path[sep+3:] sep = path.find(b"/") if sep != -1: return path[:sep], path[sep+1:] return path, b"" colon = path.find(b":") if colon == -1: # no alias if default == None: return DefaultAliasMarker, path if default not in aliases: raise UnknownAliasError("No alias specified, and the default %s alias doesn't exist. " "To create it, use 'tahoe create-alias %s'." % (quote_output(default), quote_output(default, quotemarks=False))) return uri.from_string_dirnode(aliases[default]).to_string(), path if colon == 1 and default is None and platform_uses_lettercolon_drivename(): # treat C:\why\must\windows\be\so\weird as a local path, not a tahoe # file in the "C:" alias return DefaultAliasMarker, path # decoding must succeed because path is valid UTF-8 and colon & space are ASCII alias = path[:colon].decode('utf-8') if u"/" in alias: # no alias, but there's a colon in a dirname/filename, like # "foo/bar:7" if default == None: return DefaultAliasMarker, path if default not in aliases: raise UnknownAliasError("No alias specified, and the default %s alias doesn't exist. " "To create it, use 'tahoe create-alias %s'." 
% (quote_output(default), quote_output(default, quotemarks=False))) return uri.from_string_dirnode(aliases[default]).to_string(), path if alias not in aliases: raise UnknownAliasError("Unknown alias %s, please create it with 'tahoe add-alias' or 'tahoe create-alias'." % quote_output(alias)) return uri.from_string_dirnode(aliases[alias]).to_string(), path[colon+1:] def escape_path(path: Union[str, bytes]) -> str: """ Return path quoted to US-ASCII, valid URL characters. >>> path = u'/føö/bar/☃' >>> escaped = escape_path(path) >>> escaped u'/f%C3%B8%C3%B6/bar/%E2%98%83' """ if isinstance(path, str): path = path.encode("utf-8") segments = path.split(b"/") result = str( b"/".join([ urllib.parse.quote(s).encode("ascii") for s in segments ]), "ascii" ) return result tahoe_lafs-1.20.0/src/allmydata/scripts/common_http.py0000644000000000000000000000656413615410400017761 0ustar00""" Blocking HTTP client APIs. """ import os from io import BytesIO from http import client as http_client import urllib import allmydata # for __full_version__ from allmydata.util.encodingutil import quote_output from allmydata.scripts.common import TahoeError from socket import error as socket_error # copied from twisted/web/client.py def parse_url(url, defaultPort=None): url = url.strip() parsed = urllib.parse.urlparse(url) scheme = parsed[0] path = urllib.parse.urlunparse(('','')+parsed[2:]) if defaultPort is None: if scheme == 'https': defaultPort = 443 else: defaultPort = 80 host, port = parsed[1], defaultPort if ':' in host: host, port = host.split(':') port = int(port) if path == "": path = "/" return scheme, host, port, path class BadResponse(object): def __init__(self, url, err): self.status = -1 self.reason = "Error trying to connect to %s: %s" % (url, err) self.error = err def read(self, length=0): return "" def do_http(method, url, body=b""): if isinstance(body, bytes): body = BytesIO(body) elif isinstance(body, str): raise TypeError("do_http body must be a bytestring, not unicode") else: # We must give a Content-Length header to twisted.web, otherwise it # seems to get a zero-length file. I suspect that "chunked-encoding" # may fix this. assert body.tell assert body.seek assert body.read scheme, host, port, path = parse_url(url) # For testing purposes, allow setting a timeout on HTTP requests. If this # ever become a user-facing feature, this should probably be a CLI option? 
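    # For example (shell, illustrative):
    #
    #   __TAHOE_CLI_HTTP_TIMEOUT=30 tahoe ls tahoe:
    #
    # makes HTTP requests issued through this helper time out after 30 seconds.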
timeout = os.environ.get("__TAHOE_CLI_HTTP_TIMEOUT", None) if timeout is not None: timeout = float(timeout) if scheme == "http": c = http_client.HTTPConnection(host, port, timeout=timeout, blocksize=65536) elif scheme == "https": c = http_client.HTTPSConnection(host, port, timeout=timeout, blocksize=65536) else: raise ValueError("unknown scheme '%s', need http or https" % scheme) c.putrequest(method, path) c.putheader("Hostname", host) c.putheader("User-Agent", allmydata.__full_version__ + " (tahoe-client)") c.putheader("Accept", "text/plain, application/octet-stream") c.putheader("Connection", "close") old = body.tell() body.seek(0, os.SEEK_END) length = body.tell() body.seek(old) c.putheader("Content-Length", str(length)) try: c.endheaders() except socket_error as err: return BadResponse(url, err) while True: data = body.read(65536) if not data: break c.send(data) return c.getresponse() def format_http_success(resp): return quote_output( "%s %s" % (resp.status, resp.reason), quotemarks=False) def format_http_error(msg, resp): return quote_output( "%s: %s %s\n%r" % (msg, resp.status, resp.reason, resp.read()), quotemarks=False) def check_http_error(resp, stderr): if resp.status < 200 or resp.status >= 300: print(format_http_error("Error during HTTP request", resp), file=stderr) return 1 class HTTPError(TahoeError): def __init__(self, msg, resp): TahoeError.__init__(self, format_http_error(msg, resp)) tahoe_lafs-1.20.0/src/allmydata/scripts/create_node.py0000644000000000000000000005173213615410400017677 0ustar00 from __future__ import annotations from typing import Optional import io import os from allmydata.scripts.types_ import ( SubCommands, Parameters, Flags, ) from twisted.internet import reactor, defer from twisted.python.usage import UsageError from twisted.python.filepath import ( FilePath, ) from allmydata.scripts.common import ( BasedirOptions, NoDefaultBasedirOptions, write_introducer, ) from allmydata.scripts.default_nodedir import _default_nodedir from allmydata.util import dictutil from allmydata.util.assertutil import precondition from allmydata.util.encodingutil import listdir_unicode, argv_to_unicode, quote_local_unicode_path, get_io_encoding i2p_provider: Listener tor_provider: Listener from allmydata.util import fileutil, i2p_provider, tor_provider, jsonbytes as json from ..listeners import ListenerConfig, Listener, TCPProvider, StaticProvider def _get_listeners() -> dict[str, Listener]: """ Get all of the kinds of listeners we might be able to use. """ return { "tor": tor_provider, "i2p": i2p_provider, "tcp": TCPProvider(), "none": StaticProvider( available=True, hide_ip=False, config=defer.succeed(None), # This is supposed to be an IAddressFamily but we have none for # this kind of provider. We could implement new client and server # endpoint types that always fail and pass an IAddressFamily here # that uses those. Nothing would ever even ask for them (at # least, yet), let alone try to use them, so that's a lot of extra # work for no practical result so I'm not doing it now. address=None, # type: ignore[arg-type] ), } _LISTENERS = _get_listeners() dummy_tac = """ import sys print("Nodes created by Tahoe-LAFS v1.11.0 or later cannot be run by") print("releases of Tahoe-LAFS before v1.10.0.") sys.exit(1) """ def write_tac(basedir, nodetype): fileutil.write(os.path.join(basedir, "tahoe-%s.tac" % (nodetype,)), dummy_tac) WHERE_OPTS : Parameters = [ ("location", None, None, "Server location to advertise (e.g. 
tcp:example.org:12345)"), ("port", None, None, "Server endpoint to listen on (e.g. tcp:12345, or tcp:12345:interface=127.0.0.1."), ("hostname", None, None, "Hostname to automatically set --location/--port when --listen=tcp"), ("listen", None, "tcp", "Comma-separated list of listener types (tcp,tor,i2p,none)."), ] TOR_OPTS : Parameters = [ ("tor-control-port", None, None, "Tor's control port endpoint descriptor string (e.g. tcp:127.0.0.1:9051 or unix:/var/run/tor/control)"), ("tor-executable", None, None, "The 'tor' executable to run (default is to search $PATH)."), ] TOR_FLAGS : Flags = [ ("tor-launch", None, "Launch a tor instead of connecting to a tor control port."), ] I2P_OPTS : Parameters = [ ("i2p-sam-port", None, None, "I2P's SAM API port endpoint descriptor string (e.g. tcp:127.0.0.1:7656)"), ("i2p-executable", None, None, "(future) The 'i2prouter' executable to run (default is to search $PATH)."), ] I2P_FLAGS : Flags = [ ("i2p-launch", None, "(future) Launch an I2P router instead of connecting to a SAM API port."), ] def validate_where_options(o): if o['listen'] == "none": # no other arguments are accepted if o['hostname']: raise UsageError("--hostname cannot be used when --listen=none") if o['port'] or o['location']: raise UsageError("--port/--location cannot be used when --listen=none") # --location and --port: overrides all others, rejects all others if o['location'] and not o['port']: raise UsageError("--location must be used with --port") if o['port'] and not o['location']: raise UsageError("--port must be used with --location") if o['location'] and o['port']: if o['hostname']: raise UsageError("--hostname cannot be used with --location/--port") # TODO: really, we should reject an explicit --listen= option (we # want them to omit it entirely, because --location/--port would # override anything --listen= might allocate). For now, just let it # pass, because that allows us to use --listen=tcp as the default in # optParameters, which (I think) gets included in the rendered --help # output, which is useful. In the future, let's reconsider the value # of that --help text (or achieve that documentation in some other # way), change the default to None, complain here if it's not None, # then change parseArgs() to transform the None into "tcp" else: # no --location and --port? expect --listen= (maybe the default), and # --listen=tcp requires --hostname. But --listen=none is special. 
if o['listen'] != "none" and o.get('join', None) is None: listeners = o['listen'].split(",") for l in listeners: if l not in _LISTENERS: raise UsageError( "--listen= must be one/some of: " f"{', '.join(sorted(_LISTENERS))}", ) if 'tcp' in listeners and not o['hostname']: raise UsageError("--listen=tcp requires --hostname=") if 'tcp' not in listeners and o['hostname']: raise UsageError("--listen= must be tcp to use --hostname") def validate_tor_options(o): use_tor = "tor" in o["listen"].split(",") if use_tor or any((o["tor-launch"], o["tor-control-port"])): if not _LISTENERS["tor"].is_available(): raise UsageError( "Specifying any Tor options requires the 'txtorcon' module" ) if not use_tor: if o["tor-launch"]: raise UsageError("--tor-launch requires --listen=tor") if o["tor-control-port"]: raise UsageError("--tor-control-port= requires --listen=tor") if o["tor-launch"] and o["tor-control-port"]: raise UsageError("use either --tor-launch or --tor-control-port=, not both") def validate_i2p_options(o): use_i2p = "i2p" in o["listen"].split(",") if use_i2p or any((o["i2p-launch"], o["i2p-sam-port"])): if not _LISTENERS["i2p"].is_available(): raise UsageError( "Specifying any I2P options requires the 'txi2p' module" ) if not use_i2p: if o["i2p-launch"]: raise UsageError("--i2p-launch requires --listen=i2p") if o["i2p-sam-port"]: raise UsageError("--i2p-sam-port= requires --listen=i2p") if o["i2p-launch"] and o["i2p-sam-port"]: raise UsageError("use either --i2p-launch or --i2p-sam-port=, not both") if o["i2p-launch"]: raise UsageError("--i2p-launch is under development") class _CreateBaseOptions(BasedirOptions): optFlags = [ ("hide-ip", None, "prohibit any configuration that would reveal the node's IP address"), ] def postOptions(self): super(_CreateBaseOptions, self).postOptions() if self['hide-ip']: ip_hiders = dictutil.filter(lambda v: v.can_hide_ip(), _LISTENERS) available = dictutil.filter(lambda v: v.is_available(), ip_hiders) if not available: raise UsageError( "--hide-ip was specified but no IP-hiding listener is installed.\n" "Try one of these:\n" + "".join([ f"\tpip install tahoe-lafs[{name}]\n" for name in ip_hiders ]) ) class CreateClientOptions(_CreateBaseOptions): synopsis = "[options] [NODEDIR]" description = "Create a client-only Tahoe-LAFS node (no storage server)." optParameters = [ # we provide 'create-node'-time options for the most common # configuration knobs. The rest can be controlled by editing # tahoe.cfg before node startup. ("nickname", "n", None, "Specify the nickname for this node."), ("introducer", "i", None, "Specify the introducer FURL to use."), ("webport", "p", "tcp:3456:interface=127.0.0.1", "Specify which TCP port to run the HTTP interface on. Use 'none' to disable."), ("basedir", "C", None, "Specify which Tahoe base directory should be used. This has the same effect as the global --node-directory option. [default: %s]" % quote_local_unicode_path(_default_nodedir)), ("shares-needed", None, 3, "Needed shares required for uploaded files."), ("shares-happy", None, 7, "How many servers new files must be placed on."), ("shares-total", None, 10, "Total shares required for uploaded files."), ("join", None, None, "Join a grid with the given Invite Code."), ] # type: Parameters # This is overridden in order to ensure we get a "Wrong number of # arguments." error when more than one argument is given. 
def parseArgs(self, basedir=None): BasedirOptions.parseArgs(self, basedir) for name in ["shares-needed", "shares-happy", "shares-total"]: try: int(self[name]) except ValueError: raise UsageError( "--{} must be an integer".format(name) ) class CreateNodeOptions(CreateClientOptions): optFlags = [ ("no-storage", None, "Do not offer storage service to other nodes."), ("helper", None, "Enable helper"), ] + TOR_FLAGS + I2P_FLAGS synopsis = "[options] [NODEDIR]" description = "Create a full Tahoe-LAFS node (client+server)." optParameters = [ ("storage-dir", None, None, "Path where the storage will be placed."), ] + CreateClientOptions.optParameters + WHERE_OPTS + TOR_OPTS + I2P_OPTS def parseArgs(self, basedir=None): CreateClientOptions.parseArgs(self, basedir) validate_where_options(self) validate_tor_options(self) validate_i2p_options(self) class CreateIntroducerOptions(NoDefaultBasedirOptions): subcommand_name = "create-introducer" description = "Create a Tahoe-LAFS introducer." optFlags = [ ("hide-ip", None, "prohibit any configuration that would reveal the node's IP address"), ] + TOR_FLAGS + I2P_FLAGS optParameters = NoDefaultBasedirOptions.optParameters + WHERE_OPTS + TOR_OPTS + I2P_OPTS def parseArgs(self, basedir=None): NoDefaultBasedirOptions.parseArgs(self, basedir) validate_where_options(self) validate_tor_options(self) validate_i2p_options(self) def merge_config( left: Optional[ListenerConfig], right: Optional[ListenerConfig], ) -> Optional[ListenerConfig]: """ Merge two listener configurations into one configuration representing both of them. If either is ``None`` then the result is ``None``. This supports the "disable listeners" functionality. :raise ValueError: If the keys in the node configs overlap. """ if left is None or right is None: return None overlap = set(left.node_config) & set(right.node_config) if overlap: raise ValueError(f"Node configs overlap: {overlap}") return ListenerConfig( list(left.tub_ports) + list(right.tub_ports), list(left.tub_locations) + list(right.tub_locations), dict(list(left.node_config.items()) + list(right.node_config.items())), ) async def write_node_config(c, config): # this is shared between clients and introducers c.write("# -*- mode: conf; coding: {c.encoding} -*-\n".format(c=c)) c.write("\n") c.write("# This file controls the configuration of the Tahoe node that\n") c.write("# lives in this directory. It is only read at node startup.\n") c.write("# For details about the keys that can be set here, please\n") c.write("# read the 'docs/configuration.rst' file that came with your\n") c.write("# Tahoe installation.\n") c.write("\n\n") if config["hide-ip"]: c.write("[connections]\n") if _LISTENERS["tor"].is_available(): c.write("tcp = tor\n") else: # XXX What about i2p? 
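                # (descriptive note) --hide-ip was requested but no Tor
                # support is installed, so disable plain-TCP connections
                # outright rather than risk revealing the node's IP address.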
c.write("tcp = disabled\n") c.write("\n") c.write("[node]\n") nickname = argv_to_unicode(config.get("nickname") or "") c.write("nickname = %s\n" % (nickname,)) if config["hide-ip"]: c.write("reveal-IP-address = false\n") else: c.write("reveal-IP-address = true\n") # TODO: validate webport webport = argv_to_unicode(config.get("webport") or "none") if webport.lower() == "none": webport = "" c.write("web.port = %s\n" % (webport,)) c.write("web.static = public_html\n") listener_config = ListenerConfig([], [], {}) for listener_name in config['listen'].split(","): listener = _LISTENERS[listener_name] listener_config = merge_config( (await listener.create_config(reactor, config)), listener_config, ) if listener_config is None: tub_ports = ["disabled"] tub_locations = ["disabled"] else: tub_ports = listener_config.tub_ports tub_locations = listener_config.tub_locations c.write("tub.port = %s\n" % ",".join(tub_ports)) c.write("tub.location = %s\n" % ",".join(tub_locations)) c.write("\n") c.write("#log_gatherer.furl =\n") c.write("#timeout.keepalive =\n") c.write("#timeout.disconnect =\n") c.write("#ssh.port = 8022\n") c.write("#ssh.authorized_keys_file = ~/.ssh/authorized_keys\n") c.write("\n") if listener_config is not None: for section, items in listener_config.node_config.items(): c.write(f"[{section}]\n") for k, v in items: c.write(f"{k} = {v}\n") c.write("\n") def write_client_config(c, config): introducer = config.get("introducer", None) if introducer is not None: write_introducer( FilePath(config["basedir"]), "default", introducer, ) c.write("[client]\n") c.write("helper.furl =\n") c.write("\n") c.write("# Encoding parameters this client will use for newly-uploaded files\n") c.write("# This can be changed at any time: the encoding is saved in\n") c.write("# each filecap, and we can download old files with any encoding\n") c.write("# settings\n") c.write("shares.needed = {}\n".format(config['shares-needed'])) c.write("shares.happy = {}\n".format(config['shares-happy'])) c.write("shares.total = {}\n".format(config['shares-total'])) c.write("\n") boolstr = {True:"true", False:"false"} c.write("[storage]\n") c.write("# Shall this node provide storage service?\n") storage_enabled = not config.get("no-storage", None) c.write("enabled = %s\n" % boolstr[storage_enabled]) c.write("#readonly =\n") c.write("reserved_space = 1G\n") storage_dir = config.get("storage-dir") if storage_dir: c.write("storage_dir = %s\n" % (storage_dir,)) else: c.write("#storage_dir =\n") c.write("#expire.enabled =\n") c.write("#expire.mode =\n") c.write("\n") c.write("[helper]\n") c.write("# Shall this node run a helper service that clients can use?\n") if config.get("helper"): c.write("enabled = true\n") else: c.write("enabled = false\n") c.write("\n") @defer.inlineCallbacks def _get_config_via_wormhole(config): out = config.stdout print("Opening wormhole with code '{}'".format(config['join']), file=out) relay_url = config.parent['wormhole-server'] print("Connecting to '{}'".format(relay_url), file=out) wh = config.parent.wormhole.create( appid=config.parent['wormhole-invite-appid'], relay_url=relay_url, reactor=reactor, ) code = str(config['join']) wh.set_code(code) yield wh.get_welcome() print("Connected to wormhole server", file=out) intro = { u"abilities": { "client-v1": {}, } } wh.send_message(json.dumps_bytes(intro)) server_intro = yield wh.get_message() server_intro = json.loads(server_intro) print(" received server introduction", file=out) if u'abilities' not in server_intro: raise RuntimeError(" Expected 
'abilities' in server introduction") if u'server-v1' not in server_intro['abilities']: raise RuntimeError(" Expected 'server-v1' in server abilities") remote_data = yield wh.get_message() print(" received configuration", file=out) defer.returnValue(json.loads(remote_data)) @defer.inlineCallbacks def create_node(config): out = config.stdout err = config.stderr basedir = config['basedir'] # This should always be called with an absolute Unicode basedir. precondition(isinstance(basedir, str), basedir) if os.path.exists(basedir): if listdir_unicode(basedir): print("The base directory %s is not empty." % quote_local_unicode_path(basedir), file=err) print("To avoid clobbering anything, I am going to quit now.", file=err) print("Please use a different directory, or empty this one.", file=err) defer.returnValue(-1) # we're willing to use an empty directory else: os.mkdir(basedir) write_tac(basedir, "client") # if we're doing magic-wormhole stuff, do it now if config['join'] is not None: try: remote_config = yield _get_config_via_wormhole(config) except RuntimeError as e: print(str(e), file=err) defer.returnValue(1) # configuration we'll allow the inviter to set whitelist = [ 'shares-happy', 'shares-needed', 'shares-total', 'introducer', 'nickname', ] sensitive_keys = ['introducer'] print("Encoding: {shares-needed} of {shares-total} shares, on at least {shares-happy} servers".format(**remote_config), file=out) print("Overriding the following config:", file=out) for k in whitelist: v = remote_config.get(k, None) if v is not None: # we're faking usually argv-supplied options :/ v_orig = v if isinstance(v, str): v = v.encode(get_io_encoding()) config[k] = v if k not in sensitive_keys: if k not in ['shares-happy', 'shares-total', 'shares-needed']: print(" {}: {}".format(k, v_orig), file=out) else: print(" {}: [sensitive data; see tahoe.cfg]".format(k), file=out) fileutil.make_dirs(os.path.join(basedir, "private"), 0o700) cfg_name = os.path.join(basedir, "tahoe.cfg") with io.open(cfg_name, "w", encoding='utf-8') as c: yield defer.Deferred.fromCoroutine(write_node_config(c, config)) write_client_config(c, config) print("Node created in %s" % quote_local_unicode_path(basedir), file=out) tahoe_cfg = quote_local_unicode_path(os.path.join(basedir, "tahoe.cfg")) introducers_yaml = quote_local_unicode_path( os.path.join(basedir, "private", "introducers.yaml"), ) if not config.get("introducer", ""): print(" Please add introducers to %s!" % (introducers_yaml,), file=out) print(" The node cannot connect to a grid without it.", file=out) if not config.get("nickname", ""): print(" Please set [node]nickname= in %s" % tahoe_cfg, file=out) defer.returnValue(0) def create_client(config): config['no-storage'] = True config['listen'] = "none" return create_node(config) @defer.inlineCallbacks def create_introducer(config): out = config.stdout err = config.stderr basedir = config['basedir'] # This should always be called with an absolute Unicode basedir. precondition(isinstance(basedir, str), basedir) if os.path.exists(basedir): if listdir_unicode(basedir): print("The base directory %s is not empty." 
% quote_local_unicode_path(basedir), file=err) print("To avoid clobbering anything, I am going to quit now.", file=err) print("Please use a different directory, or empty this one.", file=err) defer.returnValue(-1) # we're willing to use an empty directory else: os.mkdir(basedir) write_tac(basedir, "introducer") fileutil.make_dirs(os.path.join(basedir, "private"), 0o700) cfg_name = os.path.join(basedir, "tahoe.cfg") with io.open(cfg_name, "w", encoding='utf-8') as c: yield defer.Deferred.fromCoroutine(write_node_config(c, config)) print("Introducer created in %s" % quote_local_unicode_path(basedir), file=out) defer.returnValue(0) subCommands : SubCommands = [ ("create-node", None, CreateNodeOptions, "Create a node that acts as a client, server or both."), ("create-client", None, CreateClientOptions, "Create a client node (with storage initially disabled)."), ("create-introducer", None, CreateIntroducerOptions, "Create an introducer node."), ] dispatch = { "create-node": create_node, "create-client": create_client, "create-introducer": create_introducer, } tahoe_lafs-1.20.0/src/allmydata/scripts/debug.py0000644000000000000000000012350413615410400016512 0ustar00""" Ported to Python 3. """ from future.utils import bchr import struct, time, os, sys from twisted.python import usage, failure from twisted.internet import defer from foolscap.logging import cli as foolscap_cli from allmydata.scripts.common import BaseOptions from allmydata import uri from allmydata.storage.mutable import MutableShareFile from allmydata.storage.immutable import ShareFile from allmydata.mutable.layout import unpack_share from allmydata.mutable.layout import MDMFSlotReadProxy from allmydata.mutable.common import NeedMoreDataError from allmydata.immutable.layout import ReadBucketProxy from allmydata.util import base32 from allmydata.util.encodingutil import quote_output from allmydata.scripts.types_ import SubCommands class DumpOptions(BaseOptions): def getSynopsis(self): return "Usage: tahoe [global-options] debug dump-share SHARE_FILENAME" optFlags = [ ["offsets", None, "Display a table of section offsets."], ["leases-only", None, "Dump leases but not CHK contents."], ] description = """ Print lots of information about the given share, by parsing the share's contents. This includes share type, lease information, encoding parameters, hash-tree roots, public keys, and segment sizes. This command also emits a verify-cap for the file that uses the share. 
tahoe debug dump-share testgrid/node-3/storage/shares/4v/4vozh77tsrw7mdhnj7qvp5ky74/0 """ def parseArgs(self, filename): from allmydata.util.encodingutil import argv_to_abspath self['filename'] = argv_to_abspath(filename) def dump_share(options): from allmydata.storage.mutable import MutableShareFile from allmydata.util.encodingutil import quote_output out = options.stdout # check the version, to see if we have a mutable or immutable share print("share filename: %s" % quote_output(options['filename']), file=out) with open(options['filename'], "rb") as f: if MutableShareFile.is_valid_header(f.read(32)): return dump_mutable_share(options) # otherwise assume it's immutable return dump_immutable_share(options) def dump_immutable_share(options): from allmydata.storage.immutable import ShareFile out = options.stdout f = ShareFile(options['filename']) if not options["leases-only"]: dump_immutable_chk_share(f, out, options) dump_immutable_lease_info(f, out) print(file=out) return 0 def dump_immutable_chk_share(f, out, options): from allmydata import uri from allmydata.util import base32 from allmydata.immutable.layout import ReadBucketProxy from allmydata.util.encodingutil import quote_output, to_bytes # use a ReadBucketProxy to parse the bucket and find the uri extension bp = ReadBucketProxy(None, None, '') offsets = bp._parse_offsets(f.read_share_data(0, 0x44)) print("%20s: %d" % ("version", bp._version), file=out) seek = offsets['uri_extension'] length = struct.unpack(bp._fieldstruct, f.read_share_data(seek, bp._fieldsize))[0] seek += bp._fieldsize UEB_data = f.read_share_data(seek, length) unpacked = uri.unpack_extension_readable(UEB_data) keys1 = ("size", "num_segments", "segment_size", "needed_shares", "total_shares") keys2 = ("codec_name", "codec_params", "tail_codec_params") keys3 = ("plaintext_hash", "plaintext_root_hash", "crypttext_hash", "crypttext_root_hash", "share_root_hash", "UEB_hash") display_keys = {"size": "file_size"} def to_string(v): if isinstance(v, bytes): return str(v, "utf-8") else: return str(v) for k in keys1: if k in unpacked: dk = display_keys.get(k, k) print("%20s: %s" % (dk, to_string(unpacked[k])), file=out) print(file=out) for k in keys2: if k in unpacked: dk = display_keys.get(k, k) print("%20s: %s" % (dk, to_string(unpacked[k])), file=out) print(file=out) for k in keys3: if k in unpacked: dk = display_keys.get(k, k) print("%20s: %s" % (dk, to_string(unpacked[k])), file=out) leftover = set(unpacked.keys()) - set(keys1 + keys2 + keys3) if leftover: print(file=out) print("LEFTOVER:", file=out) for k in sorted(leftover): print("%20s: %s" % (k, to_string(unpacked[k])), file=out) # the storage index isn't stored in the share itself, so we depend upon # knowing the parent directory name to get it pieces = options['filename'].split(os.sep) if len(pieces) >= 2: piece = to_bytes(pieces[-2]) if base32.could_be_base32_encoded(piece): storage_index = base32.a2b(piece) uri_extension_hash = base32.a2b(unpacked["UEB_hash"]) u = uri.CHKFileVerifierURI(storage_index, uri_extension_hash, unpacked["needed_shares"], unpacked["total_shares"], unpacked["size"]) verify_cap = u.to_string() print("%20s: %s" % ("verify-cap", quote_output(verify_cap, quotemarks=False)), file=out) sizes = {} sizes['data'] = (offsets['plaintext_hash_tree'] - offsets['data']) sizes['validation'] = (offsets['uri_extension'] - offsets['plaintext_hash_tree']) sizes['uri-extension'] = len(UEB_data) print(file=out) print(" Size of data within the share:", file=out) for k in sorted(sizes): print("%20s: %s" % (k, 
sizes[k]), file=out) if options['offsets']: print(file=out) print(" Section Offsets:", file=out) print("%20s: %s" % ("share data", f._data_offset), file=out) for k in ["data", "plaintext_hash_tree", "crypttext_hash_tree", "block_hashes", "share_hashes", "uri_extension"]: name = {"data": "block data"}.get(k,k) offset = f._data_offset + offsets[k] print(" %20s: %s (0x%x)" % (name, offset, offset), file=out) print("%20s: %s" % ("leases", f._lease_offset), file=out) def dump_immutable_lease_info(f, out): # display lease information too print(file=out) leases = list(f.get_leases()) if leases: for i,lease in enumerate(leases): when = format_expiration_time(lease.get_expiration_time()) print(" Lease #%d: owner=%d, expire in %s" \ % (i, lease.owner_num, when), file=out) else: print(" No leases.", file=out) def format_expiration_time(expiration_time): now = time.time() remains = expiration_time - now when = "%ds" % remains if remains > 24*3600: when += " (%d days)" % (remains // (24*3600)) elif remains > 3600: when += " (%d hours)" % (remains // 3600) return when def dump_mutable_share(options): from allmydata.storage.mutable import MutableShareFile from allmydata.util import base32, idlib out = options.stdout m = MutableShareFile(options['filename']) f = open(options['filename'], "rb") WE, nodeid = m._read_write_enabler_and_nodeid(f) num_extra_leases = m._read_num_extra_leases(f) data_length = m._read_data_length(f) extra_lease_offset = m._read_extra_lease_offset(f) container_size = extra_lease_offset - m.DATA_OFFSET leases = list(m._enumerate_leases(f)) share_type = "unknown" f.seek(m.DATA_OFFSET) version = f.read(1) if version == b"\x00": # this slot contains an SMDF share share_type = "SDMF" elif version == b"\x01": share_type = "MDMF" f.close() print(file=out) print("Mutable slot found:", file=out) print(" share_type: %s" % share_type, file=out) print(" write_enabler: %s" % str(base32.b2a(WE), "utf-8"), file=out) print(" WE for nodeid: %s" % idlib.nodeid_b2a(nodeid), file=out) print(" num_extra_leases: %d" % num_extra_leases, file=out) print(" container_size: %d" % container_size, file=out) print(" data_length: %d" % data_length, file=out) if leases: for (leasenum, lease) in leases: print(file=out) print(" Lease #%d:" % leasenum, file=out) print(" ownerid: %d" % lease.owner_num, file=out) when = format_expiration_time(lease.get_expiration_time()) print(" expires in %s" % when, file=out) print(" renew_secret: %s" % lease.present_renew_secret(), file=out) print(" cancel_secret: %s" % lease.present_cancel_secret(), file=out) print(" secrets are for nodeid: %s" % idlib.nodeid_b2a(lease.nodeid), file=out) else: print("No leases.", file=out) print(file=out) if share_type == "SDMF": dump_SDMF_share(m, data_length, options) elif share_type == "MDMF": dump_MDMF_share(m, data_length, options) return 0 def dump_SDMF_share(m, length, options): from allmydata.mutable.layout import unpack_share, unpack_header from allmydata.mutable.common import NeedMoreDataError from allmydata.util import base32, hashutil from allmydata.uri import SSKVerifierURI from allmydata.util.encodingutil import quote_output, to_bytes offset = m.DATA_OFFSET out = options.stdout f = open(options['filename'], "rb") f.seek(offset) data = f.read(min(length, 2000)) f.close() try: pieces = unpack_share(data) except NeedMoreDataError as e: # retry once with the larger size size = e.needed_bytes f = open(options['filename'], "rb") f.seek(offset) data = f.read(min(length, size)) f.close() pieces = unpack_share(data) (seqnum, root_hash, IV, k, 
N, segsize, datalen, pubkey, signature, share_hash_chain, block_hash_tree, share_data, enc_privkey) = pieces (ig_version, ig_seqnum, ig_roothash, ig_IV, ig_k, ig_N, ig_segsize, ig_datalen, offsets) = unpack_header(data) print(" SDMF contents:", file=out) print(" seqnum: %d" % seqnum, file=out) print(" root_hash: %s" % str(base32.b2a(root_hash), "utf-8"), file=out) print(" IV: %s" % str(base32.b2a(IV), "utf-8"), file=out) print(" required_shares: %d" % k, file=out) print(" total_shares: %d" % N, file=out) print(" segsize: %d" % segsize, file=out) print(" datalen: %d" % datalen, file=out) print(" enc_privkey: %d bytes" % len(enc_privkey), file=out) print(" pubkey: %d bytes" % len(pubkey), file=out) print(" signature: %d bytes" % len(signature), file=out) share_hash_ids = ",".join(sorted([str(hid) for hid in share_hash_chain.keys()])) print(" share_hash_chain: %s" % share_hash_ids, file=out) print(" block_hash_tree: %d nodes" % len(block_hash_tree), file=out) # the storage index isn't stored in the share itself, so we depend upon # knowing the parent directory name to get it pieces = options['filename'].split(os.sep) if len(pieces) >= 2: piece = to_bytes(pieces[-2]) if base32.could_be_base32_encoded(piece): storage_index = base32.a2b(piece) fingerprint = hashutil.ssk_pubkey_fingerprint_hash(pubkey) u = SSKVerifierURI(storage_index, fingerprint) verify_cap = u.to_string() print(" verify-cap:", quote_output(verify_cap, quotemarks=False), file=out) if options['offsets']: # NOTE: this offset-calculation code is fragile, and needs to be # merged with MutableShareFile's internals. print(file=out) print(" Section Offsets:", file=out) def printoffset(name, value, shift=0): print("%s%20s: %s (0x%x)" % (" "*shift, name, value, value), file=out) printoffset("first lease", m.HEADER_SIZE) printoffset("share data", m.DATA_OFFSET) o_seqnum = m.DATA_OFFSET + struct.calcsize(">B") printoffset("seqnum", o_seqnum, 2) o_root_hash = m.DATA_OFFSET + struct.calcsize(">BQ") printoffset("root_hash", o_root_hash, 2) for k in ["signature", "share_hash_chain", "block_hash_tree", "share_data", "enc_privkey", "EOF"]: name = {"share_data": "block data", "EOF": "end of share data"}.get(k,k) offset = m.DATA_OFFSET + offsets[k] printoffset(name, offset, 2) f = open(options['filename'], "rb") printoffset("extra leases", m._read_extra_lease_offset(f) + 4) f.close() print(file=out) def dump_MDMF_share(m, length, options): from allmydata.mutable.layout import MDMFSlotReadProxy from allmydata.util import base32, hashutil from allmydata.uri import MDMFVerifierURI from allmydata.util.encodingutil import quote_output, to_bytes offset = m.DATA_OFFSET out = options.stdout f = open(options['filename'], "rb") storage_index = None; shnum = 0 class ShareDumper(MDMFSlotReadProxy): def _read(self, readvs, force_remote=False, queue=False): data = [] for (where,length) in readvs: f.seek(offset+where) data.append(f.read(length)) return defer.succeed({shnum: data}) p = ShareDumper(None, storage_index, shnum) def extract(func): stash = [] # these methods return Deferreds, but we happen to know that they run # synchronously when not actually talking to a remote server d = func() d.addCallback(stash.append) return stash[0] verinfo = extract(p.get_verinfo) encprivkey = extract(p.get_encprivkey) signature = extract(p.get_signature) pubkey = extract(p.get_verification_key) block_hash_tree = extract(p.get_blockhashes) share_hash_chain = extract(p.get_sharehashes) f.close() (seqnum, root_hash, salt_to_use, segsize, datalen, k, N, prefix, offsets) = 
verinfo print(" MDMF contents:", file=out) print(" seqnum: %d" % seqnum, file=out) print(" root_hash: %s" % str(base32.b2a(root_hash), "utf-8"), file=out) #print(" IV: %s" % base32.b2a(IV), file=out) print(" required_shares: %d" % k, file=out) print(" total_shares: %d" % N, file=out) print(" segsize: %d" % segsize, file=out) print(" datalen: %d" % datalen, file=out) print(" enc_privkey: %d bytes" % len(encprivkey), file=out) print(" pubkey: %d bytes" % len(pubkey), file=out) print(" signature: %d bytes" % len(signature), file=out) share_hash_ids = ",".join([str(hid) for hid in sorted(share_hash_chain.keys())]) print(" share_hash_chain: %s" % share_hash_ids, file=out) print(" block_hash_tree: %d nodes" % len(block_hash_tree), file=out) # the storage index isn't stored in the share itself, so we depend upon # knowing the parent directory name to get it pieces = options['filename'].split(os.sep) if len(pieces) >= 2: piece = to_bytes(pieces[-2]) if base32.could_be_base32_encoded(piece): storage_index = base32.a2b(piece) fingerprint = hashutil.ssk_pubkey_fingerprint_hash(pubkey) u = MDMFVerifierURI(storage_index, fingerprint) verify_cap = u.to_string() print(" verify-cap:", quote_output(verify_cap, quotemarks=False), file=out) if options['offsets']: # NOTE: this offset-calculation code is fragile, and needs to be # merged with MutableShareFile's internals. print(file=out) print(" Section Offsets:", file=out) def printoffset(name, value, shift=0): print("%s%.20s: %s (0x%x)" % (" "*shift, name, value, value), file=out) printoffset("first lease", m.HEADER_SIZE, 2) printoffset("share data", m.DATA_OFFSET, 2) o_seqnum = m.DATA_OFFSET + struct.calcsize(">B") printoffset("seqnum", o_seqnum, 4) o_root_hash = m.DATA_OFFSET + struct.calcsize(">BQ") printoffset("root_hash", o_root_hash, 4) for k in ["enc_privkey", "share_hash_chain", "signature", "verification_key", "verification_key_end", "share_data", "block_hash_tree", "EOF"]: name = {"share_data": "block data", "verification_key": "pubkey", "verification_key_end": "end of pubkey", "EOF": "end of share data"}.get(k,k) offset = m.DATA_OFFSET + offsets[k] printoffset(name, offset, 4) f = open(options['filename'], "rb") printoffset("extra leases", m._read_extra_lease_offset(f) + 4, 2) f.close() print(file=out) class DumpCapOptions(BaseOptions): def getSynopsis(self): return "Usage: tahoe [global-options] debug dump-cap [options] FILECAP" optParameters = [ ["nodeid", "n", None, "Specify the storage server nodeid (ASCII), to construct WE and secrets."], ["client-secret", "c", None, "Specify the client's base secret (ASCII), to construct secrets."], ["client-dir", "d", None, "Specify the client's base directory, from which a -c secret will be read."], ] def parseArgs(self, cap): self.cap = cap description = """ Print information about the given cap-string (aka: URI, file-cap, dir-cap, read-cap, write-cap). The URI string is parsed and unpacked. This prints the type of the cap, its storage index, and any derived keys. tahoe debug dump-cap URI:SSK-Verifier:4vozh77tsrw7mdhnj7qvp5ky74:q7f3dwz76sjys4kqfdt3ocur2pay3a6rftnkqmi2uxu3vqsdsofq This may be useful to determine if a read-cap and a write-cap refer to the same time, or to extract the storage-index from a file-cap (to then use with find-shares) If additional information is provided (storage server nodeid and/or client base secret), this command will compute the shared secrets used for the write-enabler and for lease-renewal. 
""" def dump_cap(options): from allmydata import uri from allmydata.util import base32 from base64 import b32decode from urllib.parse import unquote, urlparse out = options.stdout cap = options.cap nodeid = None if options['nodeid']: nodeid = b32decode(options['nodeid'].upper()) secret = None if options['client-secret']: secret = base32.a2b(options['client-secret'].encode("ascii")) elif options['client-dir']: secretfile = os.path.join(options['client-dir'], "private", "secret") try: secret = base32.a2b(open(secretfile, "rb").read().strip()) except EnvironmentError: pass if cap.startswith("http"): scheme, netloc, path, params, query, fragment = urlparse(cap) assert path.startswith("/uri/") cap = unquote(path[len("/uri/"):]) u = uri.from_string(cap) print(file=out) dump_uri_instance(u, nodeid, secret, out) def _dump_secrets(storage_index, secret, nodeid, out): from allmydata.util import hashutil from allmydata.util import base32 if secret: crs = hashutil.my_renewal_secret_hash(secret) print(" client renewal secret:", str(base32.b2a(crs), "ascii"), file=out) frs = hashutil.file_renewal_secret_hash(crs, storage_index) print(" file renewal secret:", str(base32.b2a(frs), "ascii"), file=out) if nodeid: renew = hashutil.bucket_renewal_secret_hash(frs, nodeid) print(" lease renewal secret:", str(base32.b2a(renew), "ascii"), file=out) ccs = hashutil.my_cancel_secret_hash(secret) print(" client cancel secret:", str(base32.b2a(ccs), "ascii"), file=out) fcs = hashutil.file_cancel_secret_hash(ccs, storage_index) print(" file cancel secret:", str(base32.b2a(fcs), "ascii"), file=out) if nodeid: cancel = hashutil.bucket_cancel_secret_hash(fcs, nodeid) print(" lease cancel secret:", str(base32.b2a(cancel), "ascii"), file=out) def dump_uri_instance(u, nodeid, secret, out, show_header=True): from allmydata import uri from allmydata.storage.server import si_b2a from allmydata.util import base32, hashutil from allmydata.util.encodingutil import quote_output if isinstance(u, uri.CHKFileURI): if show_header: print("CHK File:", file=out) print(" key:", str(base32.b2a(u.key), "ascii"), file=out) print(" UEB hash:", str(base32.b2a(u.uri_extension_hash), "ascii"), file=out) print(" size:", u.size, file=out) print(" k/N: %d/%d" % (u.needed_shares, u.total_shares), file=out) print(" storage index:", str(si_b2a(u.get_storage_index()), "ascii"), file=out) _dump_secrets(u.get_storage_index(), secret, nodeid, out) elif isinstance(u, uri.CHKFileVerifierURI): if show_header: print("CHK Verifier URI:", file=out) print(" UEB hash:", str(base32.b2a(u.uri_extension_hash), "ascii"), file=out) print(" size:", u.size, file=out) print(" k/N: %d/%d" % (u.needed_shares, u.total_shares), file=out) print(" storage index:", str(si_b2a(u.get_storage_index()), "ascii"), file=out) elif isinstance(u, uri.LiteralFileURI): if show_header: print("Literal File URI:", file=out) print(" data:", quote_output(u.data), file=out) elif isinstance(u, uri.WriteableSSKFileURI): # SDMF if show_header: print("SDMF Writeable URI:", file=out) print(" writekey:", str(base32.b2a(u.writekey), "ascii"), file=out) print(" readkey:", str(base32.b2a(u.readkey), "ascii"), file=out) print(" storage index:", str(si_b2a(u.get_storage_index()), "ascii"), file=out) print(" fingerprint:", str(base32.b2a(u.fingerprint), "ascii"), file=out) print(file=out) if nodeid: we = hashutil.ssk_write_enabler_hash(u.writekey, nodeid) print(" write_enabler:", str(base32.b2a(we), "ascii"), file=out) print(file=out) _dump_secrets(u.get_storage_index(), secret, nodeid, out) elif 
isinstance(u, uri.ReadonlySSKFileURI): if show_header: print("SDMF Read-only URI:", file=out) print(" readkey:", str(base32.b2a(u.readkey), "ascii"), file=out) print(" storage index:", str(si_b2a(u.get_storage_index()), "ascii"), file=out) print(" fingerprint:", str(base32.b2a(u.fingerprint), "ascii"), file=out) elif isinstance(u, uri.SSKVerifierURI): if show_header: print("SDMF Verifier URI:", file=out) print(" storage index:", str(si_b2a(u.get_storage_index()), "ascii"), file=out) print(" fingerprint:", str(base32.b2a(u.fingerprint), "ascii"), file=out) elif isinstance(u, uri.WriteableMDMFFileURI): # MDMF if show_header: print("MDMF Writeable URI:", file=out) print(" writekey:", str(base32.b2a(u.writekey), "ascii"), file=out) print(" readkey:", str(base32.b2a(u.readkey), "ascii"), file=out) print(" storage index:", str(si_b2a(u.get_storage_index()), "ascii"), file=out) print(" fingerprint:", str(base32.b2a(u.fingerprint), "ascii"), file=out) print(file=out) if nodeid: we = hashutil.ssk_write_enabler_hash(u.writekey, nodeid) print(" write_enabler:", str(base32.b2a(we), "ascii"), file=out) print(file=out) _dump_secrets(u.get_storage_index(), secret, nodeid, out) elif isinstance(u, uri.ReadonlyMDMFFileURI): if show_header: print("MDMF Read-only URI:", file=out) print(" readkey:", str(base32.b2a(u.readkey), "ascii"), file=out) print(" storage index:", str(si_b2a(u.get_storage_index()), "ascii"), file=out) print(" fingerprint:", str(base32.b2a(u.fingerprint), "ascii"), file=out) elif isinstance(u, uri.MDMFVerifierURI): if show_header: print("MDMF Verifier URI:", file=out) print(" storage index:", str(si_b2a(u.get_storage_index()), "ascii"), file=out) print(" fingerprint:", str(base32.b2a(u.fingerprint), "ascii"), file=out) elif isinstance(u, uri.ImmutableDirectoryURI): # CHK-based directory if show_header: print("CHK Directory URI:", file=out) dump_uri_instance(u._filenode_uri, nodeid, secret, out, False) elif isinstance(u, uri.ImmutableDirectoryURIVerifier): if show_header: print("CHK Directory Verifier URI:", file=out) dump_uri_instance(u._filenode_uri, nodeid, secret, out, False) elif isinstance(u, uri.DirectoryURI): # SDMF-based directory if show_header: print("Directory Writeable URI:", file=out) dump_uri_instance(u._filenode_uri, nodeid, secret, out, False) elif isinstance(u, uri.ReadonlyDirectoryURI): if show_header: print("Directory Read-only URI:", file=out) dump_uri_instance(u._filenode_uri, nodeid, secret, out, False) elif isinstance(u, uri.DirectoryURIVerifier): if show_header: print("Directory Verifier URI:", file=out) dump_uri_instance(u._filenode_uri, nodeid, secret, out, False) elif isinstance(u, uri.MDMFDirectoryURI): # MDMF-based directory if show_header: print("Directory Writeable URI:", file=out) dump_uri_instance(u._filenode_uri, nodeid, secret, out, False) elif isinstance(u, uri.ReadonlyMDMFDirectoryURI): if show_header: print("Directory Read-only URI:", file=out) dump_uri_instance(u._filenode_uri, nodeid, secret, out, False) elif isinstance(u, uri.MDMFDirectoryURIVerifier): if show_header: print("Directory Verifier URI:", file=out) dump_uri_instance(u._filenode_uri, nodeid, secret, out, False) else: print("unknown cap type", file=out) class FindSharesOptions(BaseOptions): def getSynopsis(self): return "Usage: tahoe [global-options] debug find-shares STORAGE_INDEX NODEDIRS.." 
def parseArgs(self, storage_index_s, *nodedirs): from allmydata.util.encodingutil import argv_to_abspath self.si_s = storage_index_s self.nodedirs = list(map(argv_to_abspath, nodedirs)) description = """ Locate all shares for the given storage index. This command looks through one or more node directories to find the shares. It returns a list of filenames, one per line, for each share file found. tahoe debug find-shares 4vozh77tsrw7mdhnj7qvp5ky74 testgrid/node-* It may be useful during testing, when running a test grid in which all the nodes are on a local disk. The share files thus located can be counted, examined (with dump-share), or corrupted/deleted to test checker/repairer. """ def find_shares(options): """Given a storage index and a list of node directories, emit a list of all matching shares to stdout, one per line. For example: find-shares.py 44kai1tui348689nrw8fjegc8c ~/testnet/node-* gives: /home/warner/testnet/node-1/storage/shares/44k/44kai1tui348689nrw8fjegc8c/5 /home/warner/testnet/node-1/storage/shares/44k/44kai1tui348689nrw8fjegc8c/9 /home/warner/testnet/node-2/storage/shares/44k/44kai1tui348689nrw8fjegc8c/2 """ from allmydata.storage.server import si_a2b, storage_index_to_dir from allmydata.util.encodingutil import listdir_unicode, quote_local_unicode_path out = options.stdout sharedir = storage_index_to_dir(si_a2b(options.si_s.encode("utf-8"))) for d in options.nodedirs: d = os.path.join(d, "storage", "shares", sharedir) if os.path.exists(d): for shnum in listdir_unicode(d): print(quote_local_unicode_path(os.path.join(d, shnum), quotemarks=False), file=out) return 0 class CatalogSharesOptions(BaseOptions): def parseArgs(self, *nodedirs): from allmydata.util.encodingutil import argv_to_abspath self.nodedirs = list(map(argv_to_abspath, nodedirs)) if not nodedirs: raise usage.UsageError("must specify at least one node directory") def getSynopsis(self): return "Usage: tahoe [global-options] debug catalog-shares NODEDIRS.." description = """ Locate all shares in the given node directories, and emit a one-line summary of each share. Run it like this: tahoe debug catalog-shares testgrid/node-* >allshares.txt The lines it emits will look like the following: CHK $SI $k/$N $filesize $UEB_hash $expiration $abspath_sharefile SDMF $SI $k/$N $filesize $seqnum/$roothash $expiration $abspath_sharefile UNKNOWN $abspath_sharefile This command can be used to build up a catalog of shares from many storage servers and then sort the results to compare all shares for the same file. If you see shares with the same SI but different parameters/filesize/UEB_hash, then something is wrong. The misc/find-share/anomalies.py script may be useful for this purpose.
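For example (assuming a POSIX shell and the allshares.txt file created
above), sorting the catalog groups every share of a given file together so
that mismatched lines stand out:

 sort allshares.txt > sorted-shares.txt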
""" def call(c, *args, **kwargs): # take advantage of the fact that ImmediateReadBucketProxy returns # Deferreds that are already fired results = [] failures = [] d = defer.maybeDeferred(c, *args, **kwargs) d.addCallbacks(results.append, failures.append) if failures: failures[0].raiseException() return results[0] def describe_share(abs_sharefile, si_s, shnum_s, now, out): with open(abs_sharefile, "rb") as f: prefix = f.read(32) if MutableShareFile.is_valid_header(prefix): _describe_mutable_share(abs_sharefile, f, now, si_s, out) elif ShareFile.is_valid_header(prefix): _describe_immutable_share(abs_sharefile, now, si_s, out) else: print("UNKNOWN really-unknown %s" % quote_output(abs_sharefile), file=out) def _describe_mutable_share(abs_sharefile, f, now, si_s, out): # mutable share m = MutableShareFile(abs_sharefile) WE, nodeid = m._read_write_enabler_and_nodeid(f) data_length = m._read_data_length(f) expiration_time = min( [lease.get_expiration_time() for (i,lease) in m._enumerate_leases(f)] ) expiration = max(0, expiration_time - now) share_type = "unknown" f.seek(m.DATA_OFFSET) version = f.read(1) if version == b"\x00": # this slot contains an SMDF share share_type = "SDMF" elif version == b"\x01": share_type = "MDMF" if share_type == "SDMF": f.seek(m.DATA_OFFSET) # Read at least the mutable header length, if possible. If there's # less data than that in the share, don't try to read more (we won't # be able to unpack the header in this case but we surely don't want # to try to unpack bytes *following* the data section as if they were # header data). Rather than 2000 we could use HEADER_LENGTH from # allmydata/mutable/layout.py, probably. data = f.read(min(data_length, 2000)) try: pieces = unpack_share(data) except NeedMoreDataError as e: # retry once with the larger size size = e.needed_bytes f.seek(m.DATA_OFFSET) data = f.read(min(data_length, size)) pieces = unpack_share(data) (seqnum, root_hash, IV, k, N, segsize, datalen, pubkey, signature, share_hash_chain, block_hash_tree, share_data, enc_privkey) = pieces print("SDMF %s %d/%d %d #%d:%s %d %s" % \ (si_s, k, N, datalen, seqnum, str(base32.b2a(root_hash), "utf-8"), expiration, quote_output(abs_sharefile)), file=out) elif share_type == "MDMF": fake_shnum = 0 # TODO: factor this out with dump_MDMF_share() class ShareDumper(MDMFSlotReadProxy): def _read(self, readvs, force_remote=False, queue=False): data = [] for (where,length) in readvs: f.seek(m.DATA_OFFSET+where) data.append(f.read(length)) return defer.succeed({fake_shnum: data}) p = ShareDumper(None, "fake-si", fake_shnum) def extract(func): stash = [] # these methods return Deferreds, but we happen to know that # they run synchronously when not actually talking to a # remote server d = func() d.addCallback(stash.append) return stash[0] verinfo = extract(p.get_verinfo) (seqnum, root_hash, salt_to_use, segsize, datalen, k, N, prefix, offsets) = verinfo print("MDMF %s %d/%d %d #%d:%s %d %s" % \ (si_s, k, N, datalen, seqnum, str(base32.b2a(root_hash), "utf-8"), expiration, quote_output(abs_sharefile)), file=out) else: print("UNKNOWN mutable %s" % quote_output(abs_sharefile), file=out) def _describe_immutable_share(abs_sharefile, now, si_s, out): class ImmediateReadBucketProxy(ReadBucketProxy): def __init__(self, sf): self.sf = sf ReadBucketProxy.__init__(self, None, None, "") def __repr__(self): return "" def _read(self, offset, size): return defer.succeed(sf.read_share_data(offset, size)) # use a ReadBucketProxy to parse the bucket and find the uri extension sf = 
ShareFile(abs_sharefile) bp = ImmediateReadBucketProxy(sf) expiration_time = min(lease.get_expiration_time() for lease in sf.get_leases()) expiration = max(0, expiration_time - now) UEB_data = call(bp.get_uri_extension) unpacked = uri.unpack_extension_readable(UEB_data) k = unpacked["needed_shares"] N = unpacked["total_shares"] filesize = unpacked["size"] ueb_hash = unpacked["UEB_hash"] print("CHK %s %d/%d %d %s %d %s" % (si_s, k, N, filesize, str(ueb_hash, "utf-8"), expiration, quote_output(abs_sharefile)), file=out) def catalog_shares(options): from allmydata.util.encodingutil import listdir_unicode, quote_output out = options.stdout err = options.stderr now = time.time() for d in options.nodedirs: d = os.path.join(d, "storage", "shares") try: abbrevs = listdir_unicode(d) except EnvironmentError: # ignore nodes that have storage turned off altogether pass else: for abbrevdir in sorted(abbrevs): if abbrevdir == "incoming": continue abbrevdir = os.path.join(d, abbrevdir) # this tool may get run against bad disks, so we can't assume # that listdir_unicode will always succeed. Try to catalog as much # as possible. try: sharedirs = listdir_unicode(abbrevdir) for si_s in sorted(sharedirs): si_dir = os.path.join(abbrevdir, si_s) catalog_shares_one_abbrevdir(si_s, si_dir, now, out,err) except: print("Error processing %s" % quote_output(abbrevdir), file=err) failure.Failure().printTraceback(err) return 0 def _as_number(s): try: return int(s) except ValueError: return "not int" def catalog_shares_one_abbrevdir(si_s, si_dir, now, out, err): from allmydata.util.encodingutil import listdir_unicode, quote_output try: for shnum_s in sorted(listdir_unicode(si_dir), key=_as_number): abs_sharefile = os.path.join(si_dir, shnum_s) assert os.path.isfile(abs_sharefile) try: describe_share(abs_sharefile, si_s, shnum_s, now, out) except: print("Error processing %s" % quote_output(abs_sharefile), file=err) failure.Failure().printTraceback(err) except: print("Error processing %s" % quote_output(si_dir), file=err) failure.Failure().printTraceback(err) class CorruptShareOptions(BaseOptions): def getSynopsis(self): return "Usage: tahoe [global-options] debug corrupt-share SHARE_FILENAME" optParameters = [ ["offset", "o", "block-random", "Specify which bit to flip."], ] description = """ Corrupt the given share by flipping a bit. This will cause a verifying/downloading client to log an integrity-check failure incident, and downloads will proceed with a different share. The --offset parameter controls which bit should be flipped. The default is to flip a single random bit of the block data. tahoe debug corrupt-share testgrid/node-3/storage/shares/4v/4vozh77tsrw7mdhnj7qvp5ky74/0 Obviously, this command should not be used in normal operation. """ def parseArgs(self, filename): self['filename'] = filename def corrupt_share(options): import random from allmydata.storage.mutable import MutableShareFile from allmydata.storage.immutable import ShareFile from allmydata.mutable.layout import unpack_header from allmydata.immutable.layout import ReadBucketProxy out = options.stdout fn = options['filename'] assert options["offset"] == "block-random", "other offsets not implemented" # first, what kind of share is it? 
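# Note on flip_bit() below (descriptive comment, not upstream): it chooses a
# random byte offset in [start, end) and a random bit number, but the bit
# number is only used in the log message; the chosen byte is always XORed
# with 0x01 (its low-order bit).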
def flip_bit(start, end): offset = random.randrange(start, end) bit = random.randrange(0, 8) print("[%d..%d): %d.b%d" % (start, end, offset, bit), file=out) f = open(fn, "rb+") f.seek(offset) d = f.read(1) d = bchr(ord(d) ^ 0x01) f.seek(offset) f.write(d) f.close() with open(fn, "rb") as f: prefix = f.read(32) if MutableShareFile.is_valid_header(prefix): # mutable m = MutableShareFile(fn) with open(fn, "rb") as f: f.seek(m.DATA_OFFSET) # Read enough data to get a mutable header to unpack. data = f.read(2000) # make sure this slot contains an SMDF share assert data[0:1] == b"\x00", "non-SDMF mutable shares not supported" f.close() (version, ig_seqnum, ig_roothash, ig_IV, ig_k, ig_N, ig_segsize, ig_datalen, offsets) = unpack_header(data) assert version == 0, "we only handle v0 SDMF files" start = m.DATA_OFFSET + offsets["share_data"] end = m.DATA_OFFSET + offsets["enc_privkey"] flip_bit(start, end) else: # otherwise assume it's immutable f = ShareFile(fn) bp = ReadBucketProxy(None, None, '') offsets = bp._parse_offsets(f.read_share_data(0, 0x24)) start = f._data_offset + offsets["data"] end = f._data_offset + offsets["plaintext_hash_tree"] flip_bit(start, end) class ReplOptions(BaseOptions): def getSynopsis(self): return "Usage: tahoe debug repl (OBSOLETE)" def repl(options): print("'tahoe debug repl' is obsolete. Please run 'python' in a virtualenv.", file=options.stderr) return 1 DEFAULT_TESTSUITE = 'allmydata' class TrialOptions(BaseOptions): def getSynopsis(self): return "Usage: tahoe debug trial (OBSOLETE)" def trial(config): print("'tahoe debug trial' is obsolete. Please run 'tox', or use 'trial' in a virtualenv.", file=config.stderr) return 1 def fixOptionsClass(args): (subcmd, shortcut, OptionsClass, desc) = args class FixedOptionsClass(OptionsClass): def getSynopsis(self): t = OptionsClass.getSynopsis(self) i = t.find("Usage: flogtool ") if i >= 0: return "Usage: tahoe [global-options] debug flogtool " + t[i+len("Usage: flogtool "):] else: return "Usage: tahoe [global-options] debug flogtool %s [options]" % (subcmd,) return (subcmd, shortcut, FixedOptionsClass, desc) class FlogtoolOptions(foolscap_cli.Options): def __init__(self): super(FlogtoolOptions, self).__init__() self.subCommands = list(map(fixOptionsClass, self.subCommands)) def getSynopsis(self): return "Usage: tahoe [global-options] debug flogtool COMMAND [flogtool-options]" def parseOptions(self, all_subargs, *a, **kw): self.flogtool_args = list(all_subargs) return super(FlogtoolOptions, self).parseOptions(self.flogtool_args, *a, **kw) def getUsage(self, width=None): t = super(FlogtoolOptions, self).getUsage(width) t += """ The 'tahoe debug flogtool' command uses the correct imports for this instance of Tahoe-LAFS. Please run 'tahoe debug flogtool COMMAND --help' for more details on each subcommand. 
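For example, foolscap's standard 'dump' subcommand can be invoked as:

 tahoe debug flogtool dump FLOGFILE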
""" return t def opt_help(self): print(str(self)) sys.exit(0) def flogtool(config): sys.argv = ['flogtool'] + config.flogtool_args return foolscap_cli.run_flogtool() class DebugCommand(BaseOptions): subCommands = [ ["dump-share", None, DumpOptions, "Unpack and display the contents of a share (uri_extension and leases)."], ["dump-cap", None, DumpCapOptions, "Unpack a read-cap or write-cap."], ["find-shares", None, FindSharesOptions, "Locate sharefiles in node dirs."], ["catalog-shares", None, CatalogSharesOptions, "Describe all shares in node dirs."], ["corrupt-share", None, CorruptShareOptions, "Corrupt a share by flipping a bit."], ["repl", None, ReplOptions, "OBSOLETE"], ["trial", None, TrialOptions, "OBSOLETE"], ["flogtool", None, FlogtoolOptions, "Utilities to access log files."], ] def postOptions(self): if not hasattr(self, 'subOptions'): raise usage.UsageError("must specify a subcommand") synopsis = "COMMAND" def getUsage(self, width=None): t = BaseOptions.getUsage(self, width) t += """\ Please run e.g. 'tahoe debug dump-share --help' for more details on each subcommand. """ return t subDispatch = { "dump-share": dump_share, "dump-cap": dump_cap, "find-shares": find_shares, "catalog-shares": catalog_shares, "corrupt-share": corrupt_share, "repl": repl, "trial": trial, "flogtool": flogtool, } def do_debug(options): so = options.subOptions so.stdout = options.stdout so.stderr = options.stderr f = subDispatch[options.subCommand] return f(so) subCommands : SubCommands = [ ("debug", None, DebugCommand, "debug subcommands: use 'tahoe debug' for a list."), ] dispatch = { "debug": do_debug, } tahoe_lafs-1.20.0/src/allmydata/scripts/default_nodedir.py0000644000000000000000000000107013615410400020545 0ustar00""" Ported to Python 3. """ import sys from allmydata.util.assertutil import precondition from allmydata.util.fileutil import abspath_expanduser_unicode _default_nodedir = None if sys.platform == 'win32': from allmydata.windows import registry path = registry.get_base_dir_path() if path: precondition(isinstance(path, str), path) _default_nodedir = abspath_expanduser_unicode(path) if _default_nodedir is None: path = abspath_expanduser_unicode("~/.tahoe") precondition(isinstance(path, str), path) _default_nodedir = path tahoe_lafs-1.20.0/src/allmydata/scripts/runner.py0000644000000000000000000002764413615410400016745 0ustar00import os, sys from io import StringIO import six from twisted.python import usage from twisted.internet import defer, task, threads from allmydata.scripts.common import get_default_nodedir from allmydata.scripts import debug, create_node, cli, \ admin, tahoe_run, tahoe_invite from allmydata.scripts.types_ import SubCommands from allmydata.util.encodingutil import quote_local_unicode_path, argv_to_unicode from allmydata.util.eliotutil import ( opt_eliot_destination, opt_help_eliot_destinations, eliot_logging_service, ) from .. import ( __full_version__, ) _default_nodedir = get_default_nodedir() NODEDIR_HELP = ("Specify which Tahoe node directory should be used. The " "directory should either contain a full Tahoe node, or a " "file named node.url that points to some other Tahoe node. 
" "It should also contain a file named '" + os.path.join('private', 'aliases') + "' which contains the mapping from alias name to root " "dirnode URI.") if _default_nodedir: NODEDIR_HELP += " [default for most commands: " + quote_local_unicode_path(_default_nodedir) + "]" process_control_commands : SubCommands = [ ("run", None, tahoe_run.RunOptions, "run a node without daemonizing"), ] class Options(usage.Options): """ :ivar wormhole: An object exposing the magic-wormhole API (mainly a test hook). """ # unit tests can override these to point at StringIO instances stdin = sys.stdin stdout = sys.stdout stderr = sys.stderr from wormhole import wormhole subCommands = ( create_node.subCommands + admin.subCommands + process_control_commands + debug.subCommands + cli.subCommands + tahoe_invite.subCommands ) optFlags = [ ["quiet", "q", "Operate silently."], ["version", "V", "Display version numbers."], ["version-and-path", None, "Display version numbers and paths to their locations."], ] optParameters = [ ["node-directory", "d", None, NODEDIR_HELP], ["wormhole-server", None, u"ws://wormhole.tahoe-lafs.org:4000/v1", "The magic wormhole server to use.", str], ["wormhole-invite-appid", None, u"tahoe-lafs.org/invite", "The appid to use on the wormhole server.", str], ] def opt_version(self): print(__full_version__, file=self.stdout) self.no_command_needed = True opt_version_and_path = opt_version opt_eliot_destination = opt_eliot_destination opt_help_eliot_destinations = opt_help_eliot_destinations def __str__(self): return ("\nUsage: tahoe [global-options] [command-options]\n" + self.getUsage()) synopsis = "\nUsage: tahoe [global-options]" # used only for subcommands def getUsage(self, **kwargs): t = usage.Options.getUsage(self, **kwargs) t = t.replace("Options:", "\nGlobal options:", 1) return t + "\nPlease run 'tahoe --help' for more details on each command.\n" def postOptions(self): if not hasattr(self, 'subOptions'): if not hasattr(self, 'no_command_needed'): raise usage.UsageError("must specify a command") sys.exit(0) create_dispatch = {} for module in (create_node,): create_dispatch.update(module.dispatch) # type: ignore def parse_options(argv, config=None): if not config: config = Options() try: config.parseOptions(argv) except usage.error: raise return config def parse_or_exit(config, argv, stdout, stderr): """ Parse Tahoe-LAFS CLI arguments and return a configuration object if they are valid. If they are invalid, write an explanation to ``stdout`` and exit. :param allmydata.scripts.runner.Options config: An instance of the argument-parsing class to use. :param [unicode] argv: The argument list to parse, including the name of the program being run as ``argv[0]``. :param stdout: The file-like object to use as stdout. :param stderr: The file-like object to use as stderr. :raise SystemExit: If there is an argument-parsing problem. :return: ``config``, after using it to parse the argument list. """ try: config.stdout = stdout config.stderr = stderr parse_options(argv[1:], config=config) except usage.error as e: # `parse_options` may have the side-effect of initializing a # "sub-option" of the given configuration, even if it ultimately # raises an exception. For example, `tahoe run --invalid-option` will # set `config.subOptions` to an instance of # `allmydata.scripts.tahoe_run.RunOptions` and then raise a # `usage.error` because `RunOptions` does not recognize # `--invalid-option`. If `run` itself had a sub-options then the same # thing could happen but with another layer of nesting. 
We can # present the user with the most precise information about their usage # error possible by finding the most "sub" of the sub-options and then # showing that to the user along with the usage error. c = config while hasattr(c, 'subOptions'): c = c.subOptions print(str(c), file=stdout) exc_str = str(e) exc_bytes = six.ensure_binary(exc_str, "utf-8") msg_bytes = b"%s: %s\n" % (six.ensure_binary(argv[0]), exc_bytes) print(six.ensure_text(msg_bytes, "utf-8"), file=stdout) sys.exit(1) return config def dispatch(config, reactor, stdin=sys.stdin, stdout=sys.stdout, stderr=sys.stderr): command = config.subCommand so = config.subOptions if config['quiet']: stdout = StringIO() so.stdout = stdout so.stderr = stderr so.stdin = stdin config.stdin = stdin if command in create_dispatch: f = create_dispatch[command] elif command == "run": f = lambda config: tahoe_run.run(reactor, config) elif command in debug.dispatch: f = debug.dispatch[command] elif command in admin.dispatch: f = admin.dispatch[command] elif command in cli.dispatch: # these are blocking, and must be run in a thread f0 = cli.dispatch[command] f = lambda so: threads.deferToThread(f0, so) elif command in tahoe_invite.dispatch: f = tahoe_invite.dispatch[command] else: raise usage.UsageError() d = defer.maybeDeferred(f, so) # the calling convention for CLI dispatch functions is that they either: # 1: succeed and return rc=0 # 2: print explanation to stderr and return rc!=0 # 3: raise an exception that should just be printed normally # 4: return a Deferred that does 1 or 2 or 3 def _raise_sys_exit(rc): sys.exit(rc) d.addCallback(_raise_sys_exit) return d def _maybe_enable_eliot_logging(options, reactor): if options.get("destinations"): service = eliot_logging_service(reactor, options["destinations"]) # There is no Twisted "Application" around to hang this on so start # and stop it ourselves. service.startService() reactor.addSystemEventTrigger("after", "shutdown", service.stopService) # Pass on the options so we can dispatch the subcommand. return options def run(configFactory=Options, argv=sys.argv, stdout=sys.stdout, stderr=sys.stderr): """ Run a Tahoe-LAFS node. :param configFactory: A zero-argument callable which creates the config object to use to parse the argument list. :param [str] argv: The argument list to use to configure the run. :param stdout: The file-like object to use for stdout. :param stderr: The file-like object to use for stderr. :raise SystemExit: Always raised after the run is complete. """ if sys.platform == "win32": from allmydata.windows.fixups import initialize initialize() # doesn't return: calls sys.exit(rc) task.react( lambda reactor: _run_with_reactor( reactor, configFactory(), argv, stdout, stderr, ), ) def _setup_coverage(reactor, argv): """ If coverage measurement was requested, start collecting coverage measurements and arrange to record those measurements when the process is done. Coverage measurement is considered requested if ``"--coverage"`` is in ``argv`` (and it will be removed from ``argv`` if it is found). There should be a ``.coveragerc`` file in the working directory if coverage measurement is requested. This is only necessary to support multi-process coverage measurement, typically when the test suite is running, and with the pytest-based *integration* test suite (at ``integration/`` in the root of the source tree) foremost in mind. 
The idea is that if you are running Tahoe-LAFS in a configuration where multiple processes are involved - for example, a test process and a client node process, if you only measure coverage from the test process then you will fail to observe most Tahoe-LAFS code that is being run. This function arranges to have any Tahoe-LAFS process (such as that client node process) collect and report coverage measurements as well. """ # can we put this _setup_coverage call after we hit # argument-parsing? # ensure_str() only necessary on Python 2. if '--coverage' not in sys.argv: return argv.remove('--coverage') try: import coverage except ImportError: raise RuntimeError( "The 'coveage' package must be installed to use --coverage" ) # this doesn't change the shell's notion of the environment, but # it makes the test in process_startup() succeed, which is the # goal here. os.environ["COVERAGE_PROCESS_START"] = '.coveragerc' # maybe-start the global coverage, unless it already got started cov = coverage.process_startup() if cov is None: cov = coverage.process_startup.coverage def write_coverage_data(): """ Make sure that coverage has stopped; internally, it depends on ataxit handlers running which doesn't always happen (Twisted's shutdown hook also won't run if os._exit() is called, but it runs more-often than atexit handlers). """ cov.stop() cov.save() reactor.addSystemEventTrigger('after', 'shutdown', write_coverage_data) def _run_with_reactor(reactor, config, argv, stdout, stderr): """ Run a Tahoe-LAFS node using the given reactor. :param reactor: The reactor to use. This implementation largely ignores this and lets the rest of the implementation pick its own reactor. Oops. :param twisted.python.usage.Options config: The config object to use to parse the argument list. :param [str] argv: The argument list to parse, *excluding* the name of the program being run. :param stdout: See ``run``. :param stderr: See ``run``. :return: A ``Deferred`` that fires when the run is complete. """ _setup_coverage(reactor, argv) argv = list(map(argv_to_unicode, argv)) d = defer.maybeDeferred( parse_or_exit, config, argv, stdout, stderr, ) d.addCallback(_maybe_enable_eliot_logging, reactor) d.addCallback(dispatch, reactor, stdout=stdout, stderr=stderr) def _show_exception(f): # when task.react() notices a non-SystemExit exception, it does # log.err() with the failure and then exits with rc=1. We want this # to actually print the exception to stderr, like it would do if we # weren't using react(). if f.check(SystemExit): return f # dispatch function handled it f.printTraceback(file=stderr) sys.exit(1) d.addErrback(_show_exception) return d if __name__ == "__main__": run() tahoe_lafs-1.20.0/src/allmydata/scripts/slow_operation.py0000644000000000000000000000574213615410400020473 0ustar00""" Ported to Python 3. 
""" from six import ensure_str import os, time from allmydata.scripts.common import get_alias, DEFAULT_ALIAS, escape_path, \ UnknownAliasError from allmydata.scripts.common_http import do_http, format_http_error from allmydata.util import base32 from allmydata.util.encodingutil import quote_output, is_printable_ascii from urllib.parse import quote as url_quote import json class SlowOperationRunner(object): def run(self, options): stderr = options.stderr self.options = options self.ophandle = ophandle = ensure_str(base32.b2a(os.urandom(16))) nodeurl = options['node-url'] if not nodeurl.endswith("/"): nodeurl += "/" self.nodeurl = nodeurl where = options.where try: rootcap, path = get_alias(options.aliases, where, DEFAULT_ALIAS) except UnknownAliasError as e: e.display(stderr) return 1 path = str(path, "utf-8") if path == '/': path = '' url = nodeurl + "uri/%s" % url_quote(rootcap) if path: url += "/" + escape_path(path) # todo: should it end with a slash? url = self.make_url(url, ophandle) resp = do_http("POST", url) if resp.status not in (200, 302): print(format_http_error("ERROR", resp), file=stderr) return 1 # now we poll for results. We nominally poll at t=1, 5, 10, 30, 60, # 90, k*120 seconds, but if the poll takes non-zero time, that will # be slightly longer. I'm not worried about trying to make up for # that time. return self.wait_for_results() def poll_times(self): for i in (1,5,10,30,60,90): yield i i = 120 while True: yield i i += 120 def wait_for_results(self): last = 0 for next_item in self.poll_times(): delay = next_item - last time.sleep(delay) last = next_item if self.poll(): return 0 def poll(self): url = self.nodeurl + "operations/" + self.ophandle url += "?t=status&output=JSON&release-after-complete=true" stdout = self.options.stdout stderr = self.options.stderr resp = do_http("GET", url) if resp.status != 200: print(format_http_error("ERROR", resp), file=stderr) return True jdata = resp.read() data = json.loads(jdata) if not data["finished"]: return False if self.options.get("raw"): stdout = stdout.buffer if is_printable_ascii(jdata): stdout.write(jdata) stdout.write(b"\n") stdout.flush() else: print("The JSON response contained unprintable characters:\n%s" % quote_output(jdata), file=stderr) return True self.write_results(data) return True tahoe_lafs-1.20.0/src/allmydata/scripts/tahoe_add_alias.py0000644000000000000000000001431313615410400020502 0ustar00""" Ported to Python 3. """ import os.path import codecs from allmydata.util.assertutil import precondition from allmydata import uri from allmydata.scripts.common_http import do_http, check_http_error from allmydata.scripts.common import get_aliases from allmydata.util.fileutil import move_into_place from allmydata.util.encodingutil import quote_output, quote_output_u from allmydata.util import jsonbytes as json def add_line_to_aliasfile(aliasfile, alias, cap): # we use os.path.exists, rather than catching EnvironmentError, to avoid # clobbering the valuable alias file in case of spurious or transient # filesystem errors. 
if os.path.exists(aliasfile): f = codecs.open(aliasfile, "r", "utf-8") aliases = f.read() f.close() if not aliases.endswith("\n"): aliases += "\n" else: aliases = "" aliases += "%s: %s\n" % (alias, cap) f = codecs.open(aliasfile+".tmp", "w", "utf-8") f.write(aliases) f.close() move_into_place(aliasfile+".tmp", aliasfile) def add_alias(options): nodedir = options['node-directory'] alias = options.alias precondition(isinstance(alias, str), alias=alias) cap = options.cap stdout = options.stdout stderr = options.stderr if u":" in alias: # a single trailing colon will already have been stripped if present print("Alias names cannot contain colons.", file=stderr) return 1 if u" " in alias: print("Alias names cannot contain spaces.", file=stderr) return 1 old_aliases = get_aliases(nodedir) if alias in old_aliases: show_output(stderr, "Alias {alias} already exists!", alias=alias) return 1 aliasfile = os.path.join(nodedir, "private", "aliases") cap = str(uri.from_string_dirnode(cap).to_string(), 'utf-8') add_line_to_aliasfile(aliasfile, alias, cap) show_output(stdout, "Alias {alias} added", alias=alias) return 0 def create_alias(options): # mkdir+add_alias nodedir = options['node-directory'] alias = options.alias precondition(isinstance(alias, str), alias=alias) stdout = options.stdout stderr = options.stderr if u":" in alias: # a single trailing colon will already have been stripped if present print("Alias names cannot contain colons.", file=stderr) return 1 if u" " in alias: print("Alias names cannot contain spaces.", file=stderr) return 1 old_aliases = get_aliases(nodedir) if alias in old_aliases: show_output(stderr, "Alias {alias} already exists!", alias=alias) return 1 aliasfile = os.path.join(nodedir, "private", "aliases") nodeurl = options['node-url'] if not nodeurl.endswith("/"): nodeurl += "/" url = nodeurl + "uri?t=mkdir" resp = do_http("POST", url) rc = check_http_error(resp, stderr) if rc: return rc new_uri = resp.read().strip() # probably check for others.. add_line_to_aliasfile(aliasfile, alias, str(new_uri, "utf-8")) show_output(stdout, "Alias {alias} created", alias=alias) return 0 def show_output(fp, template, **kwargs): """ Print to just about anything. :param fp: A file-like object to which to print. This handles the case where ``fp`` declares a support encoding with the ``encoding`` attribute (eg sys.stdout on Python 3). It handles the case where ``fp`` declares no supported encoding via ``None`` for its ``encoding`` attribute (eg sys.stdout on Python 2 when stdout is not a tty). It handles the case where ``fp`` declares an encoding that does not support all of the characters in the output by forcing the "namereplace" error handler. It handles the case where there is no ``encoding`` attribute at all (eg StringIO.StringIO) by writing utf-8-encoded bytes. """ assert isinstance(template, str) # On Python 3 fp has an encoding attribute under all real usage. On # Python 2, the encoding attribute is None if stdio is not a tty. The # test suite often passes StringIO which has no such attribute. Make # allowances for this until the test suite is fixed and Python 2 is no # more. 
try: encoding = fp.encoding or "utf-8" except AttributeError: has_encoding = False encoding = "utf-8" else: has_encoding = True output = template.format(**{ k: quote_output_u(v, encoding=encoding) for (k, v) in kwargs.items() }) safe_output = output.encode(encoding, "namereplace") if has_encoding: safe_output = safe_output.decode(encoding) print(safe_output, file=fp) def _get_alias_details(nodedir): aliases = get_aliases(nodedir) alias_names = sorted(aliases.keys()) data = {} for name in alias_names: dircap = uri.from_string(aliases[name]) data[name] = { "readwrite": dircap.to_string(), "readonly": dircap.get_readonly().to_string(), } return data def _escape_format(t): """ _escape_format(t).format() == t :param unicode t: The text to escape. """ return t.replace("{", "{{").replace("}", "}}") def list_aliases(options): """ Show aliases that exist. """ data = _get_alias_details(options['node-directory']) if options['json']: dumped = json.dumps(data, indent=4) if isinstance(dumped, bytes): dumped = dumped.decode("utf-8") output = _escape_format(dumped) else: def dircap(details): return ( details['readonly'] if options['readonly-uri'] else details['readwrite'] ).decode("utf-8") def format_dircap(name, details): return fmt % (name, dircap(details)) max_width = max([len(quote_output(name)) for name in data.keys()] + [0]) fmt = "%" + str(max_width) + "s: %s" output = "\n".join(list( format_dircap(name, details) for name, details in data.items() )) if output: # Show whatever we computed. Skip this if there is no output to avoid # a spurious blank line. show_output(options.stdout, output) return 0 tahoe_lafs-1.20.0/src/allmydata/scripts/tahoe_backup.py0000644000000000000000000005035513615410400020054 0ustar00""" Ported to Python 3. """ import os.path import time from urllib.parse import quote as url_quote import datetime from allmydata.scripts.common import get_alias, escape_path, DEFAULT_ALIAS, \ UnknownAliasError from allmydata.scripts.common_http import do_http, HTTPError, format_http_error from allmydata.util import time_format, jsonbytes as json from allmydata.scripts import backupdb from allmydata.util.encodingutil import listdir_unicode, quote_output, \ quote_local_unicode_path, to_bytes, FilenameEncodingError, unicode_to_url from allmydata.util.assertutil import precondition from allmydata.util.fileutil import abspath_expanduser_unicode, precondition_abspath def get_local_metadata(path): metadata = {} # posix stat(2) metadata, depends on the platform s = os.stat(path) metadata["ctime"] = s.st_ctime metadata["mtime"] = s.st_mtime misc_fields = ("st_mode", "st_ino", "st_dev", "st_uid", "st_gid") macos_misc_fields = ("st_rsize", "st_creator", "st_type") for field in misc_fields + macos_misc_fields: if hasattr(s, field): metadata[field] = getattr(s, field) # TODO: extended attributes, like on OS-X's HFS+ return metadata def mkdir(contents, options): kids = dict([ (childname, (contents[childname][0], {"ro_uri": contents[childname][1], "metadata": contents[childname][2], })) for childname in contents ]) body = json.dumps(kids).encode("utf-8") url = options['node-url'] + "uri?t=mkdir-immutable" resp = do_http("POST", url, body) if resp.status < 200 or resp.status >= 300: raise HTTPError("Error during mkdir", resp) dircap = to_bytes(resp.read().strip()) return dircap def put_child(dirurl, childname, childcap): assert dirurl[-1] != "/" url = dirurl + "/" + url_quote(unicode_to_url(childname)) + "?t=uri" resp = do_http("PUT", url, childcap) if resp.status not in (200, 201): raise HTTPError("Error 
during put_child", resp) class BackerUpper(object): """ :ivar int _files_checked: The number of files which the backup process has so-far inspected on the grid to determine if they need to be re-uploaded. :ivar int _directories_checked: The number of directories which the backup process has so-far inspected on the grid to determine if they need to be re-uploaded. """ def __init__(self, options): self.options = options self._files_checked = 0 self._directories_checked = 0 def run(self): options = self.options nodeurl = options['node-url'] self.verbosity = 1 if options['quiet']: self.verbosity = 0 if options['verbose']: self.verbosity = 2 stdout = options.stdout stderr = options.stderr start_timestamp = datetime.datetime.now() bdbfile = os.path.join(options["node-directory"], "private", "backupdb.sqlite") bdbfile = abspath_expanduser_unicode(bdbfile) self.backupdb = backupdb.get_backupdb(bdbfile, stderr) if not self.backupdb: print("ERROR: Unable to load backup db.", file=stderr) return 1 try: rootcap, path = get_alias(options.aliases, options.to_dir, DEFAULT_ALIAS) except UnknownAliasError as e: e.display(stderr) return 1 to_url = nodeurl + "uri/%s/" % url_quote(rootcap) if path: to_url += escape_path(path) if not to_url.endswith("/"): to_url += "/" archives_url = to_url + "Archives/" archives_url = archives_url.rstrip("/") to_url = to_url.rstrip("/") # first step: make sure the target directory exists, as well as the # Archives/ subdirectory. resp = do_http("GET", archives_url + "?t=json") if resp.status == 404: resp = do_http("POST", archives_url + "?t=mkdir") if resp.status != 200: print(format_http_error("Unable to create target directory", resp), file=stderr) return 1 # second step: process the tree targets = list(collect_backup_targets( options.from_dir, listdir_unicode, self.options.filter_listdir, )) completed = run_backup( warn=self.warn, upload_file=self.upload, upload_directory=self.upload_directory, targets=targets, start_timestamp=start_timestamp, stdout=stdout, ) new_backup_dircap = completed.dircap # third: attach the new backup to the list now = time_format.iso_utc(int(time.time()), sep="_") + "Z" put_child(archives_url, now, new_backup_dircap) put_child(to_url, "Latest", new_backup_dircap) print(completed.report( self.verbosity, self._files_checked, self._directories_checked, ), file=stdout) # The command exits with code 2 if files or directories were skipped if completed.any_skips(): return 2 # done! 
return 0 def verboseprint(self, msg): precondition(isinstance(msg, str), msg) if self.verbosity >= 2: print(msg, file=self.options.stdout) def warn(self, msg): precondition(isinstance(msg, str), msg) print(msg, file=self.options.stderr) def upload_directory(self, path, compare_contents, create_contents): must_create, r = self.check_backupdb_directory(compare_contents) if must_create: self.verboseprint(" creating directory for %s" % quote_local_unicode_path(path)) newdircap = mkdir(create_contents, self.options) assert isinstance(newdircap, bytes) if r: r.did_create(newdircap) return True, newdircap else: self.verboseprint(" re-using old directory for %s" % quote_local_unicode_path(path)) return False, r.was_created() def check_backupdb_file(self, childpath): if not self.backupdb: return True, None use_timestamps = not self.options["ignore-timestamps"] r = self.backupdb.check_file(childpath, use_timestamps) if not r.was_uploaded(): return True, r if not r.should_check(): # the file was uploaded or checked recently, so we can just use # it return False, r # we must check the file before using the results filecap = r.was_uploaded() self.verboseprint("checking %s" % quote_output(filecap)) nodeurl = self.options['node-url'] checkurl = nodeurl + "uri/%s?t=check&output=JSON" % url_quote(filecap) self._files_checked += 1 resp = do_http("POST", checkurl) if resp.status != 200: # can't check, so we must assume it's bad return True, r cr = json.loads(resp.read()) healthy = cr["results"]["healthy"] if not healthy: # must upload return True, r # file is healthy, no need to upload r.did_check_healthy(cr) return False, r def check_backupdb_directory(self, compare_contents): if not self.backupdb: return True, None r = self.backupdb.check_directory(compare_contents) if not r.was_created(): return True, r if not r.should_check(): # the file was uploaded or checked recently, so we can just use # it return False, r # we must check the directory before re-using it dircap = r.was_created() self.verboseprint("checking %s" % quote_output(dircap)) nodeurl = self.options['node-url'] checkurl = nodeurl + "uri/%s?t=check&output=JSON" % url_quote(dircap) self._directories_checked += 1 resp = do_http("POST", checkurl) if resp.status != 200: # can't check, so we must assume it's bad return True, r cr = json.loads(resp.read()) healthy = cr["results"]["healthy"] if not healthy: # must create return True, r # directory is healthy, no need to upload r.did_check_healthy(cr) return False, r # This function will raise an IOError exception when called on an unreadable file def upload(self, childpath): precondition_abspath(childpath) #self.verboseprint("uploading %s.." % quote_local_unicode_path(childpath)) metadata = get_local_metadata(childpath) # we can use the backupdb here must_upload, bdb_results = self.check_backupdb_file(childpath) if must_upload: self.verboseprint("uploading %s.." % quote_local_unicode_path(childpath)) infileobj = open(childpath, "rb") url = self.options['node-url'] + "uri" resp = do_http("PUT", url, infileobj) if resp.status not in (200, 201): raise HTTPError("Error during file PUT", resp) filecap = resp.read().strip() self.verboseprint(" %s -> %s" % (quote_local_unicode_path(childpath, quotemarks=False), quote_output(filecap, quotemarks=False))) #self.verboseprint(" metadata: %s" % (quote_output(metadata, quotemarks=False),)) if bdb_results: bdb_results.did_upload(filecap) return True, filecap, metadata else: self.verboseprint("skipping %s.." 
% quote_local_unicode_path(childpath)) return False, bdb_results.was_uploaded(), metadata def backup(options): bu = BackerUpper(options) return bu.run() def collect_backup_targets(root, listdir, filter_children): """ Yield BackupTargets in a suitable order for processing (deepest targets before their parents). """ try: children = listdir(root) except EnvironmentError: yield PermissionDeniedTarget(root, isdir=True) except FilenameEncodingError: yield FilenameUndecodableTarget(root, isdir=True) else: for child in filter_children(children): assert isinstance(child, str), child childpath = os.path.join(root, child) if os.path.islink(childpath): yield LinkTarget(childpath, isdir=False) elif os.path.isdir(childpath): child_targets = collect_backup_targets( childpath, listdir, filter_children, ) for child_target in child_targets: yield child_target elif os.path.isfile(childpath): yield FileTarget(childpath) else: yield SpecialTarget(childpath) yield DirectoryTarget(root) def run_backup( warn, upload_file, upload_directory, targets, start_timestamp, stdout, ): progress = BackupProgress(warn, start_timestamp, len(targets)) for target in targets: # Pass in the progress and get back a progress. It would be great if # progress objects were immutable. Then the target's backup would # make a new progress with the desired changes and return it to us. # Currently, BackupProgress is mutable, though, and everything just # mutates it. progress = target.backup(progress, upload_file, upload_directory) print(progress.report(datetime.datetime.now()), file=stdout) return progress.backup_finished() class FileTarget(object): def __init__(self, path): self._path = path def __repr__(self): return "".format(self._path) def backup(self, progress, upload_file, upload_directory): try: created, childcap, metadata = upload_file(self._path) except EnvironmentError: target = PermissionDeniedTarget(self._path, isdir=False) return target.backup(progress, upload_file, upload_directory) else: assert isinstance(childcap, bytes) if created: return progress.created_file(self._path, childcap, metadata) return progress.reused_file(self._path, childcap, metadata) class DirectoryTarget(object): def __init__(self, path): self._path = path def __repr__(self): return "".format(self._path) def backup(self, progress, upload_file, upload_directory): metadata = get_local_metadata(self._path) progress, create, compare = progress.consume_directory(self._path) did_create, dircap = upload_directory(self._path, compare, create) if did_create: return progress.created_directory(self._path, dircap, metadata) return progress.reused_directory(self._path, dircap, metadata) class _ErrorTarget(object): def __init__(self, path, isdir=False): self._path = path self._quoted_path = quote_local_unicode_path(path) self._isdir = isdir class PermissionDeniedTarget(_ErrorTarget): def backup(self, progress, upload_file, upload_directory): return progress.permission_denied(self._isdir, self._quoted_path) class FilenameUndecodableTarget(_ErrorTarget): def backup(self, progress, upload_file, upload_directory): return progress.decoding_failed(self._isdir, self._quoted_path) class LinkTarget(_ErrorTarget): def backup(self, progress, upload_file, upload_directory): return progress.unsupported_filetype( self._isdir, self._quoted_path, "symlink", ) class SpecialTarget(_ErrorTarget): def backup(self, progress, upload_file, upload_directory): return progress.unsupported_filetype( self._isdir, self._quoted_path, "special", ) class BackupComplete(object): def __init__(self, 
start_timestamp, end_timestamp, files_created, files_reused, files_skipped, directories_created, directories_reused, directories_skipped, dircap, ): self._start_timestamp = start_timestamp self._end_timestamp = end_timestamp self._files_created = files_created self._files_reused = files_reused self._files_skipped = files_skipped self._directories_created = directories_created self._directories_reused = directories_reused self._directories_skipped = directories_skipped self.dircap = dircap def any_skips(self): return self._files_skipped or self._directories_skipped def report(self, verbosity, files_checked, directories_checked): result = [] if verbosity >= 1: result.append( " %d files uploaded (%d reused)," " %d files skipped," " %d directories created (%d reused)," " %d directories skipped" % ( self._files_created, self._files_reused, self._files_skipped, self._directories_created, self._directories_reused, self._directories_skipped, ), ) if verbosity >= 2: result.append( " %d files checked, %d directories checked" % ( files_checked, directories_checked, ), ) # calc elapsed time, omitting microseconds elapsed_time = str( self._end_timestamp - self._start_timestamp ).split('.')[0] result.append(" backup done, elapsed time: %s" % (elapsed_time,)) return "\n".join(result) class BackupProgress(object): # Would be nice if this data structure were immutable and its methods were # transformations that created a new slightly different object. Not there # yet, though. def __init__(self, warn, start_timestamp, target_count): self._warn = warn self._start_timestamp = start_timestamp self._target_count = target_count self._files_created = 0 self._files_reused = 0 self._files_skipped = 0 self._directories_created = 0 self._directories_reused = 0 self._directories_skipped = 0 self.last_dircap = None self._create_contents = {} self._compare_contents = {} def report(self, now): report_format = ( "Backing up {target_progress}/{target_total}... {elapsed} elapsed..." 
) return report_format.format( target_progress=( self._files_created + self._files_reused + self._files_skipped + self._directories_created + self._directories_reused + self._directories_skipped ), target_total=self._target_count, elapsed=self._format_elapsed(now - self._start_timestamp), ) def _format_elapsed(self, elapsed): seconds = int(elapsed.total_seconds()) hours = seconds // 3600 minutes = (seconds // 60) % 60 seconds = seconds % 60 return "{}h {}m {}s".format( hours, minutes, seconds, ) def backup_finished(self): end_timestamp = datetime.datetime.now() return BackupComplete( self._start_timestamp, end_timestamp, self._files_created, self._files_reused, self._files_skipped, self._directories_created, self._directories_reused, self._directories_skipped, self.last_dircap, ) def consume_directory(self, dirpath): return self, { os.path.basename(create_path): create_value for (create_path, create_value) in list(self._create_contents.items()) if os.path.dirname(create_path) == dirpath }, { os.path.basename(compare_path): compare_value for (compare_path, compare_value) in list(self._compare_contents.items()) if os.path.dirname(compare_path) == dirpath } def created_directory(self, path, dircap, metadata): self._create_contents[path] = ("dirnode", dircap, metadata) self._compare_contents[path] = dircap self._directories_created += 1 self.last_dircap = dircap return self def reused_directory(self, path, dircap, metadata): self._create_contents[path] = ("dirnode", dircap, metadata) self._compare_contents[path] = dircap self._directories_reused += 1 self.last_dircap = dircap return self def created_file(self, path, cap, metadata): self._create_contents[path] = ("filenode", cap, metadata) self._compare_contents[path] = cap self._files_created += 1 return self def reused_file(self, path, cap, metadata): self._create_contents[path] = ("filenode", cap, metadata) self._compare_contents[path] = cap self._files_reused += 1 return self def permission_denied(self, isdir, quoted_path): return self._skip( "WARNING: permission denied on {kind} {path}", isdir, path=quoted_path, ) def decoding_failed(self, isdir, quoted_path): return self._skip( "WARNING: could not list {kind} {path} due to a filename encoding error", isdir, path=quoted_path, ) def unsupported_filetype(self, isdir, quoted_path, filetype): return self._skip( "WARNING: cannot backup {filetype} {path}", isdir, path=quoted_path, filetype=filetype, ) def _skip(self, message, isdir, **kw): if isdir: self._directories_skipped += 1 kind = "directory" else: self._files_skipped += 1 kind = "file" self._warn(message.format(kind=kind, **kw)) # Pretend we're a persistent data structure being transformed. return self tahoe_lafs-1.20.0/src/allmydata/scripts/tahoe_check.py0000644000000000000000000003134613615410400017663 0ustar00""" Ported to Python 3. 
""" from six import ensure_text from urllib.parse import quote as url_quote import json from twisted.protocols.basic import LineOnlyReceiver from allmydata.scripts.common import get_alias, DEFAULT_ALIAS, escape_path, \ UnknownAliasError from allmydata.scripts.common_http import do_http, format_http_error from allmydata.util.encodingutil import quote_output, quote_path, get_io_encoding class Checker(object): pass def _quote_serverid_index_share(serverid, storage_index, sharenum): return "server %s, SI %s, shnum %r" % (quote_output(serverid, quotemarks=False), quote_output(storage_index, quotemarks=False), sharenum) def check_location(options, where): stdout = options.stdout stderr = options.stderr nodeurl = options['node-url'] if not nodeurl.endswith("/"): nodeurl += "/" try: rootcap, path = get_alias(options.aliases, where, DEFAULT_ALIAS) except UnknownAliasError as e: e.display(stderr) return 1 path = str(path, "utf-8") if path == '/': path = '' url = nodeurl + "uri/%s" % url_quote(rootcap) if path: url += "/" + escape_path(path) # todo: should it end with a slash? url += "?t=check&output=JSON" if options["verify"]: url += "&verify=true" if options["repair"]: url += "&repair=true" if options["add-lease"]: url += "&add-lease=true" resp = do_http("POST", url) if resp.status != 200: print(format_http_error("ERROR", resp), file=stderr) return 1 jdata = resp.read().decode() if options.get("raw"): stdout.write(jdata) stdout.write("\n") return 0 data = json.loads(jdata) if options["repair"]: # show repair status if data["pre-repair-results"]["results"]["healthy"]: summary = "healthy" else: summary = "not healthy" stdout.write("Summary: %s\n" % summary) cr = data["pre-repair-results"]["results"] stdout.write(" storage index: %s\n" % quote_output(data["storage-index"], quotemarks=False)) stdout.write(" good-shares: %r (encoding is %r-of-%r)\n" % (cr["count-shares-good"], cr["count-shares-needed"], cr["count-shares-expected"])) stdout.write(" wrong-shares: %r\n" % cr["count-wrong-shares"]) corrupt = cr["list-corrupt-shares"] if corrupt: stdout.write(" corrupt shares:\n") for (serverid, storage_index, sharenum) in corrupt: stdout.write(" %s\n" % _quote_serverid_index_share(serverid, storage_index, sharenum)) if data["repair-attempted"]: if data["repair-successful"]: stdout.write(" repair successful\n") else: stdout.write(" repair failed\n") else: # LIT files and directories do not have a "summary" field. 
summary = data.get("summary", "Healthy (LIT)") stdout.write("Summary: %s\n" % quote_output(summary, quotemarks=False)) cr = data["results"] stdout.write(" storage index: %s\n" % quote_output(data["storage-index"], quotemarks=False)) if all([field in cr for field in ("count-shares-good", "count-shares-needed", "count-shares-expected", "count-wrong-shares")]): stdout.write(" good-shares: %r (encoding is %r-of-%r)\n" % (cr["count-shares-good"], cr["count-shares-needed"], cr["count-shares-expected"])) stdout.write(" wrong-shares: %r\n" % cr["count-wrong-shares"]) corrupt = cr.get("list-corrupt-shares", []) if corrupt: stdout.write(" corrupt shares:\n") for (serverid, storage_index, sharenum) in corrupt: stdout.write(" %s\n" % _quote_serverid_index_share(serverid, storage_index, sharenum)) return 0; def check(options): if len(options.locations) == 0: errno = check_location(options, str()) if errno != 0: return errno return 0 for location in options.locations: errno = check_location(options, location) if errno != 0: return errno return 0 class FakeTransport(object): disconnecting = False class DeepCheckOutput(LineOnlyReceiver, object): delimiter = b"\n" def __init__(self, streamer, options): self.streamer = streamer self.transport = FakeTransport() self.verbose = bool(options["verbose"]) self.stdout = options.stdout self.stderr = options.stderr self.num_objects = 0 self.files_healthy = 0 self.files_unhealthy = 0 self.in_error = False def lineReceived(self, line): if self.in_error: print(quote_output(line, quotemarks=False), file=self.stderr) return if line.startswith(b"ERROR:"): self.in_error = True self.streamer.rc = 1 print(quote_output(line, quotemarks=False), file=self.stderr) return d = json.loads(line) stdout = self.stdout if d["type"] not in ("file", "directory"): return self.num_objects += 1 # non-verbose means print a progress marker every 100 files if self.num_objects % 100 == 0: print("%d objects checked.." % self.num_objects, file=stdout) cr = d["check-results"] if cr["results"]["healthy"]: self.files_healthy += 1 else: self.files_unhealthy += 1 if self.verbose: # verbose means also print one line per file path = d["path"] if not path: path = [""] # LIT files and directories do not have a "summary" field. summary = cr.get("summary", "Healthy (LIT)") # When Python 2 is dropped the ensure_text()/ensure_str() will be unnecessary. 
print(ensure_text("%s: %s" % (quote_path(path), quote_output(summary, quotemarks=False)), encoding=get_io_encoding()), file=stdout) # always print out corrupt shares for shareloc in cr["results"].get("list-corrupt-shares", []): (serverid, storage_index, sharenum) = shareloc print(" corrupt: %s" % _quote_serverid_index_share(serverid, storage_index, sharenum), file=stdout) def done(self): if self.in_error: return stdout = self.stdout print("done: %d objects checked, %d healthy, %d unhealthy" \ % (self.num_objects, self.files_healthy, self.files_unhealthy), file=stdout) class DeepCheckAndRepairOutput(LineOnlyReceiver, object): delimiter = b"\n" def __init__(self, streamer, options): self.streamer = streamer self.transport = FakeTransport() self.verbose = bool(options["verbose"]) self.stdout = options.stdout self.stderr = options.stderr self.num_objects = 0 self.pre_repair_files_healthy = 0 self.pre_repair_files_unhealthy = 0 self.repairs_attempted = 0 self.repairs_successful = 0 self.post_repair_files_healthy = 0 self.post_repair_files_unhealthy = 0 self.in_error = False def lineReceived(self, line): if self.in_error: print(quote_output(line, quotemarks=False), file=self.stderr) return if line.startswith(b"ERROR:"): self.in_error = True self.streamer.rc = 1 print(quote_output(line, quotemarks=False), file=self.stderr) return d = json.loads(line) stdout = self.stdout if d["type"] not in ("file", "directory"): return self.num_objects += 1 # non-verbose means print a progress marker every 100 files if self.num_objects % 100 == 0: print("%d objects checked.." % self.num_objects, file=stdout) crr = d["check-and-repair-results"] if d["storage-index"]: if crr["pre-repair-results"]["results"]["healthy"]: was_healthy = True self.pre_repair_files_healthy += 1 else: was_healthy = False self.pre_repair_files_unhealthy += 1 if crr["post-repair-results"]["results"]["healthy"]: self.post_repair_files_healthy += 1 else: self.post_repair_files_unhealthy += 1 else: # LIT file was_healthy = True self.pre_repair_files_healthy += 1 self.post_repair_files_healthy += 1 if crr["repair-attempted"]: self.repairs_attempted += 1 if crr["repair-successful"]: self.repairs_successful += 1 if self.verbose: # verbose means also print one line per file path = d["path"] if not path: path = [""] # we don't seem to have a summary available, so build one if was_healthy: summary = "healthy" else: summary = "not healthy" print(ensure_text("%s: %s" % (quote_path(path), summary), encoding=get_io_encoding()), file=stdout) # always print out corrupt shares prr = crr.get("pre-repair-results", {}) for shareloc in prr.get("results", {}).get("list-corrupt-shares", []): (serverid, storage_index, sharenum) = shareloc print(" corrupt: %s" % _quote_serverid_index_share(serverid, storage_index, sharenum), file=stdout) # always print out repairs if crr["repair-attempted"]: if crr["repair-successful"]: print(" repair successful", file=stdout) else: print(" repair failed", file=stdout) def done(self): if self.in_error: return stdout = self.stdout print("done: %d objects checked" % self.num_objects, file=stdout) print(" pre-repair: %d healthy, %d unhealthy" \ % (self.pre_repair_files_healthy, self.pre_repair_files_unhealthy), file=stdout) print(" %d repairs attempted, %d successful, %d failed" \ % (self.repairs_attempted, self.repairs_successful, (self.repairs_attempted - self.repairs_successful)), file=stdout) print(" post-repair: %d healthy, %d unhealthy" \ % (self.post_repair_files_healthy, self.post_repair_files_unhealthy), file=stdout) class 
DeepCheckStreamer(LineOnlyReceiver, object): def deepcheck_location(self, options, where): stdout = options.stdout stderr = options.stderr self.rc = 0 self.options = options nodeurl = options['node-url'] if not nodeurl.endswith("/"): nodeurl += "/" self.nodeurl = nodeurl try: rootcap, path = get_alias(options.aliases, where, DEFAULT_ALIAS) except UnknownAliasError as e: e.display(stderr) return 1 path = str(path, "utf-8") if path == '/': path = '' url = nodeurl + "uri/%s" % url_quote(rootcap) if path: url += "/" + escape_path(path) # todo: should it end with a slash? url += "?t=stream-deep-check" if options["verify"]: url += "&verify=true" if options["repair"]: url += "&repair=true" output = DeepCheckAndRepairOutput(self, options) else: output = DeepCheckOutput(self, options) if options["add-lease"]: url += "&add-lease=true" resp = do_http("POST", url) if resp.status not in (200, 302): print(format_http_error("ERROR", resp), file=stderr) return 1 # use Twisted to split this into lines while True: chunk = resp.read(100) if not chunk: break if self.options["raw"]: stdout.write(chunk.decode()) else: output.dataReceived(chunk) if not self.options["raw"]: output.done() return 0 def run(self, options): if len(options.locations) == 0: errno = self.deepcheck_location(options, str()) if errno != 0: return errno return 0 for location in options.locations: errno = self.deepcheck_location(options, location) if errno != 0: return errno return self.rc def deepcheck(options): return DeepCheckStreamer().run(options) tahoe_lafs-1.20.0/src/allmydata/scripts/tahoe_cp.py0000644000000000000000000010747513615410400017217 0ustar00""" Ported to Python 3. """ import os.path from urllib.parse import quote as url_quote from collections import defaultdict from io import BytesIO from twisted.python.failure import Failure from allmydata.scripts.common import get_alias, escape_path, \ DefaultAliasMarker, TahoeError from allmydata.scripts.common_http import do_http, HTTPError from allmydata import uri from allmydata.util import fileutil from allmydata.util.fileutil import abspath_expanduser_unicode, precondition_abspath from allmydata.util.encodingutil import unicode_to_url, listdir_unicode, quote_output, \ quote_local_unicode_path, to_bytes from allmydata.util.assertutil import precondition, _assert from allmydata.util import jsonbytes as json class MissingSourceError(TahoeError): def __init__(self, name, quotefn=quote_output): TahoeError.__init__(self, "No such file or directory %s" % quotefn(name)) class FilenameWithTrailingSlashError(TahoeError): def __init__(self, name, quotefn=quote_output): TahoeError.__init__(self, "source '%s' is not a directory, but ends with a slash" % quotefn(name)) class WeirdSourceError(TahoeError): def __init__(self, absname): quoted = quote_local_unicode_path(absname) TahoeError.__init__(self, "source '%s' is neither a file nor a directory, I can't handle it" % quoted) def GET_to_file(url): resp = do_http("GET", url) if resp.status == 200: return resp raise HTTPError("Error during GET", resp) def GET_to_string(url): f = GET_to_file(url) return f.read() def PUT(url, data): resp = do_http("PUT", url, data) if resp.status in (200, 201): return resp.read() raise HTTPError("Error during PUT", resp) def POST(url, data): resp = do_http("POST", url, data) if resp.status in (200, 201): return resp.read() raise HTTPError("Error during POST", resp) def mkdir(targeturl): url = targeturl + "?t=mkdir" resp = do_http("POST", url) if resp.status in (200, 201): return resp.read().strip() raise 
HTTPError("Error during mkdir", resp) def make_tahoe_subdirectory(nodeurl, parent_writecap, name): url = nodeurl + "/".join(["uri", url_quote(parent_writecap), url_quote(unicode_to_url(name)), ]) + "?t=mkdir" resp = do_http("POST", url) if resp.status in (200, 201): return resp.read().strip() raise HTTPError("Error during mkdir", resp) class LocalFileSource(object): def __init__(self, pathname, basename): precondition_abspath(pathname) self.pathname = pathname self._basename = basename def basename(self): return self._basename def need_to_copy_bytes(self): return True def open(self, caps_only): return open(self.pathname, "rb") class LocalFileTarget(object): def __init__(self, pathname): precondition_abspath(pathname) self.pathname = pathname def put_file(self, inf): fileutil.put_file(self.pathname, inf) class LocalMissingTarget(object): def __init__(self, pathname): precondition_abspath(pathname) self.pathname = pathname def put_file(self, inf): fileutil.put_file(self.pathname, inf) class LocalDirectorySource(object): def __init__(self, progressfunc, pathname, basename): precondition_abspath(pathname) self.progressfunc = progressfunc self.pathname = pathname self.children = None self._basename = basename def basename(self): return self._basename def populate(self, recurse): if self.children is not None: return self.children = {} children = listdir_unicode(self.pathname) for i,n in enumerate(children): self.progressfunc("examining %d of %d" % (i+1, len(children))) pn = os.path.join(self.pathname, n) if os.path.isdir(pn): child = LocalDirectorySource(self.progressfunc, pn, n) self.children[n] = child if recurse: child.populate(recurse=True) elif os.path.isfile(pn): self.children[n] = LocalFileSource(pn, n) else: # Could be dangling symlink; probably not copy-able. 
# TODO: output a warning pass class LocalDirectoryTarget(object): def __init__(self, progressfunc, pathname): precondition_abspath(pathname) self.progressfunc = progressfunc self.pathname = pathname self.children = None def populate(self, recurse): if self.children is not None: return self.children = {} children = listdir_unicode(self.pathname) for i,n in enumerate(children): self.progressfunc("examining %d of %d" % (i+1, len(children))) pn = os.path.join(self.pathname, n) if os.path.isdir(pn): child = LocalDirectoryTarget(self.progressfunc, pn) self.children[n] = child if recurse: child.populate(recurse=True) else: assert os.path.isfile(pn) self.children[n] = LocalFileTarget(pn) def get_child_target(self, name): precondition(isinstance(name, str), name) precondition(len(name), name) # don't want "" if self.children is None: self.populate(recurse=False) if name in self.children: return self.children[name] pathname = os.path.join(self.pathname, name) os.makedirs(pathname) child = LocalDirectoryTarget(self.progressfunc, pathname) self.children[name] = child return child def put_file(self, name, inf): precondition(isinstance(name, str), name) pathname = os.path.join(self.pathname, name) fileutil.put_file(pathname, inf) def set_children(self): pass class TahoeFileSource(object): def __init__(self, nodeurl, mutable, writecap, readcap, basename): self.nodeurl = nodeurl self.mutable = mutable self.writecap = writecap self.readcap = readcap self._basename = basename # unicode, or None for raw filecaps def basename(self): return self._basename def need_to_copy_bytes(self): if self.mutable: return True return False def open(self, caps_only): if caps_only: return BytesIO(self.readcap) url = self.nodeurl + "uri/" + url_quote(self.readcap) return GET_to_file(url) def bestcap(self): return self.writecap or self.readcap def seekable(file_like): """Return whether the file-like object is seekable.""" return hasattr(file_like, "seek") and ( not hasattr(file_like, "seekable") or file_like.seekable() ) class TahoeFileTarget(object): def __init__(self, nodeurl, mutable, writecap, readcap, url): self.nodeurl = nodeurl self.mutable = mutable self.writecap = writecap self.readcap = readcap self.url = url def put_file(self, inf): # We want to replace this object in-place. assert self.url # our do_http() call currently requires a string or a filehandle with # a real .seek if not seekable(inf): inf = inf.read() PUT(self.url, inf) # TODO: this always creates immutable files. We might want an option # to always create mutable files, or to copy mutable files into new # mutable files. 
ticket #835 class TahoeDirectorySource(object): def __init__(self, nodeurl, cache, progressfunc, basename): self.nodeurl = nodeurl self.cache = cache self.progressfunc = progressfunc self._basename = basename # unicode, or None for raw dircaps def basename(self): return self._basename def init_from_grid(self, writecap, readcap): self.writecap = writecap self.readcap = readcap bestcap = writecap or readcap url = self.nodeurl + "uri/%s" % url_quote(bestcap) resp = do_http("GET", url + "?t=json") if resp.status != 200: raise HTTPError("Error examining source directory", resp) parsed = json.loads(resp.read()) nodetype, d = parsed assert nodetype == "dirnode" self.mutable = d.get("mutable", False) # older nodes don't provide it self.children_d = dict( [(str(name),value) for (name,value) in d["children"].items()] ) self.children = None def init_from_parsed(self, parsed): nodetype, d = parsed self.writecap = to_bytes(d.get("rw_uri")) self.readcap = to_bytes(d.get("ro_uri")) self.mutable = d.get("mutable", False) # older nodes don't provide it self.children_d = dict( [(str(name),value) for (name,value) in d["children"].items()] ) self.children = None def populate(self, recurse): if self.children is not None: return self.children = {} for i,(name, data) in enumerate(self.children_d.items()): self.progressfunc("examining %d of %d" % (i+1, len(self.children_d))) if data[0] == "filenode": mutable = data[1].get("mutable", False) writecap = to_bytes(data[1].get("rw_uri")) readcap = to_bytes(data[1].get("ro_uri")) self.children[name] = TahoeFileSource(self.nodeurl, mutable, writecap, readcap, name) elif data[0] == "dirnode": writecap = to_bytes(data[1].get("rw_uri")) readcap = to_bytes(data[1].get("ro_uri")) if writecap and writecap in self.cache: child = self.cache[writecap] elif readcap and readcap in self.cache: child = self.cache[readcap] else: child = TahoeDirectorySource(self.nodeurl, self.cache, self.progressfunc, name) child.init_from_grid(writecap, readcap) if writecap: self.cache[writecap] = child if readcap: self.cache[readcap] = child if recurse: child.populate(recurse=True) self.children[name] = child else: # TODO: there should be an option to skip unknown nodes. raise TahoeError("Cannot copy unknown nodes (ticket #839). " "You probably need to use a later version of " "Tahoe-LAFS to copy this directory.") class TahoeMissingTarget(object): def __init__(self, url): self.url = url def put_file(self, inf): # We want to replace this object in-place. if not seekable(inf): inf = inf.read() PUT(self.url, inf) # TODO: this always creates immutable files. We might want an option # to always create mutable files, or to copy mutable files into new # mutable files. 
def put_uri(self, filecap): # I'm not sure this will always work return PUT(self.url + "?t=uri", filecap) class TahoeDirectoryTarget(object): def __init__(self, nodeurl, cache, progressfunc): self.nodeurl = nodeurl self.cache = cache self.progressfunc = progressfunc self.new_children = {} def init_from_parsed(self, parsed): nodetype, d = parsed self.writecap = to_bytes(d.get("rw_uri")) self.readcap = to_bytes(d.get("ro_uri")) self.mutable = d.get("mutable", False) # older nodes don't provide it self.children_d = dict( [(str(name),value) for (name,value) in d["children"].items()] ) self.children = None def init_from_grid(self, writecap, readcap): self.writecap = writecap self.readcap = readcap bestcap = writecap or readcap url = self.nodeurl + "uri/%s" % url_quote(bestcap) resp = do_http("GET", url + "?t=json") if resp.status != 200: raise HTTPError("Error examining target directory", resp) parsed = json.loads(resp.read()) nodetype, d = parsed assert nodetype == "dirnode" self.mutable = d.get("mutable", False) # older nodes don't provide it self.children_d = dict( [(str(name),value) for (name,value) in d["children"].items()] ) self.children = None def just_created(self, writecap): # TODO: maybe integrate this with the constructor self.writecap = writecap self.readcap = uri.from_string(writecap).get_readonly().to_string() self.mutable = True self.children_d = {} self.children = {} def populate(self, recurse): if self.children is not None: return self.children = {} for i,(name, data) in enumerate(self.children_d.items()): self.progressfunc("examining %d of %d" % (i+1, len(self.children_d))) if data[0] == "filenode": mutable = data[1].get("mutable", False) writecap = to_bytes(data[1].get("rw_uri")) readcap = to_bytes(data[1].get("ro_uri")) url = None if self.writecap: url = self.nodeurl + "/".join(["uri", url_quote(self.writecap), url_quote(unicode_to_url(name))]) self.children[name] = TahoeFileTarget(self.nodeurl, mutable, writecap, readcap, url) elif data[0] == "dirnode": writecap = to_bytes(data[1].get("rw_uri")) readcap = to_bytes(data[1].get("ro_uri")) if writecap and writecap in self.cache: child = self.cache[writecap] elif readcap and readcap in self.cache: child = self.cache[readcap] else: child = TahoeDirectoryTarget(self.nodeurl, self.cache, self.progressfunc) child.init_from_grid(writecap, readcap) if writecap: self.cache[writecap] = child if readcap: self.cache[readcap] = child if recurse: child.populate(recurse=True) self.children[name] = child else: # TODO: there should be an option to skip unknown nodes. raise TahoeError("Cannot copy unknown nodes (ticket #839). " "You probably need to use a later version of " "Tahoe-LAFS to copy this directory.") def get_child_target(self, name): # return a new target for a named subdirectory of this dir precondition(isinstance(name, str), name) if self.children is None: self.populate(recurse=False) if name in self.children: return self.children[name] writecap = make_tahoe_subdirectory(self.nodeurl, self.writecap, name) child = TahoeDirectoryTarget(self.nodeurl, self.cache, self.progressfunc) child.just_created(writecap) self.children[name] = child return child def put_file(self, name, inf): precondition(isinstance(name, str), name) url = self.nodeurl + "uri" if not seekable(inf): inf = inf.read() if self.children is None: self.populate(recurse=False) # Check to see if we already have a mutable file by this name. # If so, overwrite that file in place. 
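        # Sketch of the decision made below (a restatement, not new logic): an
        # existing *mutable* child is overwritten in place via its own
        # put_file(), while anything else is uploaded as a new immutable file
        # with a PUT to /uri and linked into this directory later by
        # set_children().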
if name in self.children and self.children[name].mutable: self.children[name].put_file(inf) else: filecap = PUT(url, inf) # TODO: this always creates immutable files. We might want an option # to always create mutable files, or to copy mutable files into new # mutable files. self.new_children[name] = filecap def put_uri(self, name, filecap): precondition(isinstance(name, str), name) self.new_children[name] = filecap def set_children(self): if not self.new_children: return url = (self.nodeurl + "uri/" + url_quote(self.writecap) + "?t=set_children") set_data = {} for (name, filecap) in list(self.new_children.items()): # it just so happens that ?t=set_children will accept both file # read-caps and write-caps as ['rw_uri'], and will handle either # correctly. So don't bother trying to figure out whether the one # we have is read-only or read-write. # TODO: think about how this affects forward-compatibility for # unknown caps set_data[name] = ["filenode", {"rw_uri": filecap}] body = json.dumps_bytes(set_data) POST(url, body) FileSources = (LocalFileSource, TahoeFileSource) DirectorySources = (LocalDirectorySource, TahoeDirectorySource) FileTargets = (LocalFileTarget, TahoeFileTarget) DirectoryTargets = (LocalDirectoryTarget, TahoeDirectoryTarget) MissingTargets = (LocalMissingTarget, TahoeMissingTarget) class Copier(object): def do_copy(self, options, progressfunc=None): if options['quiet']: verbosity = 0 elif options['verbose']: verbosity = 2 else: verbosity = 1 nodeurl = options['node-url'] if nodeurl[-1] != "/": nodeurl += "/" self.nodeurl = nodeurl self.progressfunc = progressfunc self.options = options self.aliases = options.aliases self.verbosity = verbosity self.stdout = options.stdout self.stderr = options.stderr if verbosity >= 2 and not self.progressfunc: def progress(message): print(message, file=self.stderr) self.progressfunc = progress self.caps_only = options["caps-only"] self.cache = {} try: status = self.try_copy() return status except TahoeError as te: if verbosity >= 2: Failure().printTraceback(self.stderr) print(file=self.stderr) te.display(self.stderr) return 1 def try_copy(self): """ All usage errors (except for target filename collisions) are caught here, not in a subroutine. This bottoms out in copy_file_to_file() or copy_things_to_directory(). 
""" source_specs = self.options.sources destination_spec = self.options.destination recursive = self.options["recursive"] target = self.get_target_info(destination_spec) precondition(isinstance(target, FileTargets + DirectoryTargets + MissingTargets), target) target_has_trailing_slash = destination_spec.endswith("/") sources = [] # list of source objects for ss in source_specs: try: si = self.get_source_info(ss) except FilenameWithTrailingSlashError as e: self.to_stderr(str(e)) return 1 precondition(isinstance(si, FileSources + DirectorySources), si) sources.append(si) # if any source is a directory, must use -r # if target is missing: # if source is a single file, target will be a file # else target will be a directory, so mkdir it # if there are multiple sources, target must be a dir # if target is a file, source must be a single file # if target is directory, sources must be named or a dir have_source_dirs = any([isinstance(s, DirectorySources) for s in sources]) if have_source_dirs and not recursive: # 'cp dir target' without -r: error self.to_stderr("cannot copy directories without --recursive") return 1 del recursive # -r is only used for signalling errors if isinstance(target, FileTargets): target_is_file = True elif isinstance(target, DirectoryTargets): target_is_file = False else: # isinstance(target, MissingTargets) if len(sources) == 1 and isinstance(sources[0], FileSources): target_is_file = True else: target_is_file = False if target_is_file and target_has_trailing_slash: self.to_stderr("target is not a directory, but ends with a slash") return 1 if len(sources) > 1 and target_is_file: self.to_stderr("copying multiple things requires target be a directory") return 1 if target_is_file: _assert(len(sources) == 1, sources) if not isinstance(sources[0], FileSources): # 'cp -r dir existingfile': error self.to_stderr("cannot copy directory into a file") return 1 return self.copy_file_to_file(sources[0], target) # else target is a directory, so each source must be one of: # * a named file (copied to a new file under the target) # * a named directory (causes a new directory of the same name to be # created under the target, then the contents of the source are # copied into that directory) # * an unnamed directory (the contents of the source are copied into # the target, without a new directory being made) # # If any source is an unnamed file, throw an error, since we have no # way to name the output file. _assert(isinstance(target, DirectoryTargets + MissingTargets), target) for source in sources: if isinstance(source, FileSources) and source.basename() is None: self.to_stderr("when copying into a directory, all source files must have names, but %s is unnamed" % quote_output(source_specs[0])) return 1 return self.copy_things_to_directory(sources, target) def to_stderr(self, text): print(text, file=self.stderr) # FIXME reduce the amount of near-duplicate code between get_target_info # and get_source_info. def get_target_info(self, destination_spec): precondition(isinstance(destination_spec, str), destination_spec) rootcap, path_utf8 = get_alias(self.aliases, destination_spec, None) path = path_utf8.decode("utf-8") if rootcap == DefaultAliasMarker: # no alias, so this is a local file pathname = abspath_expanduser_unicode(path) if not os.path.exists(pathname): t = LocalMissingTarget(pathname) elif os.path.isdir(pathname): t = LocalDirectoryTarget(self.progress, pathname) else: # TODO: should this be _assert? what happens if the target is # a special file? 
assert os.path.isfile(pathname), pathname t = LocalFileTarget(pathname) # non-empty else: # this is a tahoe object url = self.nodeurl + "uri/%s" % url_quote(rootcap) if path: url += "/" + escape_path(path) resp = do_http("GET", url + "?t=json") if resp.status == 404: # doesn't exist yet t = TahoeMissingTarget(url) elif resp.status == 200: parsed = json.loads(resp.read()) nodetype, d = parsed if nodetype == "dirnode": t = TahoeDirectoryTarget(self.nodeurl, self.cache, self.progress) t.init_from_parsed(parsed) else: writecap = to_bytes(d.get("rw_uri")) readcap = to_bytes(d.get("ro_uri")) mutable = d.get("mutable", False) t = TahoeFileTarget(self.nodeurl, mutable, writecap, readcap, url) else: raise HTTPError("Error examining target %s" % quote_output(destination_spec), resp) return t def get_source_info(self, source_spec): """ This turns an argv string into a (Local|Tahoe)(File|Directory)Source. """ precondition(isinstance(source_spec, str), source_spec) rootcap, path_utf8 = get_alias(self.aliases, source_spec, None) path = path_utf8.decode("utf-8") # any trailing slash is removed in abspath_expanduser_unicode(), so # make a note of it here, to throw an error later had_trailing_slash = path.endswith("/") if rootcap == DefaultAliasMarker: # no alias, so this is a local file pathname = abspath_expanduser_unicode(path) name = os.path.basename(pathname) if not os.path.exists(pathname): raise MissingSourceError(source_spec, quotefn=quote_local_unicode_path) if os.path.isdir(pathname): t = LocalDirectorySource(self.progress, pathname, name) else: if had_trailing_slash: raise FilenameWithTrailingSlashError(source_spec, quotefn=quote_local_unicode_path) if not os.path.isfile(pathname): raise WeirdSourceError(pathname) t = LocalFileSource(pathname, name) # non-empty else: # this is a tahoe object url = self.nodeurl + "uri/%s" % url_quote(rootcap) name = None if path: if path.endswith("/"): path = path[:-1] url += "/" + escape_path(path) last_slash = path.rfind(u"/") name = path if last_slash != -1: name = path[last_slash+1:] resp = do_http("GET", url + "?t=json") if resp.status == 404: raise MissingSourceError(source_spec) elif resp.status != 200: raise HTTPError("Error examining source %s" % quote_output(source_spec), resp) parsed = json.loads(resp.read()) nodetype, d = parsed if nodetype == "dirnode": t = TahoeDirectorySource(self.nodeurl, self.cache, self.progress, name) t.init_from_parsed(parsed) else: if had_trailing_slash: raise FilenameWithTrailingSlashError(source_spec) writecap = to_bytes(d.get("rw_uri")) readcap = to_bytes(d.get("ro_uri")) mutable = d.get("mutable", False) # older nodes don't provide it t = TahoeFileSource(self.nodeurl, mutable, writecap, readcap, name) return t def need_to_copy_bytes(self, source, target): # This should likley be a method call! but enabling that triggers # additional bugs. https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3719 if source.need_to_copy_bytes: # mutable tahoe files, and local files return True if isinstance(target, (LocalFileTarget, LocalDirectoryTarget)): return True return False def announce_success(self, msg): if self.verbosity >= 1: print("Success: %s" % msg, file=self.stdout) return 0 def copy_file_to_file(self, source, target): precondition(isinstance(source, FileSources), source) precondition(isinstance(target, FileTargets + MissingTargets), target) if self.need_to_copy_bytes(source, target): # if the target is a local directory, this will just write the # bytes to disk. 
If it is a tahoe directory, it will upload the # data, and stash the new filecap for a later set_children call. f = source.open(self.caps_only) target.put_file(f) return self.announce_success("file copied") # otherwise we're copying tahoe to tahoe, and using immutable files, # so we can just make a link. TODO: this probably won't always work: # need to enumerate the cases and analyze them. target.put_uri(source.bestcap()) return self.announce_success("file linked") def copy_things_to_directory(self, sources, target): # step one: if the target is missing, we should mkdir it target = self.maybe_create_target(target) target.populate(recurse=False) # step two: scan any source dirs, recursively, to find children for s in sources: if isinstance(s, DirectorySources): s.populate(recurse=True) if isinstance(s, FileSources): # each source must have a name, or be a directory _assert(s.basename() is not None, s) # step three: find a target for each source node, creating # directories as necessary. 'targetmap' is a dictionary that uses # target Directory instances as keys, and has values of (name: # sourceobject) dicts for all the files that need to wind up there. targetmap = self.build_targetmap(sources, target) # target name collisions are an error collisions = [] for target, sources in list(targetmap.items()): target_names = {} for source in sources: name = source.basename() if name in target_names: collisions.append((target, source, target_names[name])) else: target_names[name] = source if collisions: self.to_stderr("cannot copy multiple files with the same name into the same target directory") # I'm not sure how to show where the collisions are coming from #for (target, source1, source2) in collisions: # self.to_stderr(source1.basename()) return 1 # step four: walk through the list of targets. For each one, copy all # the files. If the target is a TahoeDirectory, upload and create # read-caps, then do a set_children to the target directory. self.copy_to_targetmap(targetmap) return self.announce_success("files copied") def maybe_create_target(self, target): if isinstance(target, LocalMissingTarget): os.makedirs(target.pathname) target = LocalDirectoryTarget(self.progress, target.pathname) elif isinstance(target, TahoeMissingTarget): writecap = mkdir(target.url) target = TahoeDirectoryTarget(self.nodeurl, self.cache, self.progress) target.just_created(writecap) # afterwards, or otherwise, it will be a directory precondition(isinstance(target, DirectoryTargets), target) return target def build_targetmap(self, sources, target): num_source_files = len([s for s in sources if isinstance(s, FileSources)]) num_source_dirs = len([s for s in sources if isinstance(s, DirectorySources)]) self.progress("attaching sources to targets, " "%d files / %d dirs in root" % (num_source_files, num_source_dirs)) # this maps each target directory to a list of source files that need # to be copied into it. All source files have names. targetmap = defaultdict(list) for s in sources: if isinstance(s, FileSources): targetmap[target].append(s) else: _assert(isinstance(s, DirectorySources), s) name = s.basename() if name is not None: # named sources get a new directory. 
see #2329 new_target = target.get_child_target(name) else: # unnamed sources have their contents copied directly new_target = target self.assign_targets(targetmap, s, new_target) self.progress("targets assigned, %s dirs, %s files" % (len(targetmap), self.count_files_to_copy(targetmap))) return targetmap def assign_targets(self, targetmap, source, target): # copy everything in the source into the target precondition(isinstance(source, DirectorySources), source) for name, child in list(source.children.items()): if isinstance(child, DirectorySources): # we will need a target directory for this one subtarget = target.get_child_target(name) self.assign_targets(targetmap, child, subtarget) else: _assert(isinstance(child, FileSources), child) targetmap[target].append(child) def copy_to_targetmap(self, targetmap): files_to_copy = self.count_files_to_copy(targetmap) self.progress("starting copy, %d files, %d directories" % (files_to_copy, len(targetmap))) files_copied = 0 targets_finished = 0 for target, sources in list(targetmap.items()): _assert(isinstance(target, DirectoryTargets), target) for source in sources: _assert(isinstance(source, FileSources), source) self.copy_file_into_dir(source, source.basename(), target) files_copied += 1 self.progress("%d/%d files, %d/%d directories" % (files_copied, files_to_copy, targets_finished, len(targetmap))) target.set_children() targets_finished += 1 self.progress("%d/%d directories" % (targets_finished, len(targetmap))) def count_files_to_copy(self, targetmap): return sum([len(sources) for sources in targetmap.values()]) def copy_file_into_dir(self, source, name, target): precondition(isinstance(source, FileSources), source) precondition(isinstance(target, DirectoryTargets), target) precondition(isinstance(name, str), name) if self.need_to_copy_bytes(source, target): # if the target is a local directory, this will just write the # bytes to disk. If it is a tahoe directory, it will upload the # data, and stash the new filecap for a later set_children call. f = source.open(self.caps_only) target.put_file(name, f) return # otherwise we're copying tahoe to tahoe, and using immutable files, # so we can just make a link target.put_uri(name, source.bestcap()) def progress(self, message): #print(message) if self.progressfunc: self.progressfunc(message) def copy(options): return Copier().do_copy(options) # error cases that need improvement: # local-file-in-the-way # touch proposed # tahoe cp -r my:docs/proposed/denver.txt proposed/denver.txt # handling of unknown nodes # things that maybe should be errors but aren't # local-dir-in-the-way # mkdir denver.txt # tahoe cp -r my:docs/proposed/denver.txt denver.txt # (creates denver.txt/denver.txt) # error cases that look good: # tahoe cp -r my:docs/missing missing # disconnect servers # tahoe cp -r my:docs/missing missing -> No JSON object could be decoded # tahoe-file-in-the-way (when we want to make a directory) # tahoe put README my:docs # tahoe cp -r docs/proposed my:docs/proposed tahoe_lafs-1.20.0/src/allmydata/scripts/tahoe_get.py0000644000000000000000000000265313615410400017364 0ustar00""" Ported to Python 3. 
""" from urllib.parse import quote as url_quote from allmydata.scripts.common import get_alias, DEFAULT_ALIAS, escape_path, \ UnknownAliasError from allmydata.scripts.common_http import do_http, format_http_error def get(options): nodeurl = options['node-url'] aliases = options.aliases from_file = options.from_file to_file = options.to_file stdout = options.stdout stderr = options.stderr if nodeurl[-1] != "/": nodeurl += "/" try: rootcap, path = get_alias(aliases, from_file, DEFAULT_ALIAS) except UnknownAliasError as e: e.display(stderr) return 1 url = nodeurl + "uri/%s" % url_quote(rootcap) if path: url += "/" + escape_path(path) resp = do_http("GET", url) if resp.status in (200, 201,): if to_file: outf = open(to_file, "wb") else: outf = stdout # Make sure we can write bytes; on Python 3 stdout is Unicode by # default. if getattr(outf, "encoding", None) is not None: outf = outf.buffer while True: data = resp.read(4096) if not data: break outf.write(data) if to_file: outf.close() rc = 0 else: print(format_http_error("Error during GET", resp), file=stderr) rc = 1 return rc tahoe_lafs-1.20.0/src/allmydata/scripts/tahoe_invite.py0000644000000000000000000000673713615410400020112 0ustar00""" Ported to Python 3. """ from twisted.python import usage from twisted.internet import defer, reactor from allmydata.util.encodingutil import argv_to_abspath from allmydata.util import jsonbytes as json from allmydata.scripts.common import get_default_nodedir, get_introducer_furl from allmydata.scripts.types_ import SubCommands from allmydata.client import read_config class InviteOptions(usage.Options): synopsis = "[options] " description = "Create a client-only Tahoe-LAFS node (no storage server)." optParameters = [ ("shares-needed", None, None, "How many shares are needed to reconstruct files from this node"), ("shares-happy", None, None, "Distinct storage servers new node will upload shares to"), ("shares-total", None, None, "Total number of shares new node will upload"), ] def parseArgs(self, *args): if len(args) != 1: raise usage.UsageError( "Provide a single argument: the new node's nickname" ) self['nick'] = args[0].strip() @defer.inlineCallbacks def _send_config_via_wormhole(options, config): out = options.stdout err = options.stderr relay_url = options.parent['wormhole-server'] print("Connecting to '{}'...".format(relay_url), file=out) wh = options.parent.wormhole.create( appid=options.parent['wormhole-invite-appid'], relay_url=relay_url, reactor=reactor, ) yield wh.get_welcome() print("Connected to wormhole server", file=out) # must call allocate_code before get_code will ever succeed wh.allocate_code() code = yield wh.get_code() print("Invite Code for client: {}".format(code), file=out) wh.send_message(json.dumps_bytes({ u"abilities": { u"server-v1": {}, } })) client_intro = yield wh.get_message() print(" received client introduction", file=out) client_intro = json.loads(client_intro) if not u'abilities' in client_intro: print("No 'abilities' from client", file=err) defer.returnValue(1) if not u'client-v1' in client_intro[u'abilities']: print("No 'client-v1' in abilities from client", file=err) defer.returnValue(1) print(" transmitting configuration", file=out) wh.send_message(json.dumps_bytes(config)) yield wh.close() @defer.inlineCallbacks def invite(options): if options.parent['node-directory']: basedir = argv_to_abspath(options.parent['node-directory']) else: basedir = get_default_nodedir() config = read_config(basedir, u"") out = options.stdout err = options.stderr try: introducer_furl = 
get_introducer_furl(basedir, config) except Exception as e: print("Can't find introducer FURL for node '{}': {}".format(basedir, str(e)), file=err) raise SystemExit(1) nick = options['nick'] remote_config = { "shares-needed": options["shares-needed"] or config.get_config('client', 'shares.needed'), "shares-total": options["shares-total"] or config.get_config('client', 'shares.total'), "shares-happy": options["shares-happy"] or config.get_config('client', 'shares.happy'), "nickname": nick, "introducer": introducer_furl, } yield _send_config_via_wormhole(options, remote_config) print("Completed successfully", file=out) subCommands : SubCommands = [ ("invite", None, InviteOptions, "Invite a new node to this grid"), ] dispatch = { "invite": invite, } tahoe_lafs-1.20.0/src/allmydata/scripts/tahoe_ls.py0000644000000000000000000001451313615410400017221 0ustar00""" Ported to Python 3. """ from six import ensure_text import time from urllib.parse import quote as url_quote import json from allmydata.scripts.common import get_alias, DEFAULT_ALIAS, escape_path, \ UnknownAliasError from allmydata.scripts.common_http import do_http, format_http_error from allmydata.util.encodingutil import unicode_to_output, quote_output, is_printable_ascii, to_bytes def ls(options): nodeurl = options['node-url'] aliases = options.aliases where = options.where stdout = options.stdout stderr = options.stderr if not nodeurl.endswith("/"): nodeurl += "/" if where.endswith("/"): where = where[:-1] try: rootcap, path = get_alias(aliases, where, DEFAULT_ALIAS) except UnknownAliasError as e: e.display(stderr) return 1 path = str(path, "utf-8") url = nodeurl + "uri/%s" % url_quote(rootcap) if path: # move where.endswith check here? url += "/" + escape_path(path) assert not url.endswith("/") url += "?t=json" resp = do_http("GET", url) if resp.status == 404: print("No such file or directory", file=stderr) return 2 if resp.status != 200: print(format_http_error("Error during GET", resp), file=stderr) if resp.status == 0: return 3 else: return resp.status data = resp.read() if options['json']: # The webapi server should always output printable ASCII. if is_printable_ascii(data): data = str(data, "ascii") print(data, file=stdout) return 0 else: print("The JSON response contained unprintable characters:", file=stderr) print(quote_output(data, quotemarks=False), file=stderr) return 1 try: parsed = json.loads(data) except Exception as e: print("error: %s" % quote_output(e.args[0], quotemarks=False), file=stderr) print("Could not parse JSON response:", file=stderr) print(quote_output(data, quotemarks=False), file=stderr) return 1 nodetype, d = parsed children = {} if nodetype == "dirnode": children = d['children'] else: # paths returned from get_alias are always valid UTF-8 childname = path.split("/")[-1] children = {childname: (nodetype, d)} if "metadata" not in d: d["metadata"] = {} childnames = sorted(children.keys()) now = time.time() # we build up a series of rows, then we loop through them to compute a # maxwidth so we can format them tightly. Size, filename, and URI are the # variable-width ones. rows = [] has_unknowns = False for name in childnames: child = children[name] name = str(name) childtype = child[0] # See webapi.txt for a discussion of the meanings of unix local # filesystem mtime and ctime, Tahoe mtime and ctime, and Tahoe # linkmotime and linkcrtime. 
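        # Illustrative sketch (not actual webapi output) of the metadata
        # shape the lookups below expect for each child entry:
        #   child[1]["metadata"] == {"ctime": ..., "mtime": ...,
        #                            "tahoe": {"linkcrtime": ..., "linkmotime": ...}}
        # The Tahoe-specific link times take precedence when present; the
        # plain ctime/mtime values are only used as a fallback.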
ctime = child[1].get("metadata", {}).get('tahoe', {}).get("linkcrtime") if not ctime: ctime = child[1]["metadata"].get("ctime") mtime = child[1].get("metadata", {}).get('tahoe', {}).get("linkmotime") if not mtime: mtime = child[1]["metadata"].get("mtime") rw_uri = to_bytes(child[1].get("rw_uri")) ro_uri = to_bytes(child[1].get("ro_uri")) if ctime: # match for formatting that GNU 'ls' does if (now - ctime) > 6*30*24*60*60: # old files fmt = "%b %d %Y" else: fmt = "%b %d %H:%M" ctime_s = time.strftime(fmt, time.localtime(ctime)) else: ctime_s = "-" if childtype == "dirnode": t0 = "d" size = "-" classify = "/" elif childtype == "filenode": t0 = "-" size = str(child[1].get("size", "?")) classify = "" if rw_uri: classify = "*" else: has_unknowns = True t0 = "?" size = "?" classify = "?" t1 = "-" if ro_uri: t1 = "r" t2 = "-" if rw_uri: t2 = "w" t3 = "-" if childtype == "dirnode": t3 = "x" uri = rw_uri or ro_uri line = [] if options["long"]: line.append(t0+t1+t2+t3) line.append(size) line.append(ctime_s) if not options["classify"]: classify = "" line.append(name + classify) if options["uri"]: line.append(ensure_text(uri)) if options["readonly-uri"]: line.append(quote_output(ensure_text(ro_uri) or "-", quotemarks=False)) rows.append(line) max_widths = [] left_justifys = [] for row in rows: for i,cell in enumerate(row): while len(max_widths) <= i: max_widths.append(0) while len(left_justifys) <= i: left_justifys.append(False) max_widths[i] = max(max_widths[i], len(cell)) if ensure_text(cell).startswith("URI"): left_justifys[i] = True if len(left_justifys) == 1: left_justifys[0] = True fmt_pieces = [] for i in range(len(max_widths)): piece = "%" if left_justifys[i]: piece += "-" piece += str(max_widths[i]) piece += "s" fmt_pieces.append(piece) fmt = " ".join(fmt_pieces) rc = 0 for row in rows: row = (fmt % tuple(row)).rstrip() encoding_error = False try: row = unicode_to_output(row) except UnicodeEncodeError: encoding_error = True row = quote_output(row) if encoding_error: print(row, file=stderr) rc = 1 else: print(row, file=stdout) if rc == 1: print("\nThis listing included files whose names could not be converted to the terminal" \ "\noutput encoding. Their names are shown using backslash escapes and in quotes.", file=stderr) if has_unknowns: print("\nThis listing included unknown objects. Using a webapi server that supports" \ "\na later version of Tahoe may help.", file=stderr) return rc tahoe_lafs-1.20.0/src/allmydata/scripts/tahoe_manifest.py0000644000000000000000000001345113615410400020411 0ustar00""" Ported to Python 3. 
""" from urllib.parse import quote as url_quote import json from twisted.protocols.basic import LineOnlyReceiver from allmydata.util.abbreviate import abbreviate_space_both from allmydata.scripts.slow_operation import SlowOperationRunner from allmydata.scripts.common import get_alias, DEFAULT_ALIAS, escape_path, \ UnknownAliasError from allmydata.scripts.common_http import do_http, format_http_error from allmydata.util.encodingutil import quote_output, quote_path class FakeTransport(object): disconnecting = False class ManifestStreamer(LineOnlyReceiver, object): delimiter = b"\n" def __init__(self): self.transport = FakeTransport() def run(self, options): self.rc = 0 stdout = options.stdout stderr = options.stderr self.options = options nodeurl = options['node-url'] if not nodeurl.endswith("/"): nodeurl += "/" self.nodeurl = nodeurl where = options.where try: rootcap, path = get_alias(options.aliases, where, DEFAULT_ALIAS) except UnknownAliasError as e: e.display(stderr) return 1 path = str(path, "utf-8") if path == '/': path = '' url = nodeurl + "uri/%s" % url_quote(rootcap) if path: url += "/" + escape_path(path) # todo: should it end with a slash? url += "?t=stream-manifest" resp = do_http("POST", url) if resp.status not in (200, 302): print(format_http_error("ERROR", resp), file=stderr) return 1 #print("RESP", dir(resp)) # use Twisted to split this into lines self.in_error = False # Writing bytes, so need binary stdout. stdout = stdout.buffer while True: chunk = resp.read(100) if not chunk: break if self.options["raw"]: stdout.write(chunk) else: self.dataReceived(chunk) return self.rc def lineReceived(self, line): stdout = self.options.stdout stderr = self.options.stderr if self.in_error: print(quote_output(line, quotemarks=False), file=stderr) return if line.startswith(b"ERROR:"): self.in_error = True self.rc = 1 print(quote_output(line, quotemarks=False), file=stderr) return try: d = json.loads(line.decode('utf-8')) except Exception as e: print("ERROR could not decode/parse %s\nERROR %r" % (quote_output(line), e), file=stderr) else: if d["type"] in ("file", "directory"): if self.options["storage-index"]: si = d.get("storage-index", None) if si: print(quote_output(si, quotemarks=False), file=stdout) elif self.options["verify-cap"]: vc = d.get("verifycap", None) if vc: print(quote_output(vc, quotemarks=False), file=stdout) elif self.options["repair-cap"]: vc = d.get("repaircap", None) if vc: print(quote_output(vc, quotemarks=False), file=stdout) else: print("%s %s" % ( quote_output(d["cap"], quotemarks=False), quote_path(d["path"], quotemarks=False)), file=stdout) def manifest(options): return ManifestStreamer().run(options) class StatsGrabber(SlowOperationRunner): def make_url(self, base, ophandle): return base + "?t=start-deep-stats&ophandle=" + ophandle def write_results(self, data): stdout = self.options.stdout keys = ("count-immutable-files", "count-mutable-files", "count-literal-files", "count-files", "count-directories", "size-immutable-files", "size-mutable-files", "size-literal-files", "size-directories", "largest-directory", "largest-immutable-file", ) width = max([len(k) for k in keys]) print("Counts and Total Sizes:", file=stdout) for k in keys: fmt = "%" + str(width) + "s: %d" if k in data: value = data[k] if not k.startswith("count-") and value > 1000: absize = abbreviate_space_both(value) print(fmt % (k, data[k]), " ", absize, file=stdout) else: print(fmt % (k, data[k]), file=stdout) if data["size-files-histogram"]: print("Size Histogram:", file=stdout) prevmax = None 
maxlen = max([len(str(maxsize)) for (minsize, maxsize, count) in data["size-files-histogram"]]) maxcountlen = max([len(str(count)) for (minsize, maxsize, count) in data["size-files-histogram"]]) minfmt = "%" + str(maxlen) + "d" maxfmt = "%-" + str(maxlen) + "d" countfmt = "%-" + str(maxcountlen) + "d" linefmt = minfmt + "-" + maxfmt + " : " + countfmt + " %s" for (minsize, maxsize, count) in data["size-files-histogram"]: if prevmax is not None and minsize != prevmax+1: print(" "*(maxlen-1) + "...", file=stdout) prevmax = maxsize print(linefmt % (minsize, maxsize, count, abbreviate_space_both(maxsize)), file=stdout) def stats(options): return StatsGrabber().run(options) tahoe_lafs-1.20.0/src/allmydata/scripts/tahoe_mkdir.py0000644000000000000000000000332213615410400017705 0ustar00""" Ported to Python 3. """ from urllib.parse import quote as url_quote from allmydata.scripts.common_http import do_http, check_http_error from allmydata.scripts.common import get_alias, DEFAULT_ALIAS, UnknownAliasError from allmydata.util.encodingutil import quote_output def mkdir(options): nodeurl = options['node-url'] aliases = options.aliases where = options.where stdout = options.stdout stderr = options.stderr if not nodeurl.endswith("/"): nodeurl += "/" if where: try: rootcap, path = get_alias(aliases, where, DEFAULT_ALIAS) except UnknownAliasError as e: e.display(stderr) return 1 if not where or not path: # create a new unlinked directory url = nodeurl + "uri?t=mkdir" if options["format"]: url += "&format=%s" % url_quote(options['format']) resp = do_http("POST", url) rc = check_http_error(resp, stderr) if rc: return rc new_uri = resp.read().strip() # emit its write-cap print(quote_output(new_uri, quotemarks=False), file=stdout) return 0 # create a new directory at the given location path = str(path, "utf-8") if path.endswith("/"): path = path[:-1] # path must be "/".join([s.encode("utf-8") for s in segments]) url = nodeurl + "uri/%s/%s?t=mkdir" % (url_quote(rootcap), url_quote(path)) if options['format']: url += "&format=%s" % url_quote(options['format']) resp = do_http("POST", url) check_http_error(resp, stderr) new_uri = resp.read().strip() print(quote_output(new_uri, quotemarks=False), file=stdout) return 0 tahoe_lafs-1.20.0/src/allmydata/scripts/tahoe_mv.py0000644000000000000000000000477213615410400017233 0ustar00""" Ported to Python 3. 
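
Implements both ``tahoe mv`` and ``tahoe ln``: the source object's cap is
linked at the destination path, and for ``mv`` the original link is then
deleted.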
""" import re from urllib.parse import quote as url_quote import json from allmydata.scripts.common import get_alias, DEFAULT_ALIAS, escape_path, \ UnknownAliasError from allmydata.scripts.common_http import do_http, format_http_error from allmydata.util.encodingutil import to_bytes # this script is used for both 'mv' and 'ln' def mv(options, mode="move"): nodeurl = options['node-url'] aliases = options.aliases from_file = options.from_file to_file = options.to_file stdout = options.stdout stderr = options.stderr if nodeurl[-1] != "/": nodeurl += "/" try: rootcap, from_path = get_alias(aliases, from_file, DEFAULT_ALIAS) except UnknownAliasError as e: e.display(stderr) return 1 from_path = str(from_path, "utf-8") from_url = nodeurl + "uri/%s" % url_quote(rootcap) if from_path: from_url += "/" + escape_path(from_path) # figure out the source cap resp = do_http("GET", from_url + "?t=json") if not re.search(r'^2\d\d$', str(resp.status)): print(format_http_error("Error", resp), file=stderr) return 1 data = resp.read() nodetype, attrs = json.loads(data) cap = to_bytes(attrs.get("rw_uri") or attrs["ro_uri"]) # now get the target try: rootcap, path = get_alias(aliases, to_file, DEFAULT_ALIAS) except UnknownAliasError as e: e.display(stderr) return 1 to_url = nodeurl + "uri/%s" % url_quote(rootcap) path = str(path, "utf-8") if path: to_url += "/" + escape_path(path) if to_url.endswith("/"): # "mv foo.txt bar/" == "mv foo.txt bar/foo.txt" to_url += escape_path(from_path[from_path.rfind("/")+1:]) to_url += "?t=uri&replace=only-files" resp = do_http("PUT", to_url, cap) status = resp.status if not re.search(r'^2\d\d$', str(status)): if status == 409: print("Error: You can't overwrite a directory with a file", file=stderr) else: print(format_http_error("Error", resp), file=stderr) if mode == "move": print("NOT removing the original", file=stderr) return 1 if mode == "move": # now remove the original resp = do_http("DELETE", from_url) if not re.search(r'^2\d\d$', str(resp.status)): print(format_http_error("Error deleting original after move", resp), file=stderr) return 2 print("OK", file=stdout) return 0 tahoe_lafs-1.20.0/src/allmydata/scripts/tahoe_put.py0000644000000000000000000001045613615410400017415 0ustar00""" Implement the ``tahoe put`` command. """ from __future__ import annotations from io import BytesIO from urllib.parse import quote as url_quote from base64 import urlsafe_b64encode from cryptography.hazmat.primitives.serialization import load_pem_private_key from twisted.python.filepath import FilePath from allmydata.crypto.rsa import PrivateKey, der_string_from_signing_key from allmydata.scripts.common_http import do_http, format_http_success, format_http_error from allmydata.scripts.common import get_alias, DEFAULT_ALIAS, escape_path, \ UnknownAliasError from allmydata.util.encodingutil import quote_output def load_private_key(path: str) -> str: """ Load a private key from a file and return it in a format appropriate to include in the HTTP request. 
""" privkey = load_pem_private_key(FilePath(path).getContent(), password=None) assert isinstance(privkey, PrivateKey) derbytes = der_string_from_signing_key(privkey) return urlsafe_b64encode(derbytes).decode("ascii") def put(options): """ @param verbosity: 0, 1, or 2, meaning quiet, verbose, or very verbose @return: a Deferred which eventually fires with the exit code """ nodeurl = options['node-url'] aliases = options.aliases from_file = options.from_file to_file = options.to_file mutable = options['mutable'] if options["private-key-path"] is None: private_key = None else: private_key = load_private_key(options["private-key-path"]) format = options['format'] if options['quiet']: verbosity = 0 else: verbosity = 2 stdin = options.stdin stdout = options.stdout stderr = options.stderr if nodeurl[-1] != "/": nodeurl += "/" if to_file: # several possibilities for the TO_FILE argument. # : unlinked upload # foo : TAHOE_ALIAS/foo # subdir/foo : TAHOE_ALIAS/subdir/foo # /oops/subdir/foo : DISALLOWED # ALIAS:foo : aliases[ALIAS]/foo # ALIAS:subdir/foo : aliases[ALIAS]/subdir/foo # ALIAS:/oops/subdir/foo : DISALLOWED # DIRCAP:./foo : DIRCAP/foo # DIRCAP:./subdir/foo : DIRCAP/subdir/foo # MUTABLE-FILE-WRITECAP : filecap # FIXME: don't hardcode cap format. if to_file.startswith("URI:MDMF:") or to_file.startswith("URI:SSK:"): url = nodeurl + "uri/%s" % url_quote(to_file) else: try: rootcap, path = get_alias(aliases, to_file, DEFAULT_ALIAS) except UnknownAliasError as e: e.display(stderr) return 1 path = str(path, "utf-8") if path.startswith("/"): suggestion = to_file.replace(u"/", u"", 1) print("Error: The remote filename must not start with a slash", file=stderr) print("Please try again, perhaps with %s" % quote_output(suggestion), file=stderr) return 1 url = nodeurl + "uri/%s/" % url_quote(rootcap) if path: url += escape_path(path) else: # unlinked upload url = nodeurl + "uri" queryargs = [] if mutable: queryargs.append("mutable=true") if private_key is not None: queryargs.append(f"private-key={private_key}") else: if private_key is not None: raise Exception("Can only supply a private key for mutables.") if format: queryargs.append("format=%s" % format) if queryargs: url += "?" + "&".join(queryargs) if from_file: infileobj = open(from_file, "rb") else: # do_http() can't use stdin directly: for one thing, we need a # Content-Length field. So we currently must copy it. if verbosity > 0: print("waiting for file data on stdin..", file=stderr) # We're uploading arbitrary files, so this had better be bytes: stdinb = stdin.buffer data = stdinb.read() infileobj = BytesIO(data) resp = do_http("PUT", url, infileobj) if resp.status in (200, 201,): print(format_http_success(resp), file=stderr) print(quote_output(resp.read(), quotemarks=False), file=stdout) return 0 print(format_http_error("Error", resp), file=stderr) return 1 tahoe_lafs-1.20.0/src/allmydata/scripts/tahoe_run.py0000644000000000000000000002672513615410400017417 0ustar00""" Ported to Python 3. 
""" __all__ = [ "RunOptions", "run", ] import os, sys from allmydata.scripts.common import BasedirOptions from twisted.scripts import twistd from twisted.python import usage from twisted.python.filepath import FilePath from twisted.python.reflect import namedAny from twisted.python.failure import Failure from twisted.internet.defer import maybeDeferred, Deferred from twisted.internet.protocol import Protocol from twisted.internet.stdio import StandardIO from twisted.internet.error import ReactorNotRunning from twisted.application.service import Service from allmydata.scripts.default_nodedir import _default_nodedir from allmydata.util.encodingutil import listdir_unicode, quote_local_unicode_path from allmydata.util.configutil import UnknownConfigError from allmydata.util.deferredutil import HookMixin from allmydata.util.pid import ( parse_pidfile, check_pid_process, cleanup_pidfile, ProcessInTheWay, InvalidPidFile, ) from allmydata.storage.crawler import ( MigratePickleFileError, ) from allmydata.storage_client import ( MissingPlugin, ) from allmydata.node import ( PortAssignmentRequired, PrivacyError, ) def get_pidfile(basedir): """ Returns the path to the PID file. :param basedir: the node's base directory :returns: the path to the PID file """ return os.path.join(basedir, u"running.process") def get_pid_from_pidfile(pidfile): """ Tries to read and return the PID stored in the node's PID file :param pidfile: try to read this PID file :returns: A numeric PID on success, ``None`` if PID file absent or inaccessible, ``-1`` if PID file invalid. """ try: pid, _ = parse_pidfile(pidfile) except EnvironmentError: return None except InvalidPidFile: return -1 return pid def identify_node_type(basedir): """ :return unicode: None or one of: 'client' or 'introducer'. """ tac = u'' try: for fn in listdir_unicode(basedir): if fn.endswith(u".tac"): tac = fn break except OSError: return None for t in (u"client", u"introducer"): if t in tac: return t return None class RunOptions(BasedirOptions): subcommand_name = "run" optParameters = [ ("basedir", "C", None, "Specify which Tahoe base directory should be used." " This has the same effect as the global --node-directory option." " [default: %s]" % quote_local_unicode_path(_default_nodedir)), ] optFlags = [ ("allow-stdin-close", None, 'Do not exit when stdin closes ("tahoe run" otherwise will exit).'), ] def parseArgs(self, basedir=None, *twistd_args): # This can't handle e.g. 'tahoe run --reactor=foo', since # '--reactor=foo' looks like an option to the tahoe subcommand, not to # twistd. So you can either use 'tahoe run' or 'tahoe run NODEDIR # --TWISTD-OPTIONS'. Note that 'tahoe --node-directory=NODEDIR run # --TWISTD-OPTIONS' also isn't allowed, unfortunately. BasedirOptions.parseArgs(self, basedir) self.twistd_args = twistd_args def getSynopsis(self): return ("Usage: %s [global-options] %s [options]" " [NODEDIR [twistd-options]]" % (self.command_name, self.subcommand_name)) def getUsage(self, width=None): t = BasedirOptions.getUsage(self, width) + "\n" twistd_options = str(MyTwistdConfig()).partition("\n")[2].partition("\n\n")[0] t += twistd_options.replace("Options:", "twistd-options:", 1) t += """ Note that if any twistd-options are used, NODEDIR must be specified explicitly (not by default or using -C/--basedir or -d/--node-directory), and followed by the twistd-options. 
""" return t class MyTwistdConfig(twistd.ServerOptions): subCommands = [("DaemonizeTahoeNode", None, usage.Options, "node")] stderr = sys.stderr class DaemonizeTheRealService(Service, HookMixin): """ this HookMixin should really be a helper; our hooks: - 'running': triggered when startup has completed; it triggers with None of successful or a Failure otherwise. """ stderr = sys.stderr def __init__(self, nodetype, basedir, options): super(DaemonizeTheRealService, self).__init__() self.nodetype = nodetype self.basedir = basedir # setup for HookMixin self._hooks = { "running": None, } self.stderr = options.parent.stderr self._close_on_stdin_close = False if options["allow-stdin-close"] else True def startService(self): from twisted.internet import reactor def start(): node_to_instance = { u"client": lambda: maybeDeferred(namedAny("allmydata.client.create_client"), self.basedir), u"introducer": lambda: maybeDeferred(namedAny("allmydata.introducer.server.create_introducer"), self.basedir), } try: service_factory = node_to_instance[self.nodetype] except KeyError: raise ValueError("unknown nodetype %s" % self.nodetype) def handle_config_error(reason): if reason.check(UnknownConfigError): self.stderr.write("\nConfiguration error:\n{}\n\n".format(reason.value)) elif reason.check(PortAssignmentRequired): self.stderr.write("\ntub.port cannot be 0: you must choose.\n\n") elif reason.check(PrivacyError): self.stderr.write("\n{}\n\n".format(reason.value)) elif reason.check(MigratePickleFileError): self.stderr.write( "Error\nAt least one 'pickle' format file exists.\n" "The file is {}\n" "You must either delete the pickle-format files" " or migrate them using the command:\n" " tahoe admin migrate-crawler --basedir {}\n\n" .format( reason.value.args[0].path, self.basedir, ) ) elif reason.check(MissingPlugin): self.stderr.write( "Missing Plugin\n" "The configuration requests a plugin:\n" "\n {}\n\n" "...which cannot be found.\n" "This typically means that some software hasn't been installed or the plugin couldn't be instantiated.\n\n" .format( reason.value.plugin_name, ) ) else: self.stderr.write("\nUnknown error, here's the traceback:\n") reason.printTraceback(self.stderr) reactor.stop() d = service_factory() def created(srv): if self.parent is not None: srv.setServiceParent(self.parent) # exiting on stdin-closed facilitates cleanup when run # as a subprocess if self._close_on_stdin_close: on_stdin_close(reactor, reactor.stop) d.addCallback(created) d.addErrback(handle_config_error) d.addBoth(self._call_hook, 'running') return d reactor.callWhenRunning(start) class DaemonizeTahoeNodePlugin(object): tapname = "tahoenode" def __init__(self, nodetype, basedir, allow_stdin_close): self.nodetype = nodetype self.basedir = basedir self.allow_stdin_close = allow_stdin_close def makeService(self, so): so["allow-stdin-close"] = self.allow_stdin_close return DaemonizeTheRealService(self.nodetype, self.basedir, so) def on_stdin_close(reactor, fn): """ Arrange for the function `fn` to run when our stdin closes """ when_closed_d = Deferred() class WhenClosed(Protocol): """ Notify a Deferred when our connection is lost .. as this is passed to twisted's StandardIO class, it is used to detect our parent going away. 
""" def connectionLost(self, reason): when_closed_d.callback(None) def on_close(arg): try: fn() except ReactorNotRunning: pass except Exception: # for our "exit" use-case failures will _mostly_ just be # ReactorNotRunning (because we're already shutting down # when our stdin closes) but no matter what "bad thing" # happens we just want to ignore it .. although other # errors might be interesting so we'll log those print(Failure()) return arg when_closed_d.addBoth(on_close) # we don't need to do anything with this instance because it gets # hooked into the reactor and thus remembered .. but we return it # for Windows testing purposes. return StandardIO( proto=WhenClosed(), reactor=reactor, ) def run(reactor, config, runApp=twistd.runApp): """ Runs a Tahoe-LAFS node in the foreground. Sets up the IService instance corresponding to the type of node that's starting and uses Twisted's twistd runner to disconnect our process from the terminal. """ out = config.stdout err = config.stderr basedir = config['basedir'] quoted_basedir = quote_local_unicode_path(basedir) print("'tahoe {}' in {}".format(config.subcommand_name, quoted_basedir), file=out) if not os.path.isdir(basedir): print("%s does not look like a directory at all" % quoted_basedir, file=err) return 1 nodetype = identify_node_type(basedir) if not nodetype: print("%s is not a recognizable node directory" % quoted_basedir, file=err) return 1 twistd_args = [ # ensure twistd machinery does not daemonize. "--nodaemon", "--rundir", basedir, ] if sys.platform != "win32": # turn off Twisted's pid-file to use our own -- but not on # windows, because twistd doesn't know about pidfiles there twistd_args.extend(["--pidfile", None]) twistd_args.extend(config.twistd_args) twistd_args.append("DaemonizeTahoeNode") # point at our DaemonizeTahoeNodePlugin twistd_config = MyTwistdConfig() twistd_config.stdout = out twistd_config.stderr = err try: twistd_config.parseOptions(twistd_args) except usage.error as ue: # these arguments were unsuitable for 'twistd' print(config, file=err) print("tahoe %s: usage error from twistd: %s\n" % (config.subcommand_name, ue), file=err) return 1 twistd_config.loadedPlugins = { "DaemonizeTahoeNode": DaemonizeTahoeNodePlugin(nodetype, basedir, config["allow-stdin-close"]) } # our own pid-style file contains PID and process creation time pidfile = FilePath(get_pidfile(config['basedir'])) try: check_pid_process(pidfile) except (ProcessInTheWay, InvalidPidFile) as e: print("ERROR: {}".format(e), file=err) return 1 else: reactor.addSystemEventTrigger( "after", "shutdown", lambda: cleanup_pidfile(pidfile) ) # We always pass --nodaemon so twistd.runApp does not daemonize. print("running node in %s" % (quoted_basedir,), file=out) runApp(twistd_config) return 0 tahoe_lafs-1.20.0/src/allmydata/scripts/tahoe_status.py0000644000000000000000000002570613615410400020134 0ustar00""" Ported to Python 3. """ import os from sys import stdout as _sys_stdout from urllib.parse import urlencode import json from .common import BaseOptions from allmydata.scripts.common import get_default_nodedir from allmydata.scripts.common_http import BadResponse from allmydata.util.abbreviate import abbreviate_space, abbreviate_time from allmydata.util.encodingutil import argv_to_abspath _print = print def print(*args, **kwargs): """ Builtin ``print``-alike that will even write unicode which cannot be encoded using the specified output file's encoding. 
This differs from the builtin print in that it will use the "replace" encoding error handler and then write the result whereas builtin print uses the "strict" encoding error handler. """ out = kwargs.pop("file", None) if out is None: out = _sys_stdout encoding = out.encoding or "ascii" def ensafe(o): if isinstance(o, str): return o.encode(encoding, errors="replace").decode(encoding) return o return _print( *(ensafe(a) for a in args), file=out, **kwargs ) def _get_request_parameters_for_fragment(options, fragment, method, post_args): """ Get parameters for ``do_http`` for requesting the given fragment. :return dict: A dictionary suitable for use as keyword arguments to ``do_http``. """ nodeurl = options['node-url'] if nodeurl.endswith('/'): nodeurl = nodeurl[:-1] url = u'%s/%s' % (nodeurl, fragment) if method == 'POST': if post_args is None: raise ValueError("Must pass post_args= for POST method") body = urlencode(post_args) else: body = '' if post_args is not None: raise ValueError("post_args= only valid for POST method") return dict( method=method, url=url, body=body.encode("utf-8"), ) def _handle_response_for_fragment(resp, nodeurl): """ Inspect an HTTP response and return the parsed payload, if possible. """ if isinstance(resp, BadResponse): # specifically NOT using format_http_error() here because the # URL is pretty sensitive (we're doing /uri/). raise RuntimeError( "Failed to get json from '%s': %s" % (nodeurl, resp.error) ) data = resp.read() parsed = json.loads(data) if parsed is None: raise RuntimeError("No data from '%s'" % (nodeurl,)) return parsed def pretty_progress(percent, size=10, output_ascii=False): """ Displays a unicode or ascii based progress bar of a certain length. Should we just depend on a library instead? (Originally from txtorcon) """ curr = int(percent / 100.0 * size) part = (percent / (100.0 / size)) - curr if output_ascii: part = int(part * 4) part = '.oO%'[part] block_chr = '#' else: block_chr = u'\u2588' # there are 8 unicode characters for vertical-bars/horiz-bars part = int(part * 8) # unicode 0x2581 -> 2589 are vertical bar chunks, like rainbarf uses # and following are narrow -> wider bars part = chr(0x258f - part) # for smooth bar # part = chr(0x2581 + part) # for neater-looking thing # hack for 100+ full so we don't print extra really-narrow/high bar if percent >= 100.0: part = '' curr = int(curr) return '%s%s%s' % ((block_chr * curr), part, (' ' * (size - curr - 1))) OP_MAP = { 'upload': ' put ', 'download': ' get ', 'retrieve': 'retr ', 'publish': ' pub ', 'mapupdate': 'mapup', 'unknown': ' ??? 
', } def _render_active_upload(op): total = ( op['progress-hash'] + op['progress-ciphertext'] + op['progress-encode-push'] ) / 3.0 * 100.0 return { u"op_type": u" put ", u"total": "{:3.0f}".format(total), u"progress_bar": u"{}".format(pretty_progress(total, size=15)), u"storage-index-string": op["storage-index-string"], u"status": op["status"], } def _render_active_download(op): return { u"op_type": u" get ", u"total": op["progress"], u"progress_bar": u"{}".format(pretty_progress(op['progress'] * 100.0, size=15)), u"storage-index-string": op["storage-index-string"], u"status": op["status"], } def _render_active_generic(op): return { u"op_type": OP_MAP[op["type"]], u"progress_bar": u"", u"total": u"???", u"storage-index-string": op["storage-index-string"], u"status": op["status"], } active_renderers = { "upload": _render_active_upload, "download": _render_active_download, "publish": _render_active_generic, "retrieve": _render_active_generic, "mapupdate": _render_active_generic, "unknown": _render_active_generic, } def render_active(stdout, status_data): active = status_data.get('active', None) if not active: print(u"No active operations.", file=stdout) return header = u"\u2553 {:<5} \u2565 {:<26} \u2565 {:<22} \u2565 {}".format( "type", "storage index", "progress", "status message", ) header_bar = u"\u255f\u2500{}\u2500\u256b\u2500{}\u2500\u256b\u2500{}\u2500\u256b\u2500{}".format( u'\u2500' * 5, u'\u2500' * 26, u'\u2500' * 22, u'\u2500' * 20, ) line_template = ( u"\u2551 {op_type} " u"\u2551 {storage-index-string} " u"\u2551 {progress_bar:15} " u"({total}%) " u"\u2551 {status}" ) footer_bar = u"\u2559\u2500{}\u2500\u2568\u2500{}\u2500\u2568\u2500{}\u2500\u2568\u2500{}".format( u'\u2500' * 5, u'\u2500' * 26, u'\u2500' * 22, u'\u2500' * 20, ) print(u"Active operations:", file=stdout) print(header, file=stdout) print(header_bar, file=stdout) for op in active: print(line_template.format( **active_renderers[op["type"]](op) )) print(footer_bar, file=stdout) def _render_recent_generic(op): return { u"op_type": OP_MAP[op["type"]], u"storage-index-string": op["storage-index-string"], u"nice_size": abbreviate_space(op["total-size"]), u"status": op["status"], } def _render_recent_mapupdate(op): return { u"op_type": u"mapup", u"storage-index-string": op["storage-index-string"], u"nice_size": op["mode"], u"status": op["status"], } recent_renderers = { "upload": _render_recent_generic, "download": _render_recent_generic, "publish": _render_recent_generic, "retrieve": _render_recent_generic, "mapupdate": _render_recent_mapupdate, "unknown": _render_recent_generic, } def render_recent(verbose, stdout, status_data): recent = status_data.get('recent', None) if not recent: print(u"No recent operations.", file=stdout) header = u"\u2553 {:<5} \u2565 {:<26} \u2565 {:<10} \u2565 {}".format( "type", "storage index", "size", "status message", ) line_template = ( u"\u2551 {op_type} " u"\u2551 {storage-index-string} " u"\u2551 {nice_size:<10} " u"\u2551 {status}" ) footer = u"\u2559\u2500{}\u2500\u2568\u2500{}\u2500\u2568\u2500{}\u2500\u2568\u2500{}".format( u'\u2500' * 5, u'\u2500' * 26, u'\u2500' * 10, u'\u2500' * 20, ) non_verbose_ops = ('upload', 'download') recent = [op for op in status_data['recent'] if op['type'] in non_verbose_ops] print(u"\nRecent operations:", file=stdout) if len(recent) or verbose: print(header, file=stdout) ops_to_show = status_data['recent'] if verbose else recent for op in ops_to_show: print(line_template.format( **recent_renderers[op["type"]](op) )) if len(recent) or verbose: 
print(footer, file=stdout) skipped = len(status_data['recent']) - len(ops_to_show) if not verbose and skipped: print(u" Skipped {} non-upload/download operations; use --verbose to see".format(skipped), file=stdout) def do_status(options, do_http=None): if do_http is None: from allmydata.scripts.common_http import do_http nodedir = options["node-directory"] with open(os.path.join(nodedir, u'private', u'api_auth_token'), 'r') as f: token = f.read().strip() with open(os.path.join(nodedir, u'node.url'), 'r') as f: options['node-url'] = f.read().strip() # do *all* our data-retrievals first in case there's an error try: status_data = _handle_response_for_fragment( do_http(**_get_request_parameters_for_fragment( options, 'status?t=json', method='POST', post_args=dict( t='json', token=token, ), )), options['node-url'], ) statistics_data = _handle_response_for_fragment( do_http(**_get_request_parameters_for_fragment( options, 'statistics?t=json', method='POST', post_args=dict( t='json', token=token, ), )), options['node-url'], ) except Exception as e: print(u"failed to retrieve data: %s" % str(e), file=options.stderr) return 2 downloaded_bytes = statistics_data['counters'].get('downloader.bytes_downloaded', 0) downloaded_files = statistics_data['counters'].get('downloader.files_downloaded', 0) uploaded_bytes = statistics_data['counters'].get('uploader.bytes_uploaded', 0) uploaded_files = statistics_data['counters'].get('uploader.files_uploaded', 0) print(u"Statistics (for last {}):".format(abbreviate_time(statistics_data['stats']['node.uptime'])), file=options.stdout) print(u" uploaded {} in {} files".format(abbreviate_space(uploaded_bytes), uploaded_files), file=options.stdout) print(u" downloaded {} in {} files".format(abbreviate_space(downloaded_bytes), downloaded_files), file=options.stdout) print(u"", file=options.stdout) render_active(options.stdout, status_data) render_recent(options['verbose'], options.stdout, status_data) # open question: should we return non-zero if there were no # operations at all to display? return 0 class TahoeStatusCommand(BaseOptions): optFlags = [ ["verbose", "v", "Include publish, retrieve, mapupdate in ops"], ] def postOptions(self): if self.parent['node-directory']: self['node-directory'] = argv_to_abspath(self.parent['node-directory']) else: self['node-directory'] = get_default_nodedir() def getSynopsis(self): return "Usage: tahoe [global-options] status [options]" def getUsage(self, width=None): t = BaseOptions.getUsage(self, width) t += "Various status information" return t subCommands = [ ["status", None, TahoeStatusCommand, "Status."], ] tahoe_lafs-1.20.0/src/allmydata/scripts/tahoe_unlink.py0000644000000000000000000000227613615410400020106 0ustar00""" Ported to Python 3. 
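
Implements ``tahoe unlink``: remove a single directory entry by issuing a
DELETE request against the node's web API.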
""" from urllib.parse import quote as url_quote from allmydata.scripts.common_http import do_http, format_http_success, format_http_error from allmydata.scripts.common import get_alias, DEFAULT_ALIAS, escape_path, \ UnknownAliasError def unlink(options, command="unlink"): """ @return: a Deferred which eventually fires with the exit code """ nodeurl = options['node-url'] aliases = options.aliases where = options.where stdout = options.stdout stderr = options.stderr if nodeurl[-1] != "/": nodeurl += "/" try: rootcap, path = get_alias(aliases, where, DEFAULT_ALIAS) except UnknownAliasError as e: e.display(stderr) return 1 if not path: print(""" 'tahoe %s' can only unlink directory entries, so a path must be given.""" % (command,), file=stderr) return 1 url = nodeurl + "uri/%s" % url_quote(rootcap) url += "/" + escape_path(path) resp = do_http("DELETE", url) if resp.status in (200,): print(format_http_success(resp), file=stdout) return 0 print(format_http_error("ERROR", resp), file=stderr) return 1 tahoe_lafs-1.20.0/src/allmydata/scripts/tahoe_webopen.py0000644000000000000000000000170613615410400020242 0ustar00""" Ported to Python 3. """ from urllib.parse import quote as url_quote from allmydata.scripts.common import get_alias, DEFAULT_ALIAS, escape_path, \ UnknownAliasError def webopen(options, opener=None): nodeurl = options['node-url'] stderr = options.stderr if not nodeurl.endswith("/"): nodeurl += "/" where = options.where if where: try: rootcap, path = get_alias(options.aliases, where, DEFAULT_ALIAS) except UnknownAliasError as e: e.display(stderr) return 1 path = str(path, "utf-8") if path == '/': path = '' url = nodeurl + "uri/%s" % url_quote(rootcap) if path: url += "/" + escape_path(path) else: url = nodeurl if options['info']: url += "?t=info" if not opener: import webbrowser opener = webbrowser.open opener(url) return 0 tahoe_lafs-1.20.0/src/allmydata/scripts/types_.py0000644000000000000000000000073213615410400016724 0ustar00""" Type definitions used by modules in this package. """ from typing import List, Tuple, Type, Sequence, Any from twisted.python.usage import Options # Historically, subcommands were implemented as lists, but due to a # [designed contraint in mypy](https://stackoverflow.com/a/52559625/70170), # a Tuple is required. SubCommand = Tuple[str, None, Type[Options], str] SubCommands = List[SubCommand] Parameters = List[Sequence[Any]] Flags = List[Tuple[str, None, str]] tahoe_lafs-1.20.0/src/allmydata/storage/__init__.py0000644000000000000000000000000013615410400017121 0ustar00tahoe_lafs-1.20.0/src/allmydata/storage/common.py0000644000000000000000000000222513615410400016665 0ustar00""" Ported to Python 3. """ import os.path from allmydata.util import base32 # Backwards compatibility. 
from allmydata.interfaces import DataTooLargeError # noqa: F401 class UnknownContainerVersionError(Exception): def __init__(self, filename, version): self.filename = filename self.version = version def __str__(self): return "sharefile {!r} had unexpected version {!r}".format( self.filename, self.version, ) class UnknownMutableContainerVersionError(UnknownContainerVersionError): pass class UnknownImmutableContainerVersionError(UnknownContainerVersionError): pass def si_b2a(storageindex): return base32.b2a(storageindex) def si_a2b(ascii_storageindex): return base32.a2b(ascii_storageindex) def si_to_human_readable(storageindex: bytes) -> str: """Create human-readable string of storage index.""" return str(base32.b2a(storageindex), "ascii") def storage_index_to_dir(storageindex): """Convert storage index to directory path. Returns native string. """ sia = si_b2a(storageindex) sia = sia.decode("ascii") return os.path.join(sia[:2], sia) tahoe_lafs-1.20.0/src/allmydata/storage/crawler.py0000644000000000000000000006554613615410400017053 0ustar00""" Crawl the storage server shares. Ported to Python 3. """ import os import time import json import struct from twisted.internet import reactor from twisted.application import service from twisted.python.filepath import FilePath from allmydata.storage.common import si_b2a from allmydata.util import fileutil class TimeSliceExceeded(Exception): pass class MigratePickleFileError(Exception): """ A pickle-format file exists (the FilePath to the file will be the single arg). """ pass def _convert_cycle_data(state): """ :param dict state: cycle-to-date or history-item state :return dict: the state in the JSON form """ def _convert_expiration_mode(value): # original is a 4-tuple, with the last element being a 2-tuple # .. convert both to lists return [ value[0], value[1], value[2], list(value[3]), ] def _convert_lease_age(value): # if we're in cycle-to-date, this is a dict if isinstance(value, dict): return { "{},{}".format(k[0], k[1]): v for k, v in value.items() } # otherwise, it's a history-item and they're 3-tuples return [ list(v) for v in value ] converters = { "configured-expiration-mode": _convert_expiration_mode, "cycle-start-finish-times": list, "lease-age-histogram": _convert_lease_age, "corrupt-shares": lambda value: [ list(x) for x in value ], "leases-per-share-histogram": lambda value: { str(k): v for k, v in value.items() }, } return { k: converters.get(k, lambda z: z)(v) for k, v in state.items() } def _convert_pickle_state_to_json(state): """ :param dict state: the pickled state :return dict: the state in the JSON form """ assert state["version"] == 1, "Only known version is 1" converters = { "cycle-to-date": _convert_cycle_data, } return { k: converters.get(k, lambda x: x)(v) for k, v in state.items() } def _upgrade_pickle_to_json(state_path, convert_pickle): """ :param FilePath state_path: the filepath to ensure is json :param Callable[dict] convert_pickle: function to change pickle-style state into JSON-style state :returns FilePath: the local path where the state is stored If this state is pickle, convert to the JSON format and return the JSON path. 
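
    For example, a (hypothetical) pickle-format state file ``foo.state``
    would be rewritten as ``foo.state.json`` alongside it, after which the
    original pickle file is deleted.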
""" json_state_path = state_path.siblingExtension(".json") # if there's no file there at all, we're done because there's # nothing to upgrade if not state_path.exists(): return json_state_path # upgrade the pickle data to JSON import pickle with state_path.open("rb") as f: state = pickle.load(f) new_state = convert_pickle(state) _dump_json_to_file(new_state, json_state_path) # we've written the JSON, delete the pickle state_path.remove() return json_state_path def _confirm_json_format(fp): """ :param FilePath fp: the original (pickle) name of a state file This confirms that we do _not_ have the pickle-version of a state-file and _do_ either have nothing, or the JSON version. If the pickle-version exists, an exception is raised. :returns FilePath: the JSON name of a state file """ if fp.path.endswith(".json"): return fp jsonfp = fp.siblingExtension(".json") if fp.exists(): raise MigratePickleFileError(fp) return jsonfp def _dump_json_to_file(js, afile): """ Dump the JSON object `js` to the FilePath `afile` """ with afile.open("wb") as f: data = json.dumps(js) f.write(data.encode("utf8")) class _LeaseStateSerializer(object): """ Read and write state for LeaseCheckingCrawler. This understands how to read the legacy pickle format files and upgrade them to the new JSON format (which will occur automatically). """ def __init__(self, state_path): self._path = _confirm_json_format(FilePath(state_path)) def load(self): """ :returns: deserialized JSON state """ with self._path.open("rb") as f: return json.load(f) def save(self, data): """ Serialize the given data as JSON into the state-path :returns: None """ tmpfile = self._path.siblingExtension(".tmp") _dump_json_to_file(data, tmpfile) fileutil.move_into_place(tmpfile.path, self._path.path) return None class ShareCrawler(service.MultiService): """A ShareCrawler subclass is attached to a StorageServer, and periodically walks all of its shares, processing each one in some fashion. This crawl is rate-limited, to reduce the IO burden on the host, since large servers can easily have a terabyte of shares, in several million files, which can take hours or days to read. Once the crawler starts a cycle, it will proceed at a rate limited by the allowed_cpu_percentage= and cpu_slice= parameters: yielding the reactor after it has worked for 'cpu_slice' seconds, and not resuming right away, always trying to use less than 'allowed_cpu_percentage'. Once the crawler finishes a cycle, it will put off starting the next one long enough to ensure that 'minimum_cycle_time' elapses between the start of two consecutive cycles. We assume that the normal upload/download/get_buckets traffic of a tahoe grid will cause the prefixdir contents to be mostly cached in the kernel, or that the number of buckets in each prefixdir will be small enough to load quickly. A 1TB allmydata.com server was measured to have 2.56M buckets, spread into the 1024 prefixdirs, with about 2500 buckets per prefix. On this server, each prefixdir took 130ms-200ms to list the first time, and 17ms to list the second time. To use a crawler, create a subclass which implements the process_bucket() method. It will be called with a prefixdir and a base32 storage index string. process_bucket() must run synchronously. Any keys added to self.state will be preserved. Override add_initial_state() to set up initial state keys. Override finished_cycle() to perform additional processing when the cycle is complete. Any status that the crawler produces should be put in the self.state dictionary. 
Status renderers (like a web page which describes the accomplishments of your crawler) will use crawler.get_state() to retrieve this dictionary; they can present the contents as they see fit. Then create an instance, with a reference to a StorageServer and a filename where it can store persistent state. The statefile is used to keep track of how far around the ring the process has travelled, as well as timing history to allow the pace to be predicted and controlled. The statefile will be updated and written to disk after each time slice (just before the crawler yields to the reactor), and also after each cycle is finished, and also when stopService() is called. Note that this means that a crawler which is interrupted with SIGKILL while it is in the middle of a time slice will lose progress: the next time the node is started, the crawler will repeat some unknown amount of work. The crawler instance must be started with startService() before it will do any work. To make it stop doing work, call stopService(). """ slow_start = 300 # don't start crawling for 5 minutes after startup # all three of these can be changed at any time allowed_cpu_percentage = .10 # use up to 10% of the CPU, on average cpu_slice = 1.0 # use up to 1.0 seconds before yielding minimum_cycle_time = 300 # don't run a cycle faster than this def __init__(self, server, statefile, allowed_cpu_percentage=None): service.MultiService.__init__(self) if allowed_cpu_percentage is not None: self.allowed_cpu_percentage = allowed_cpu_percentage self.server = server self.sharedir = server.sharedir self._state_serializer = _LeaseStateSerializer(statefile) self.prefixes = [si_b2a(struct.pack(">H", i << (16-10)))[:2] for i in range(2**10)] self.prefixes = [p.decode("ascii") for p in self.prefixes] self.prefixes.sort() self.timer = None self.bucket_cache = (None, []) self.current_sleep_time = None self.next_wake_time = None self.last_prefix_finished_time = None self.last_prefix_elapsed_time = None self.last_cycle_started_time = None self.last_cycle_elapsed_time = None self.load_state() def minus_or_none(self, a, b): if a is None: return None return a-b def get_progress(self): """I return information about how much progress the crawler is making. My return value is a dictionary. The primary key is 'cycle-in-progress': True if the crawler is currently traversing the shares, False if it is idle between cycles. Note that any of these 'time' keys could be None if I am called at certain moments, so application code must be prepared to tolerate this case. The estimates will also be None if insufficient data has been gatherered to form an estimate. If cycle-in-progress is True, the following keys will be present:: cycle-complete-percentage': float, from 0.0 to 100.0, indicating how far the crawler has progressed through the current cycle remaining-sleep-time: float, seconds from now when we do more work estimated-cycle-complete-time-left: float, seconds remaining until the current cycle is finished. 
TODO: this does not yet include the remaining time left in the current prefixdir, and it will be very inaccurate on fast crawlers (which can process a whole prefix in a single tick) estimated-time-per-cycle: float, seconds required to do a complete cycle If cycle-in-progress is False, the following keys are available:: next-crawl-time: float, seconds-since-epoch when next crawl starts remaining-wait-time: float, seconds from now when next crawl starts estimated-time-per-cycle: float, seconds required to do a complete cycle """ d = {} if self.state["current-cycle"] is None: d["cycle-in-progress"] = False d["next-crawl-time"] = self.next_wake_time d["remaining-wait-time"] = self.minus_or_none(self.next_wake_time, time.time()) else: d["cycle-in-progress"] = True pct = 100.0 * self.last_complete_prefix_index / len(self.prefixes) d["cycle-complete-percentage"] = pct remaining = None if self.last_prefix_elapsed_time is not None: left = len(self.prefixes) - self.last_complete_prefix_index remaining = left * self.last_prefix_elapsed_time # TODO: remainder of this prefix: we need to estimate the # per-bucket time, probably by measuring the time spent on # this prefix so far, divided by the number of buckets we've # processed. d["estimated-cycle-complete-time-left"] = remaining # it's possible to call get_progress() from inside a crawler's # finished_prefix() function d["remaining-sleep-time"] = self.minus_or_none(self.next_wake_time, time.time()) per_cycle = None if self.last_cycle_elapsed_time is not None: per_cycle = self.last_cycle_elapsed_time elif self.last_prefix_elapsed_time is not None: per_cycle = len(self.prefixes) * self.last_prefix_elapsed_time d["estimated-time-per-cycle"] = per_cycle return d def get_state(self): """I return the current state of the crawler. This is a copy of my state dictionary. If we are not currently sleeping (i.e. get_state() was called from inside the process_prefixdir, process_bucket, or finished_cycle() methods, or if startService has not yet been called on this crawler), these two keys will be None. Subclasses can override this to add computed keys to the return value, but don't forget to start with the upcall. """ state = self.state.copy() # it isn't a deepcopy, so don't go crazy return state def load_state(self): # we use this to store state for both the crawler's internals and # anything the subclass-specific code needs. The state is stored # after each bucket is processed, after each prefixdir is processed, # and after a cycle is complete. 
The internal keys we use are: # ["version"]: int, always 1 # ["last-cycle-finished"]: int, or None if we have not yet finished # any cycle # ["current-cycle"]: int, or None if we are sleeping between cycles # ["current-cycle-start-time"]: int, seconds-since-epoch of when this # cycle was started, possibly by an earlier # process # ["last-complete-prefix"]: str, two-letter name of the last prefixdir # that was fully processed, or None if we # are sleeping between cycles, or if we # have not yet finished any prefixdir since # a cycle was started # ["last-complete-bucket"]: str, base32 storage index bucket name # of the last bucket to be processed, or # None if we are sleeping between cycles try: state = self._state_serializer.load() except Exception: state = {"version": 1, "last-cycle-finished": None, "current-cycle": None, "last-complete-prefix": None, "last-complete-bucket": None, } state.setdefault("current-cycle-start-time", time.time()) # approximate self.state = state lcp = state["last-complete-prefix"] if lcp == None: self.last_complete_prefix_index = -1 else: self.last_complete_prefix_index = self.prefixes.index(lcp) self.add_initial_state() def add_initial_state(self): """Hook method to add extra keys to self.state when first loaded. The first time this Crawler is used, or when the code has been upgraded, the saved state file may not contain all the keys you expect. Use this method to add any missing keys. Simply modify self.state as needed. This method for subclasses to override. No upcall is necessary. """ pass def save_state(self): lcpi = self.last_complete_prefix_index if lcpi == -1: last_complete_prefix = None else: last_complete_prefix = self.prefixes[lcpi] self.state["last-complete-prefix"] = last_complete_prefix self._state_serializer.save(self.get_state()) def startService(self): # arrange things to look like we were just sleeping, so # status/progress values work correctly self.sleeping_between_cycles = True self.current_sleep_time = self.slow_start self.next_wake_time = time.time() + self.slow_start self.timer = reactor.callLater(self.slow_start, self.start_slice) service.MultiService.startService(self) def stopService(self): if self.timer: self.timer.cancel() self.timer = None self.save_state() return service.MultiService.stopService(self) def start_slice(self): start_slice = time.time() self.timer = None self.sleeping_between_cycles = False self.current_sleep_time = None self.next_wake_time = None try: self.start_current_prefix(start_slice) finished_cycle = True except TimeSliceExceeded: finished_cycle = False self.save_state() if not self.running: # someone might have used stopService() to shut us down return # either we finished a whole cycle, or we ran out of time now = time.time() this_slice = now - start_slice # this_slice/(this_slice+sleep_time) = percentage # this_slice/percentage = this_slice+sleep_time # sleep_time = (this_slice/percentage) - this_slice sleep_time = (this_slice / self.allowed_cpu_percentage) - this_slice # if the math gets weird, or a timequake happens, don't sleep # forever. Note that this means that, while a cycle is running, we # will process at least one bucket every 5 minutes, no matter how # long that bucket takes. sleep_time = max(0.0, min(sleep_time, 299)) if finished_cycle: # how long should we sleep between cycles? 
Don't run faster than # allowed_cpu_percentage says, but also run faster than # minimum_cycle_time self.sleeping_between_cycles = True sleep_time = max(sleep_time, self.minimum_cycle_time) else: self.sleeping_between_cycles = False self.current_sleep_time = sleep_time # for status page self.next_wake_time = now + sleep_time self.yielding(sleep_time) self.timer = reactor.callLater(sleep_time, self.start_slice) def start_current_prefix(self, start_slice): state = self.state if state["current-cycle"] is None: self.last_cycle_started_time = time.time() state["current-cycle-start-time"] = self.last_cycle_started_time if state["last-cycle-finished"] is None: state["current-cycle"] = 0 else: state["current-cycle"] = state["last-cycle-finished"] + 1 self.started_cycle(state["current-cycle"]) cycle = state["current-cycle"] for i in range(self.last_complete_prefix_index+1, len(self.prefixes)): # if we want to yield earlier, just raise TimeSliceExceeded() prefix = self.prefixes[i] prefixdir = os.path.join(self.sharedir, prefix) if i == self.bucket_cache[0]: buckets = self.bucket_cache[1] else: try: buckets = os.listdir(prefixdir) buckets.sort() except EnvironmentError: buckets = [] self.bucket_cache = (i, buckets) self.process_prefixdir(cycle, prefix, prefixdir, buckets, start_slice) self.last_complete_prefix_index = i now = time.time() if self.last_prefix_finished_time is not None: elapsed = now - self.last_prefix_finished_time self.last_prefix_elapsed_time = elapsed self.last_prefix_finished_time = now self.finished_prefix(cycle, prefix) if time.time() >= start_slice + self.cpu_slice: raise TimeSliceExceeded() # yay! we finished the whole cycle self.last_complete_prefix_index = -1 self.last_prefix_finished_time = None # don't include the sleep now = time.time() if self.last_cycle_started_time is not None: self.last_cycle_elapsed_time = now - self.last_cycle_started_time state["last-complete-bucket"] = None state["last-cycle-finished"] = cycle state["current-cycle"] = None self.finished_cycle(cycle) self.save_state() def process_prefixdir(self, cycle, prefix, prefixdir, buckets, start_slice): """This gets a list of bucket names (i.e. storage index strings, base32-encoded) in sorted order. You can override this if your crawler doesn't care about the actual shares, for example a crawler which merely keeps track of how many buckets are being managed by this server. Subclasses which *do* care about actual bucket should leave this method along, and implement process_bucket() instead. """ for bucket in buckets: last_complete = self.state["last-complete-bucket"] if last_complete is not None and bucket <= last_complete: continue self.process_bucket(cycle, prefix, prefixdir, bucket) self.state["last-complete-bucket"] = bucket if time.time() >= start_slice + self.cpu_slice: raise TimeSliceExceeded() # the remaining methods are explictly for subclasses to implement. def started_cycle(self, cycle): """Notify a subclass that the crawler is about to start a cycle. This method is for subclasses to override. No upcall is necessary. """ pass def process_bucket(self, cycle, prefix, prefixdir, storage_index_b32): """Examine a single bucket. Subclasses should do whatever they want to do to the shares therein, then update self.state as necessary. If the crawler is never interrupted by SIGKILL, this method will be called exactly once per share (per cycle). If it *is* interrupted, then the next time the node is started, some amount of work will be duplicated, according to when self.save_state() was last called. 
By default, save_state() is called at the end of each timeslice, and after finished_cycle() returns, and when stopService() is called. To reduce the chance of duplicate work (i.e. to avoid adding multiple records to a database), you can call save_state() at the end of your process_bucket() method. This will reduce the maximum duplicated work to one bucket per SIGKILL. It will also add overhead, probably 1-20ms per bucket (and some disk writes), which will count against your allowed_cpu_percentage, and which may be considerable if process_bucket() runs quickly. This method is for subclasses to override. No upcall is necessary. """ pass def finished_prefix(self, cycle, prefix): """Notify a subclass that the crawler has just finished processing a prefix directory (all buckets with the same two-character/10bit prefix). To impose a limit on how much work might be duplicated by a SIGKILL that occurs during a timeslice, you can call self.save_state() here, but be aware that it may represent a significant performance hit. This method is for subclasses to override. No upcall is necessary. """ pass def finished_cycle(self, cycle): """Notify subclass that a cycle (one complete traversal of all prefixdirs) has just finished. 'cycle' is the number of the cycle that just finished. This method should perform summary work and update self.state to publish information to status displays. One-shot crawlers, such as those used to upgrade shares to a new format or populate a database for the first time, can call self.stopService() (or more likely self.disownServiceParent()) to prevent it from running a second time. Don't forget to set some persistent state so that the upgrader won't be run again the next time the node is started. This method is for subclasses to override. No upcall is necessary. """ pass def yielding(self, sleep_time): """The crawler is about to sleep for 'sleep_time' seconds. This method is mostly for the convenience of unit tests. This method is for subclasses to override. No upcall is necessary. """ pass class BucketCountingCrawler(ShareCrawler): """I keep track of how many buckets are being managed by this server. This is equivalent to the number of distributed files and directories for which I am providing storage. The actual number of files+directories in the full grid is probably higher (especially when there are more servers than 'N', the number of generated shares), because some files+directories will have shares on other servers instead of me. Also note that the number of buckets will differ from the number of shares in small grids, when more than one share is placed on a single server. """ minimum_cycle_time = 60*60 # we don't need this more than once an hour def __init__(self, server, statefile, num_sample_prefixes=1): ShareCrawler.__init__(self, server, statefile) self.num_sample_prefixes = num_sample_prefixes def add_initial_state(self): # ["bucket-counts"][cyclenum][prefix] = number # ["last-complete-cycle"] = cyclenum # maintained by base class # ["last-complete-bucket-count"] = number # ["storage-index-samples"][prefix] = (cyclenum, # list of SI strings (base32)) self.state.setdefault("bucket-counts", {}) self.state.setdefault("last-complete-bucket-count", None) self.state.setdefault("storage-index-samples", {}) def process_prefixdir(self, cycle, prefix, prefixdir, buckets, start_slice): # we override process_prefixdir() because we don't want to look at # the individual buckets. We'll save state after each one. 
On my # laptop, a mostly-empty storage server can process about 70 # prefixdirs in a 1.0s slice. if cycle not in self.state["bucket-counts"]: self.state["bucket-counts"][cycle] = {} self.state["bucket-counts"][cycle][prefix] = len(buckets) if prefix in self.prefixes[:self.num_sample_prefixes]: self.state["storage-index-samples"][prefix] = (cycle, buckets) def finished_cycle(self, cycle): last_counts = self.state["bucket-counts"].get(cycle, []) if len(last_counts) == len(self.prefixes): # great, we have a whole cycle. num_buckets = sum(last_counts.values()) self.state["last-complete-bucket-count"] = num_buckets # get rid of old counts for old_cycle in list(self.state["bucket-counts"].keys()): if old_cycle != cycle: del self.state["bucket-counts"][old_cycle] # get rid of old samples too for prefix in list(self.state["storage-index-samples"].keys()): old_cycle,buckets = self.state["storage-index-samples"][prefix] if old_cycle != cycle: del self.state["storage-index-samples"][prefix] tahoe_lafs-1.20.0/src/allmydata/storage/expirer.py0000644000000000000000000004431613615410400017062 0ustar00import json import time import os import struct from allmydata.storage.crawler import ( ShareCrawler, _confirm_json_format, _convert_cycle_data, _dump_json_to_file, ) from allmydata.storage.shares import get_share_file from allmydata.storage.common import UnknownMutableContainerVersionError, \ UnknownImmutableContainerVersionError from twisted.python import log as twlog from twisted.python.filepath import FilePath def _convert_pickle_state_to_json(state): """ Convert a pickle-serialized crawler-history state to the new JSON format. :param dict state: the pickled state :return dict: the state in the JSON form """ return { str(k): _convert_cycle_data(v) for k, v in state.items() } class _HistorySerializer(object): """ Serialize the 'history' file of the lease-crawler state. This is "storage/lease_checker.history" for the pickle or "storage/lease_checker.history.json" for the new JSON format. """ def __init__(self, history_path): self._path = _confirm_json_format(FilePath(history_path)) if not self._path.exists(): _dump_json_to_file({}, self._path) def load(self): """ Deserialize the existing data. :return dict: the existing history state """ with self._path.open("rb") as f: history = json.load(f) return history def save(self, new_history): """ Serialize the existing data as JSON. """ _dump_json_to_file(new_history, self._path) return None class LeaseCheckingCrawler(ShareCrawler): """I examine the leases on all shares, determining which are still valid and which have expired. I can remove the expired leases (if so configured), and the share will be deleted when the last lease is removed. I collect statistics on the leases and make these available to a web status page, including:: Space recovered during this cycle-so-far: actual (only if expiration_enabled=True): num-buckets, num-shares, sum of share sizes, real disk usage ('real disk usage' means we use stat(fn).st_blocks*512 and include any space used by the directory) what it would have been with the original lease expiration time what it would have been with our configured expiration time Prediction of space that will be recovered during the rest of this cycle Prediction of space that will be recovered by the entire current cycle. 
Space recovered during the last 10 cycles <-- saved in separate pickle Shares/buckets examined: this cycle-so-far prediction of rest of cycle during last 10 cycles <-- separate pickle start/finish time of last 10 cycles <-- separate pickle expiration time used for last 10 cycles <-- separate pickle Histogram of leases-per-share: this-cycle-to-date last 10 cycles <-- separate pickle Histogram of lease ages, buckets = 1day cycle-to-date last 10 cycles <-- separate pickle All cycle-to-date values remain valid until the start of the next cycle. """ slow_start = 360 # wait 6 minutes after startup minimum_cycle_time = 12*60*60 # not more than twice per day def __init__(self, server, statefile, historyfile, expiration_enabled, mode, override_lease_duration, # used if expiration_mode=="age" cutoff_date, # used if expiration_mode=="cutoff-date" sharetypes): self._history_serializer = _HistorySerializer(historyfile) self.expiration_enabled = expiration_enabled self.mode = mode self.override_lease_duration = None self.cutoff_date = None if self.mode == "age": assert isinstance(override_lease_duration, (int, type(None))) self.override_lease_duration = override_lease_duration # seconds elif self.mode == "cutoff-date": assert isinstance(cutoff_date, int) # seconds-since-epoch assert cutoff_date is not None self.cutoff_date = cutoff_date else: raise ValueError("GC mode '%s' must be 'age' or 'cutoff-date'" % mode) self.sharetypes_to_expire = sharetypes ShareCrawler.__init__(self, server, statefile) def add_initial_state(self): # we fill ["cycle-to-date"] here (even though they will be reset in # self.started_cycle) just in case someone grabs our state before we # get started: unit tests do this so_far = self.create_empty_cycle_dict() self.state.setdefault("cycle-to-date", so_far) # in case we upgrade the code while a cycle is in progress, update # the keys individually for k in so_far: self.state["cycle-to-date"].setdefault(k, so_far[k]) def create_empty_cycle_dict(self): recovered = self.create_empty_recovered_dict() so_far = {"corrupt-shares": [], "space-recovered": recovered, "lease-age-histogram": {}, # (minage,maxage)->count "leases-per-share-histogram": {}, # leasecount->numshares } return so_far def create_empty_recovered_dict(self): recovered = {} for a in ("actual", "original", "configured", "examined"): for b in ("buckets", "shares", "sharebytes", "diskbytes"): recovered[a+"-"+b] = 0 recovered[a+"-"+b+"-mutable"] = 0 recovered[a+"-"+b+"-immutable"] = 0 return recovered def started_cycle(self, cycle): self.state["cycle-to-date"] = self.create_empty_cycle_dict() def stat(self, fn): return os.stat(fn) def process_bucket(self, cycle, prefix, prefixdir, storage_index_b32): bucketdir = os.path.join(prefixdir, storage_index_b32) s = self.stat(bucketdir) would_keep_shares = [] wks = None for fn in os.listdir(bucketdir): try: shnum = int(fn) except ValueError: continue # non-numeric means not a sharefile sharefile = os.path.join(bucketdir, fn) try: wks = self.process_share(sharefile) except (UnknownMutableContainerVersionError, UnknownImmutableContainerVersionError, struct.error): twlog.msg("lease-checker error processing %s" % sharefile) twlog.err() which = [storage_index_b32, shnum] self.state["cycle-to-date"]["corrupt-shares"].append(which) wks = (1, 1, 1, "unknown") would_keep_shares.append(wks) sharetype = None if wks: # use the last share's sharetype as the buckettype sharetype = wks[3] rec = self.state["cycle-to-date"]["space-recovered"] self.increment(rec, "examined-buckets", 1) if sharetype: 
self.increment(rec, "examined-buckets-"+sharetype, 1) del wks try: bucket_diskbytes = s.st_blocks * 512 except AttributeError: bucket_diskbytes = 0 # no stat().st_blocks on windows if sum([wks[0] for wks in would_keep_shares]) == 0: self.increment_bucketspace("original", bucket_diskbytes, sharetype) if sum([wks[1] for wks in would_keep_shares]) == 0: self.increment_bucketspace("configured", bucket_diskbytes, sharetype) if sum([wks[2] for wks in would_keep_shares]) == 0: self.increment_bucketspace("actual", bucket_diskbytes, sharetype) def process_share(self, sharefilename): # first, find out what kind of a share it is sf = get_share_file(sharefilename) sharetype = sf.sharetype now = time.time() s = self.stat(sharefilename) num_leases = 0 num_valid_leases_original = 0 num_valid_leases_configured = 0 expired_leases_configured = [] for li in sf.get_leases(): num_leases += 1 original_expiration_time = li.get_expiration_time() grant_renew_time = li.get_grant_renew_time_time() age = li.get_age() self.add_lease_age_to_histogram(age) # expired-or-not according to original expiration time if original_expiration_time > now: num_valid_leases_original += 1 # expired-or-not according to our configured age limit expired = False if self.mode == "age": age_limit = original_expiration_time if self.override_lease_duration is not None: age_limit = self.override_lease_duration if age > age_limit: expired = True else: assert self.mode == "cutoff-date" if grant_renew_time < self.cutoff_date: expired = True if sharetype not in self.sharetypes_to_expire: expired = False if expired: expired_leases_configured.append(li) else: num_valid_leases_configured += 1 so_far = self.state["cycle-to-date"] self.increment(so_far["leases-per-share-histogram"], str(num_leases), 1) self.increment_space("examined", s, sharetype) would_keep_share = [1, 1, 1, sharetype] if self.expiration_enabled: for li in expired_leases_configured: sf.cancel_lease(li.cancel_secret) if num_valid_leases_original == 0: would_keep_share[0] = 0 self.increment_space("original", s, sharetype) if num_valid_leases_configured == 0: would_keep_share[1] = 0 self.increment_space("configured", s, sharetype) if self.expiration_enabled: would_keep_share[2] = 0 self.increment_space("actual", s, sharetype) return would_keep_share def increment_space(self, a, s, sharetype): sharebytes = s.st_size try: # note that stat(2) says that st_blocks is 512 bytes, and that # st_blksize is "optimal file sys I/O ops blocksize", which is # independent of the block-size that st_blocks uses. diskbytes = s.st_blocks * 512 except AttributeError: # the docs say that st_blocks is only on linux. I also see it on # MacOS. But it isn't available on windows. 
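            # (Illustrative note: st_blocks counts 512-byte units, so a
            # 1-byte share on a filesystem with 4 KiB blocks reports
            # 8 * 512 = 4096 disk bytes even though st_size is 1; the
            # st_size fallback below is therefore only an approximation of
            # real disk usage.)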
diskbytes = sharebytes so_far_sr = self.state["cycle-to-date"]["space-recovered"] self.increment(so_far_sr, a+"-shares", 1) self.increment(so_far_sr, a+"-sharebytes", sharebytes) self.increment(so_far_sr, a+"-diskbytes", diskbytes) if sharetype: self.increment(so_far_sr, a+"-shares-"+sharetype, 1) self.increment(so_far_sr, a+"-sharebytes-"+sharetype, sharebytes) self.increment(so_far_sr, a+"-diskbytes-"+sharetype, diskbytes) def increment_bucketspace(self, a, bucket_diskbytes, sharetype): rec = self.state["cycle-to-date"]["space-recovered"] self.increment(rec, a+"-diskbytes", bucket_diskbytes) self.increment(rec, a+"-buckets", 1) if sharetype: self.increment(rec, a+"-diskbytes-"+sharetype, bucket_diskbytes) self.increment(rec, a+"-buckets-"+sharetype, 1) def increment(self, d, k, delta=1): if k not in d: d[k] = 0 d[k] += delta def add_lease_age_to_histogram(self, age): bucket_interval = 24*60*60 bucket_number = int(age/bucket_interval) bucket_start = bucket_number * bucket_interval bucket_end = bucket_start + bucket_interval k = (bucket_start, bucket_end) self.increment(self.state["cycle-to-date"]["lease-age-histogram"], k, 1) def convert_lease_age_histogram(self, lah): # convert { (minage,maxage) : count } into [ (minage,maxage,count) ] # since the former is not JSON-safe (JSON dictionaries must have # string keys). json_safe_lah = [] for k in sorted(lah): (minage,maxage) = k json_safe_lah.append( (minage, maxage, lah[k]) ) return json_safe_lah def finished_cycle(self, cycle): # add to our history state, prune old history h = {} start = self.state["current-cycle-start-time"] now = time.time() h["cycle-start-finish-times"] = [start, now] h["expiration-enabled"] = self.expiration_enabled h["configured-expiration-mode"] = [ self.mode, self.override_lease_duration, self.cutoff_date, self.sharetypes_to_expire, ] s = self.state["cycle-to-date"] # state["lease-age-histogram"] is a dictionary (mapping # (minage,maxage) tuple to a sharecount), but we report # self.get_state()["lease-age-histogram"] as a list of # (min,max,sharecount) tuples, because JSON can handle that better. # We record the list-of-tuples form into the history for the same # reason. lah = self.convert_lease_age_histogram(s["lease-age-histogram"]) h["lease-age-histogram"] = lah h["leases-per-share-histogram"] = s["leases-per-share-histogram"].copy() h["corrupt-shares"] = s["corrupt-shares"][:] # note: if ["shares-recovered"] ever acquires an internal dict, this # copy() needs to become a deepcopy h["space-recovered"] = s["space-recovered"].copy() history = self._history_serializer.load() history[str(cycle)] = h while len(history) > 10: oldcycles = sorted(int(k) for k in history.keys()) del history[str(oldcycles[0])] self._history_serializer.save(history) def get_state(self): """In addition to the crawler state described in ShareCrawler.get_state(), I return the following keys which are specific to the lease-checker/expirer. Note that the non-history keys (with 'cycle' in their names) are only present if a cycle is currently running. If the crawler is between cycles, it appropriate to show the latest item in the 'history' key instead. Also note that each history item has all the data in the 'cycle-to-date' value, plus cycle-start-finish-times. 
cycle-to-date: expiration-enabled configured-expiration-mode lease-age-histogram (list of (minage,maxage,sharecount) tuples) leases-per-share-histogram corrupt-shares (list of (si_b32,shnum) tuples, minimal verification) space-recovered estimated-remaining-cycle: # Values may be None if not enough data has been gathered to # produce an estimate. space-recovered estimated-current-cycle: # cycle-to-date plus estimated-remaining. Values may be None if # not enough data has been gathered to produce an estimate. space-recovered history: maps cyclenum to a dict with the following keys: cycle-start-finish-times expiration-enabled configured-expiration-mode lease-age-histogram leases-per-share-histogram corrupt-shares space-recovered The 'space-recovered' structure is a dictionary with the following keys: # 'examined' is what was looked at examined-buckets, examined-buckets-mutable, examined-buckets-immutable examined-shares, -mutable, -immutable examined-sharebytes, -mutable, -immutable examined-diskbytes, -mutable, -immutable # 'actual' is what was actually deleted actual-buckets, -mutable, -immutable actual-shares, -mutable, -immutable actual-sharebytes, -mutable, -immutable actual-diskbytes, -mutable, -immutable # would have been deleted, if the original lease timer was used original-buckets, -mutable, -immutable original-shares, -mutable, -immutable original-sharebytes, -mutable, -immutable original-diskbytes, -mutable, -immutable # would have been deleted, if our configured max_age was used configured-buckets, -mutable, -immutable configured-shares, -mutable, -immutable configured-sharebytes, -mutable, -immutable configured-diskbytes, -mutable, -immutable """ progress = self.get_progress() state = ShareCrawler.get_state(self) # does a shallow copy state["history"] = self._history_serializer.load() if not progress["cycle-in-progress"]: del state["cycle-to-date"] return state so_far = state["cycle-to-date"].copy() state["cycle-to-date"] = so_far lah = so_far["lease-age-histogram"] so_far["lease-age-histogram"] = self.convert_lease_age_histogram(lah) so_far["expiration-enabled"] = self.expiration_enabled so_far["configured-expiration-mode"] = [ self.mode, self.override_lease_duration, self.cutoff_date, self.sharetypes_to_expire, ] so_far_sr = so_far["space-recovered"] remaining_sr = {} remaining = {"space-recovered": remaining_sr} cycle_sr = {} cycle = {"space-recovered": cycle_sr} if progress["cycle-complete-percentage"] > 0.0: pc = progress["cycle-complete-percentage"] / 100.0 m = (1-pc)/pc for a in ("actual", "original", "configured", "examined"): for b in ("buckets", "shares", "sharebytes", "diskbytes"): for c in ("", "-mutable", "-immutable"): k = a+"-"+b+c remaining_sr[k] = m * so_far_sr[k] cycle_sr[k] = so_far_sr[k] + remaining_sr[k] else: for a in ("actual", "original", "configured", "examined"): for b in ("buckets", "shares", "sharebytes", "diskbytes"): for c in ("", "-mutable", "-immutable"): k = a+"-"+b+c remaining_sr[k] = None cycle_sr[k] = None state["estimated-remaining-cycle"] = remaining state["estimated-current-cycle"] = cycle return state tahoe_lafs-1.20.0/src/allmydata/storage/http_client.py0000644000000000000000000012270713615410400017722 0ustar00""" HTTP client that talks to the HTTP storage server. 
""" from __future__ import annotations from typing import ( Optional, Sequence, Mapping, BinaryIO, cast, TypedDict, Set, Dict, Callable, ClassVar, ) from base64 import b64encode from io import BytesIO from os import SEEK_END from attrs import define, asdict, frozen, field from eliot import start_action, register_exception_extractor from eliot.twisted import DeferredContext from pycddl import Schema from collections_extended import RangeMap from werkzeug.datastructures import Range, ContentRange from twisted.web.http_headers import Headers from twisted.web import http from twisted.web.iweb import IPolicyForHTTPS, IResponse, IAgent from twisted.internet.defer import Deferred, succeed from twisted.internet.interfaces import ( IOpenSSLClientConnectionCreator, IReactorTime, IDelayedCall, ) from twisted.internet.ssl import CertificateOptions from twisted.protocols.tls import TLSMemoryBIOProtocol from twisted.web.client import Agent, HTTPConnectionPool from zope.interface import implementer from hyperlink import DecodedURL import treq from treq.client import HTTPClient from OpenSSL import SSL from werkzeug.http import parse_content_range_header from .http_common import ( swissnum_auth_header, Secrets, get_content_type, CBOR_MIME_TYPE, get_spki_hash, response_is_not_html, ) from ..interfaces import VersionMessage from .common import si_b2a, si_to_human_readable from ..util.hashutil import timing_safe_compare from ..util.deferredutil import async_to_deferred from ..util.tor_provider import _Provider as TorProvider from ..util.cputhreadpool import defer_to_thread from ..util.cbor import dumps try: from txtorcon import Tor # type: ignore except ImportError: class Tor: # type: ignore[no-redef] pass def _encode_si(si: bytes) -> str: """Encode the storage index into Unicode string.""" return str(si_b2a(si), "ascii") class ClientException(Exception): """An unexpected response code from the server.""" def __init__( self, code: int, message: Optional[str] = None, body: Optional[bytes] = None ): Exception.__init__(self, code, message, body) self.code = code self.message = message self.body = body register_exception_extractor(ClientException, lambda e: {"response_code": e.code}) # Schemas for server responses. # # Tags are of the form #6.nnn, where the number is documented at # https://www.iana.org/assignments/cbor-tags/cbor-tags.xhtml. Notably, #6.258 # indicates a set. _SCHEMAS: Mapping[str, Schema] = { "get_version": Schema( # Note that the single-quoted (`'`) string keys in this schema # represent *byte* strings - per the CDDL specification. Text strings # are represented using strings with *double* quotes (`"`). """ response = {'http://allmydata.org/tahoe/protocols/storage/v1' => { 'maximum-immutable-share-size' => uint 'maximum-mutable-share-size' => uint 'available-space' => uint } 'application-version' => bstr } """ ), "allocate_buckets": Schema( """ response = { already-have: #6.258([0*256 uint]) allocated: #6.258([0*256 uint]) } """ ), "immutable_write_share_chunk": Schema( """ response = { required: [0* {begin: uint, end: uint}] } """ ), "list_shares": Schema( """ response = #6.258([0*256 uint]) """ ), "mutable_read_test_write": Schema( """ response = { "success": bool, "data": {0*256 share_number: [0* bstr]} } share_number = uint """ ), "mutable_list_shares": Schema( """ response = #6.258([0*256 uint]) """ ), } @define class _LengthLimitedCollector: """ Collect data using ``treq.collect()``, with limited length. 
""" remaining_length: int timeout_on_silence: IDelayedCall f: BytesIO = field(factory=BytesIO) def __call__(self, data: bytes) -> None: self.timeout_on_silence.reset(60) self.remaining_length -= len(data) if self.remaining_length < 0: raise ValueError("Response length was too long") self.f.write(data) def limited_content( response: IResponse, clock: IReactorTime, max_length: int = 30 * 1024 * 1024, ) -> Deferred[BinaryIO]: """ Like ``treq.content()``, but limit data read from the response to a set length. If the response is longer than the max allowed length, the result fails with a ``ValueError``. A potentially useful future improvement would be using a temporary file to store the content; since filesystem buffering means that would use memory for small responses and disk for large responses. This will time out if no data is received for 60 seconds; so long as a trickle of data continues to arrive, it will continue to run. """ result_deferred = succeed(None) # Sadly, addTimeout() won't work because we need access to the IDelayedCall # in order to reset it on each data chunk received. timeout = clock.callLater(60, result_deferred.cancel) collector = _LengthLimitedCollector(max_length, timeout) with start_action( action_type="allmydata:storage:http-client:limited-content", max_length=max_length, ).context(): d = DeferredContext(result_deferred) # Make really sure everything gets called in Deferred context, treq might # call collector directly... d.addCallback(lambda _: treq.collect(response, collector)) def done(_: object) -> BytesIO: timeout.cancel() collector.f.seek(0) return collector.f def failed(f): if timeout.active(): timeout.cancel() return f result = d.addCallbacks(done, failed) return result.addActionFinish() @define class ImmutableCreateResult(object): """Result of creating a storage index for an immutable.""" already_have: set[int] allocated: set[int] class _TLSContextFactory(CertificateOptions): """ Create a context that validates the way Tahoe-LAFS wants to: based on a pinned certificate hash, rather than a certificate authority. Originally implemented as part of Foolscap. To comply with the license, here's the original licensing terms: Copyright (c) 2006-2008 Brian Warner Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
""" def __init__(self, expected_spki_hash: bytes): self.expected_spki_hash = expected_spki_hash CertificateOptions.__init__(self) def getContext(self) -> SSL.Context: def always_validate(conn, cert, errno, depth, preverify_ok): # This function is called to validate the certificate received by # the other end. OpenSSL calls it multiple times, for each errno # for each certificate. # We do not care about certificate authorities or revocation # lists, we just want to know that the certificate has a valid # signature and follow the chain back to one which is # self-signed. We need to protect against forged signatures, but # not the usual TLS concerns about invalid CAs or revoked # certificates. things_are_ok = ( SSL.X509VerificationCodes.OK, SSL.X509VerificationCodes.ERR_CERT_NOT_YET_VALID, SSL.X509VerificationCodes.ERR_CERT_HAS_EXPIRED, SSL.X509VerificationCodes.ERR_DEPTH_ZERO_SELF_SIGNED_CERT, SSL.X509VerificationCodes.ERR_SELF_SIGNED_CERT_IN_CHAIN, ) # TODO can we do this once instead of multiple times? if errno in things_are_ok and timing_safe_compare( get_spki_hash(cert.to_cryptography()), self.expected_spki_hash ): return 1 # TODO: log the details of the error, because otherwise they get # lost in the PyOpenSSL exception that will eventually be raised # (possibly OpenSSL.SSL.Error: certificate verify failed) return 0 ctx = CertificateOptions.getContext(self) # VERIFY_PEER means we ask the the other end for their certificate. ctx.set_verify(SSL.VERIFY_PEER, always_validate) return ctx @implementer(IPolicyForHTTPS) @implementer(IOpenSSLClientConnectionCreator) @define class _StorageClientHTTPSPolicy: """ A HTTPS policy that ensures the SPKI hash of the public key matches a known hash, i.e. pinning-based validation. """ expected_spki_hash: bytes # IPolicyForHTTPS def creatorForNetloc(self, hostname: str, port: int) -> _StorageClientHTTPSPolicy: return self # IOpenSSLClientConnectionCreator def clientConnectionForTLS( self, tlsProtocol: TLSMemoryBIOProtocol ) -> SSL.Connection: return SSL.Connection( _TLSContextFactory(self.expected_spki_hash).getContext(), None ) @define class StorageClientFactory: """ Create ``StorageClient`` instances, using appropriate ``twisted.web.iweb.IAgent`` for different connection methods: normal TCP, Tor, and eventually I2P. There is some caching involved since there might be shared setup work, e.g. connecting to the local Tor service only needs to happen once. """ _default_connection_handlers: dict[str, str] _tor_provider: Optional[TorProvider] # Cache the Tor instance created by the provider, if relevant. _tor_instance: Optional[Tor] = None # If set, we're doing unit testing and we should call this with any # HTTPConnectionPool that gets passed/created to ``create_agent()``. TEST_MODE_REGISTER_HTTP_POOL: ClassVar[ Optional[Callable[[HTTPConnectionPool], None]] ] = None @classmethod def start_test_mode(cls, callback: Callable[[HTTPConnectionPool], None]) -> None: """Switch to testing mode. In testing mode we register the pool with test system using the given callback so it can Do Things, most notably killing off idle HTTP connections at test shutdown and, in some tests, in the midddle of the test. 
""" cls.TEST_MODE_REGISTER_HTTP_POOL = callback @classmethod def stop_test_mode(cls) -> None: """Stop testing mode.""" cls.TEST_MODE_REGISTER_HTTP_POOL = None async def _create_agent( self, nurl: DecodedURL, reactor: object, tls_context_factory: IPolicyForHTTPS, pool: HTTPConnectionPool, ) -> IAgent: """Create a new ``IAgent``, possibly using Tor.""" if self.TEST_MODE_REGISTER_HTTP_POOL is not None: self.TEST_MODE_REGISTER_HTTP_POOL(pool) # TODO default_connection_handlers should really be an object, not a # dict, so we can ask "is this using Tor" without poking at a # dictionary with arbitrary strings... See # https://tahoe-lafs.org/trac/tahoe-lafs/ticket/4032 handler = self._default_connection_handlers["tcp"] if handler == "tcp": return Agent(reactor, tls_context_factory, pool=pool) if handler == "tor" or nurl.scheme == "pb+tor": assert self._tor_provider is not None if self._tor_instance is None: self._tor_instance = await self._tor_provider.get_tor_instance(reactor) return self._tor_instance.web_agent( pool=pool, tls_context_factory=tls_context_factory ) else: # I2P support will be added here. See # https://tahoe-lafs.org/trac/tahoe-lafs/ticket/4037 raise RuntimeError(f"Unsupported tcp connection handler: {handler}") async def create_storage_client( self, nurl: DecodedURL, reactor: IReactorTime, pool: Optional[HTTPConnectionPool] = None, ) -> StorageClient: """Create a new ``StorageClient`` for the given NURL.""" assert nurl.fragment == "v=1" assert nurl.scheme in ("pb", "pb+tor") if pool is None: pool = HTTPConnectionPool(reactor) pool.maxPersistentPerHost = 10 certificate_hash = nurl.user.encode("ascii") agent = await self._create_agent( nurl, reactor, _StorageClientHTTPSPolicy(expected_spki_hash=certificate_hash), pool, ) treq_client = HTTPClient(agent) https_url = DecodedURL().replace(scheme="https", host=nurl.host, port=nurl.port) swissnum = nurl.path[0].encode("ascii") response_check = lambda _: None if self.TEST_MODE_REGISTER_HTTP_POOL is not None: response_check = response_is_not_html return StorageClient( https_url, swissnum, treq_client, pool, reactor, response_check, ) @define(hash=True) class StorageClient(object): """ Low-level HTTP client that talks to the HTTP storage server. Create using a ``StorageClientFactory`` instance. """ # The URL should be a HTTPS URL ("https://...") _base_url: DecodedURL _swissnum: bytes _treq: HTTPClient _pool: HTTPConnectionPool _clock: IReactorTime # Are we running unit tests? _analyze_response: Callable[[IResponse], None] = lambda _: None def relative_url(self, path: str) -> DecodedURL: """Get a URL relative to the base URL.""" return self._base_url.click(path) def _get_headers(self, headers: Optional[Headers]) -> Headers: """Return the basic headers to be used by default.""" if headers is None: headers = Headers() headers.addRawHeader( "Authorization", swissnum_auth_header(self._swissnum), ) return headers @async_to_deferred async def request( self, method: str, url: DecodedURL, lease_renew_secret: Optional[bytes] = None, lease_cancel_secret: Optional[bytes] = None, upload_secret: Optional[bytes] = None, write_enabler_secret: Optional[bytes] = None, headers: Optional[Headers] = None, message_to_serialize: object = None, timeout: float = 60, **kwargs, ) -> IResponse: """ Like ``treq.request()``, but with optional secrets that get translated into corresponding HTTP headers. If ``message_to_serialize`` is set, it will be serialized (by default with CBOR) and set as the request body. It should not be mutated during execution of this function! 
Default timeout is 60 seconds. """ with start_action( action_type="allmydata:storage:http-client:request", method=method, url=url.to_text(), timeout=timeout, ) as ctx: response = await self._request( method, url, lease_renew_secret, lease_cancel_secret, upload_secret, write_enabler_secret, headers, message_to_serialize, timeout, **kwargs, ) ctx.add_success_fields(response_code=response.code) return response async def _request( self, method: str, url: DecodedURL, lease_renew_secret: Optional[bytes] = None, lease_cancel_secret: Optional[bytes] = None, upload_secret: Optional[bytes] = None, write_enabler_secret: Optional[bytes] = None, headers: Optional[Headers] = None, message_to_serialize: object = None, timeout: float = 60, **kwargs, ) -> IResponse: """The implementation of request().""" headers = self._get_headers(headers) # Add secrets: for secret, value in [ (Secrets.LEASE_RENEW, lease_renew_secret), (Secrets.LEASE_CANCEL, lease_cancel_secret), (Secrets.UPLOAD, upload_secret), (Secrets.WRITE_ENABLER, write_enabler_secret), ]: if value is None: continue headers.addRawHeader( "X-Tahoe-Authorization", b"%s %s" % (secret.value.encode("ascii"), b64encode(value).strip()), ) # Note we can accept CBOR: headers.addRawHeader("Accept", CBOR_MIME_TYPE) # If there's a request message, serialize it and set the Content-Type # header: if message_to_serialize is not None: if "data" in kwargs: raise TypeError( "Can't use both `message_to_serialize` and `data` " "as keyword arguments at the same time" ) kwargs["data"] = await defer_to_thread(dumps, message_to_serialize) headers.addRawHeader("Content-Type", CBOR_MIME_TYPE) response = await self._treq.request( method, url, headers=headers, timeout=timeout, **kwargs ) self._analyze_response(response) return response async def decode_cbor(self, response: IResponse, schema: Schema) -> object: """Given HTTP response, return decoded CBOR body.""" with start_action(action_type="allmydata:storage:http-client:decode-cbor"): if response.code > 199 and response.code < 300: content_type = get_content_type(response.headers) if content_type == CBOR_MIME_TYPE: f = await limited_content(response, self._clock) data = f.read() def validate_and_decode(): return schema.validate_cbor(data, True) return await defer_to_thread(validate_and_decode) else: raise ClientException( -1, "Server didn't send CBOR, content type is {}".format( content_type ), ) else: data = ( await limited_content(response, self._clock, max_length=10_000) ).read() raise ClientException(response.code, response.phrase, data) def shutdown(self) -> Deferred[object]: """Shutdown any connections.""" return self._pool.closeCachedConnections() @define(hash=True) class StorageClientGeneral(object): """ High-level HTTP APIs that aren't immutable- or mutable-specific. """ _client: StorageClient @async_to_deferred async def get_version(self) -> VersionMessage: """ Return the version metadata for the server. """ with start_action( action_type="allmydata:storage:http-client:get-version", ): return await self._get_version() async def _get_version(self) -> VersionMessage: """Implementation of get_version().""" url = self._client.relative_url("/storage/v1/version") response = await self._client.request("GET", url) decoded_response = cast( Dict[bytes, object], await self._client.decode_cbor(response, _SCHEMAS["get_version"]), ) # Add some features we know are true because the HTTP API # specification requires them and because other parts of the storage # client implementation assumes they will be present. 
cast( Dict[bytes, object], decoded_response[b"http://allmydata.org/tahoe/protocols/storage/v1"], ).update( { b"tolerates-immutable-read-overrun": True, b"delete-mutable-shares-with-zero-length-writev": True, b"fills-holes-with-zero-bytes": True, b"prevents-read-past-end-of-share-data": True, } ) return decoded_response @async_to_deferred async def add_or_renew_lease( self, storage_index: bytes, renew_secret: bytes, cancel_secret: bytes ) -> None: """ Add or renew a lease. If the renewal secret matches an existing lease, it is renewed. Otherwise a new lease is added. """ with start_action( action_type="allmydata:storage:http-client:add-or-renew-lease", storage_index=si_to_human_readable(storage_index), ): return await self._add_or_renew_lease( storage_index, renew_secret, cancel_secret ) async def _add_or_renew_lease( self, storage_index: bytes, renew_secret: bytes, cancel_secret: bytes ) -> None: url = self._client.relative_url( "/storage/v1/lease/{}".format(_encode_si(storage_index)) ) response = await self._client.request( "PUT", url, lease_renew_secret=renew_secret, lease_cancel_secret=cancel_secret, ) if response.code == http.NO_CONTENT: return else: raise ClientException(response.code) @define class UploadProgress(object): """ Progress of immutable upload, per the server. """ # True when upload has finished. finished: bool # Remaining ranges to upload. required: RangeMap @async_to_deferred async def read_share_chunk( client: StorageClient, share_type: str, storage_index: bytes, share_number: int, offset: int, length: int, ) -> bytes: """ Download a chunk of data from a share. TODO https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3857 Failed downloads should be transparently retried and redownloaded by the implementation a few times so that if a failure percolates up, the caller can assume the failure isn't a short-term blip. NOTE: the underlying HTTP protocol is somewhat more flexible than this API, insofar as it doesn't always require a range. In practice a range is always provided by the current callers. """ url = client.relative_url( "/storage/v1/{}/{}/{}".format( share_type, _encode_si(storage_index), share_number ) ) # The default 60 second timeout is for getting the response, so it doesn't # include the time it takes to download the body... so we will will deal # with that later, via limited_content(). response = await client.request( "GET", url, headers=Headers( # Ranges in HTTP are _inclusive_, Python's convention is exclusive, # but Range constructor does that the conversion for us. {"range": [Range("bytes", [(offset, offset + length)]).to_header()]} ), unbuffered=True, # Don't buffer the response in memory. ) if response.code == http.NO_CONTENT: return b"" content_type = get_content_type(response.headers) if content_type != "application/octet-stream": raise ValueError( f"Content-type was wrong: {content_type}, should be application/octet-stream" ) if response.code == http.PARTIAL_CONTENT: content_range = parse_content_range_header( response.headers.getRawHeaders("content-range")[0] or "" ) if ( content_range is None or content_range.stop is None or content_range.start is None ): raise ValueError( "Content-Range was missing, invalid, or in format we don't support" ) supposed_length = content_range.stop - content_range.start if supposed_length > length: raise ValueError("Server sent more than we asked for?!") # It might also send less than we asked for. That's (probably) OK, e.g. # if we went past the end of the file. 
body = await limited_content(response, client._clock, supposed_length) body.seek(0, SEEK_END) actual_length = body.tell() if actual_length != supposed_length: # Most likely a mutable that got changed out from under us, but # conceivably could be a bug... raise ValueError( f"Length of response sent from server ({actual_length}) " + f"didn't match Content-Range header ({supposed_length})" ) body.seek(0) return body.read() else: # Technically HTTP allows sending an OK with full body under these # circumstances, but the server is not designed to do that so we ignore # that possibility for now... raise ClientException(response.code) @async_to_deferred async def advise_corrupt_share( client: StorageClient, share_type: str, storage_index: bytes, share_number: int, reason: str, ) -> None: assert isinstance(reason, str) url = client.relative_url( "/storage/v1/{}/{}/{}/corrupt".format( share_type, _encode_si(storage_index), share_number ) ) message = {"reason": reason} response = await client.request("POST", url, message_to_serialize=message) if response.code == http.OK: return else: raise ClientException( response.code, ) @define(hash=True) class StorageClientImmutables(object): """ APIs for interacting with immutables. """ _client: StorageClient @async_to_deferred async def create( self, storage_index: bytes, share_numbers: set[int], allocated_size: int, upload_secret: bytes, lease_renew_secret: bytes, lease_cancel_secret: bytes, ) -> ImmutableCreateResult: """ Create a new storage index for an immutable. TODO https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3857 retry internally on failure, to ensure the operation fully succeeded. If sufficient number of failures occurred, the result may fire with an error, but there's no expectation that user code needs to have a recovery codepath; it will most likely just report an error to the user. Result fires when creating the storage index succeeded, if creating the storage index failed the result will fire with an exception. 
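
        A usage sketch (illustrative only; the secrets are placeholders, and
        the lease secrets must be 32-byte values)::

            immutables = StorageClientImmutables(storage_client)
            result = await immutables.create(
                storage_index, share_numbers={0, 1}, allocated_size=1024,
                upload_secret=upload_secret,
                lease_renew_secret=renew_secret,
                lease_cancel_secret=cancel_secret,
            )
            # result.already_have: shares the server already holds
            # result.allocated: shares we are now expected to upload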
""" with start_action( action_type="allmydata:storage:http-client:immutable:create", storage_index=si_to_human_readable(storage_index), share_numbers=share_numbers, allocated_size=allocated_size, ) as ctx: result = await self._create( storage_index, share_numbers, allocated_size, upload_secret, lease_renew_secret, lease_cancel_secret, ) ctx.add_success_fields( already_have=result.already_have, allocated=result.allocated ) return result async def _create( self, storage_index: bytes, share_numbers: set[int], allocated_size: int, upload_secret: bytes, lease_renew_secret: bytes, lease_cancel_secret: bytes, ) -> ImmutableCreateResult: """Implementation of create().""" url = self._client.relative_url( "/storage/v1/immutable/" + _encode_si(storage_index) ) message = {"share-numbers": share_numbers, "allocated-size": allocated_size} response = await self._client.request( "POST", url, lease_renew_secret=lease_renew_secret, lease_cancel_secret=lease_cancel_secret, upload_secret=upload_secret, message_to_serialize=message, ) decoded_response = cast( Mapping[str, Set[int]], await self._client.decode_cbor(response, _SCHEMAS["allocate_buckets"]), ) return ImmutableCreateResult( already_have=decoded_response["already-have"], allocated=decoded_response["allocated"], ) @async_to_deferred async def abort_upload( self, storage_index: bytes, share_number: int, upload_secret: bytes ) -> None: """Abort the upload.""" with start_action( action_type="allmydata:storage:http-client:immutable:abort-upload", storage_index=si_to_human_readable(storage_index), share_number=share_number, ): return await self._abort_upload(storage_index, share_number, upload_secret) async def _abort_upload( self, storage_index: bytes, share_number: int, upload_secret: bytes ) -> None: """Implementation of ``abort_upload()``.""" url = self._client.relative_url( "/storage/v1/immutable/{}/{}/abort".format( _encode_si(storage_index), share_number ) ) response = await self._client.request( "PUT", url, upload_secret=upload_secret, ) if response.code == http.OK: return else: raise ClientException( response.code, ) @async_to_deferred async def write_share_chunk( self, storage_index: bytes, share_number: int, upload_secret: bytes, offset: int, data: bytes, ) -> UploadProgress: """ Upload a chunk of data for a specific share. TODO https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3857 The implementation should retry failed uploads transparently a number of times, so that if a failure percolates up, the caller can assume the failure isn't a short-term blip. Result fires when the upload succeeded, with a boolean indicating whether the _complete_ share (i.e. all chunks, not just this one) has been uploaded. 
""" with start_action( action_type="allmydata:storage:http-client:immutable:write-share-chunk", storage_index=si_to_human_readable(storage_index), share_number=share_number, offset=offset, data_len=len(data), ) as ctx: result = await self._write_share_chunk( storage_index, share_number, upload_secret, offset, data ) ctx.add_success_fields(finished=result.finished) return result async def _write_share_chunk( self, storage_index: bytes, share_number: int, upload_secret: bytes, offset: int, data: bytes, ) -> UploadProgress: """Implementation of ``write_share_chunk()``.""" url = self._client.relative_url( "/storage/v1/immutable/{}/{}".format( _encode_si(storage_index), share_number ) ) response = await self._client.request( "PATCH", url, upload_secret=upload_secret, data=data, headers=Headers( { "content-range": [ ContentRange("bytes", offset, offset + len(data)).to_header() ] } ), ) if response.code == http.OK: # Upload is still unfinished. finished = False elif response.code == http.CREATED: # Upload is done! finished = True else: raise ClientException( response.code, ) body = cast( Mapping[str, Sequence[Mapping[str, int]]], await self._client.decode_cbor( response, _SCHEMAS["immutable_write_share_chunk"] ), ) remaining = RangeMap() for chunk in body["required"]: remaining.set(True, chunk["begin"], chunk["end"]) return UploadProgress(finished=finished, required=remaining) @async_to_deferred async def read_share_chunk( self, storage_index: bytes, share_number: int, offset: int, length: int ) -> bytes: """ Download a chunk of data from a share. """ with start_action( action_type="allmydata:storage:http-client:immutable:read-share-chunk", storage_index=si_to_human_readable(storage_index), share_number=share_number, offset=offset, length=length, ) as ctx: result = await read_share_chunk( self._client, "immutable", storage_index, share_number, offset, length ) ctx.add_success_fields(data_len=len(result)) return result @async_to_deferred async def list_shares(self, storage_index: bytes) -> Set[int]: """ Return the set of shares for a given storage index. """ with start_action( action_type="allmydata:storage:http-client:immutable:list-shares", storage_index=si_to_human_readable(storage_index), ) as ctx: result = await self._list_shares(storage_index) ctx.add_success_fields(shares=result) return result async def _list_shares(self, storage_index: bytes) -> Set[int]: """Implementation of ``list_shares()``.""" url = self._client.relative_url( "/storage/v1/immutable/{}/shares".format(_encode_si(storage_index)) ) response = await self._client.request( "GET", url, ) if response.code == http.OK: return cast( Set[int], await self._client.decode_cbor(response, _SCHEMAS["list_shares"]), ) else: raise ClientException(response.code) @async_to_deferred async def advise_corrupt_share( self, storage_index: bytes, share_number: int, reason: str, ) -> None: """Indicate a share has been corrupted, with a human-readable message.""" with start_action( action_type="allmydata:storage:http-client:immutable:advise-corrupt-share", storage_index=si_to_human_readable(storage_index), share_number=share_number, reason=reason, ): await advise_corrupt_share( self._client, "immutable", storage_index, share_number, reason ) @frozen class WriteVector: """Data to write to a chunk.""" offset: int data: bytes @frozen class TestVector: """Checks to make on a chunk before writing to it.""" offset: int size: int specimen: bytes @frozen class ReadVector: """ Reads to do on chunks, as part of a read/test/write operation. 
""" offset: int size: int @frozen class TestWriteVectors: """Test and write vectors for a specific share.""" test_vectors: Sequence[TestVector] = field(factory=list) write_vectors: Sequence[WriteVector] = field(factory=list) new_length: Optional[int] = None def asdict(self) -> dict: """Return dictionary suitable for sending over CBOR.""" d = asdict(self) d["test"] = d.pop("test_vectors") d["write"] = d.pop("write_vectors") d["new-length"] = d.pop("new_length") return d @frozen class ReadTestWriteResult: """Result of sending read-test-write vectors.""" success: bool # Map share numbers to reads corresponding to the request's list of # ReadVectors: reads: Mapping[int, Sequence[bytes]] # Result type for mutable read/test/write HTTP response. Can't just use # dict[int,list[bytes]] because on Python 3.8 that will error out. MUTABLE_RTW = TypedDict( "MUTABLE_RTW", {"success": bool, "data": Mapping[int, Sequence[bytes]]} ) @frozen class StorageClientMutables: """ APIs for interacting with mutables. """ _client: StorageClient @async_to_deferred async def read_test_write_chunks( self, storage_index: bytes, write_enabler_secret: bytes, lease_renew_secret: bytes, lease_cancel_secret: bytes, testwrite_vectors: dict[int, TestWriteVectors], read_vector: list[ReadVector], ) -> ReadTestWriteResult: """ Read, test, and possibly write chunks to a particular mutable storage index. Reads are done before writes. Given a mapping between share numbers and test/write vectors, the tests are done and if they are valid the writes are done. """ with start_action( action_type="allmydata:storage:http-client:mutable:read-test-write", storage_index=si_to_human_readable(storage_index), ): return await self._read_test_write_chunks( storage_index, write_enabler_secret, lease_renew_secret, lease_cancel_secret, testwrite_vectors, read_vector, ) async def _read_test_write_chunks( self, storage_index: bytes, write_enabler_secret: bytes, lease_renew_secret: bytes, lease_cancel_secret: bytes, testwrite_vectors: dict[int, TestWriteVectors], read_vector: list[ReadVector], ) -> ReadTestWriteResult: """Implementation of ``read_test_write_chunks()``.""" url = self._client.relative_url( "/storage/v1/mutable/{}/read-test-write".format(_encode_si(storage_index)) ) message = { "test-write-vectors": { share_number: twv.asdict() for (share_number, twv) in testwrite_vectors.items() }, "read-vector": [asdict(r) for r in read_vector], } response = await self._client.request( "POST", url, write_enabler_secret=write_enabler_secret, lease_renew_secret=lease_renew_secret, lease_cancel_secret=lease_cancel_secret, message_to_serialize=message, ) if response.code == http.OK: result = cast( MUTABLE_RTW, await self._client.decode_cbor( response, _SCHEMAS["mutable_read_test_write"] ), ) return ReadTestWriteResult(success=result["success"], reads=result["data"]) else: raise ClientException(response.code, (await response.content())) @async_to_deferred async def read_share_chunk( self, storage_index: bytes, share_number: int, offset: int, length: int, ) -> bytes: """ Download a chunk of data from a share. 
""" with start_action( action_type="allmydata:storage:http-client:mutable:read-share-chunk", storage_index=si_to_human_readable(storage_index), share_number=share_number, offset=offset, length=length, ) as ctx: result = await read_share_chunk( self._client, "mutable", storage_index, share_number, offset, length ) ctx.add_success_fields(data_len=len(result)) return result @async_to_deferred async def list_shares(self, storage_index: bytes) -> Set[int]: """ List the share numbers for a given storage index. """ with start_action( action_type="allmydata:storage:http-client:mutable:list-shares", storage_index=si_to_human_readable(storage_index), ) as ctx: result = await self._list_shares(storage_index) ctx.add_success_fields(shares=result) return result async def _list_shares(self, storage_index: bytes) -> Set[int]: """Implementation of ``list_shares()``.""" url = self._client.relative_url( "/storage/v1/mutable/{}/shares".format(_encode_si(storage_index)) ) response = await self._client.request("GET", url) if response.code == http.OK: return cast( Set[int], await self._client.decode_cbor( response, _SCHEMAS["mutable_list_shares"], ), ) else: raise ClientException(response.code) @async_to_deferred async def advise_corrupt_share( self, storage_index: bytes, share_number: int, reason: str, ) -> None: """Indicate a share has been corrupted, with a human-readable message.""" with start_action( action_type="allmydata:storage:http-client:mutable:advise-corrupt-share", storage_index=si_to_human_readable(storage_index), share_number=share_number, reason=reason, ): await advise_corrupt_share( self._client, "mutable", storage_index, share_number, reason ) tahoe_lafs-1.20.0/src/allmydata/storage/http_common.py0000644000000000000000000000442513615410400017730 0ustar00""" Common HTTP infrastructure for the storge server. """ from enum import Enum from base64 import urlsafe_b64encode, b64encode from hashlib import sha256 from typing import Optional from cryptography.x509 import Certificate from cryptography.hazmat.primitives.serialization import Encoding, PublicFormat from werkzeug.http import parse_options_header from twisted.web.http_headers import Headers from twisted.web.iweb import IResponse CBOR_MIME_TYPE = "application/cbor" def get_content_type(headers: Headers) -> Optional[str]: """ Get the content type from the HTTP ``Content-Type`` header. Returns ``None`` if no content-type was set. """ values = headers.getRawHeaders("content-type", [None]) or [None] content_type = parse_options_header(values[0])[0] or None return content_type def response_is_not_html(response: IResponse) -> None: """ During tests, this is registered so we can ensure the web server doesn't give us text/html. HTML is never correct except in 404, but it's the default for Twisted's web server so we assert nothing unexpected happened. """ if response.code != 404: assert get_content_type(response.headers) != "text/html" def swissnum_auth_header(swissnum: bytes) -> bytes: """Return value for ``Authorization`` header.""" return b"Tahoe-LAFS " + b64encode(swissnum).strip() class Secrets(Enum): """Different kinds of secrets the client may send.""" LEASE_RENEW = "lease-renew-secret" LEASE_CANCEL = "lease-cancel-secret" UPLOAD = "upload-secret" WRITE_ENABLER = "write-enabler" def get_spki(certificate: Certificate) -> bytes: """ Get the bytes making up the DER encoded representation of the `SubjectPublicKeyInfo` (RFC 7469) for the given certificate. 
""" return certificate.public_key().public_bytes( Encoding.DER, PublicFormat.SubjectPublicKeyInfo ) def get_spki_hash(certificate: Certificate) -> bytes: """ Get the public key hash, as per RFC 7469: base64 of sha256 of the public key encoded in DER + Subject Public Key Info format. We use the URL-safe base64 variant, since this is typically found in NURLs. """ spki_bytes = get_spki(certificate) return urlsafe_b64encode(sha256(spki_bytes).digest()).strip().rstrip(b"=") tahoe_lafs-1.20.0/src/allmydata/storage/http_server.py0000644000000000000000000011510113615410400017740 0ustar00""" HTTP server for storage. """ from __future__ import annotations from typing import ( Any, Callable, Union, cast, Optional, TypeVar, Sequence, Protocol, Dict, ) from typing_extensions import ParamSpec, Concatenate from functools import wraps from base64 import b64decode import binascii from tempfile import TemporaryFile from os import SEEK_END, SEEK_SET import mmap from eliot import start_action from cryptography.x509 import Certificate as CryptoCertificate from zope.interface import implementer from klein import Klein, KleinRenderable from klein.resource import KleinResource from twisted.web import http from twisted.internet.interfaces import ( IListeningPort, IStreamServerEndpoint, IPullProducer, IProtocolFactory, ) from twisted.internet.address import IPv4Address, IPv6Address from twisted.internet.defer import Deferred from twisted.internet.ssl import CertificateOptions, Certificate, PrivateCertificate from twisted.internet.interfaces import IReactorFromThreads from twisted.web.server import Site, Request from twisted.web.iweb import IRequest from twisted.protocols.tls import TLSMemoryBIOFactory from twisted.python.filepath import FilePath from twisted.python.failure import Failure from attrs import define, field, Factory from werkzeug.http import ( parse_range_header, parse_content_range_header, parse_accept_header, ) from werkzeug.routing import BaseConverter, ValidationError from werkzeug.datastructures import ContentRange from hyperlink import DecodedURL from cryptography.x509 import load_pem_x509_certificate from pycddl import Schema, ValidationError as CDDLValidationError from .server import StorageServer from .http_common import ( swissnum_auth_header, Secrets, get_content_type, CBOR_MIME_TYPE, get_spki_hash, ) from .common import si_a2b from .immutable import BucketWriter, ConflictingWriteError from ..util.hashutil import timing_safe_compare from ..util.base32 import rfc3548_alphabet from ..util.deferredutil import async_to_deferred from ..util.cputhreadpool import defer_to_thread from ..util import cbor from ..interfaces import BadWriteEnablerError class ClientSecretsException(Exception): """The client did not send the appropriate secrets.""" def _extract_secrets( header_values: Sequence[str], required_secrets: set[Secrets] ) -> dict[Secrets, bytes]: """ Given list of values of ``X-Tahoe-Authorization`` headers, and required secrets, return dictionary mapping secrets to decoded values. If too few secrets were given, or too many, a ``ClientSecretsException`` is raised; its text is sent in the HTTP response. 
""" string_key_to_enum = {e.value: e for e in Secrets} result = {} try: for header_value in header_values: string_key, string_value = header_value.strip().split(" ", 1) key = string_key_to_enum[string_key] value = b64decode(string_value) if value == b"": raise ClientSecretsException( "Failed to decode secret {}".format(string_key) ) if key in (Secrets.LEASE_CANCEL, Secrets.LEASE_RENEW) and len(value) != 32: raise ClientSecretsException("Lease secrets must be 32 bytes long") result[key] = value except (ValueError, KeyError): raise ClientSecretsException("Bad header value(s): {}".format(header_values)) if result.keys() != required_secrets: raise ClientSecretsException( "Expected {} in X-Tahoe-Authorization headers, got {}".format( [r.value for r in required_secrets], list(result.keys()) ) ) return result class BaseApp(Protocol): """Protocol for ``HTTPServer`` and testing equivalent.""" _swissnum: bytes P = ParamSpec("P") T = TypeVar("T") SecretsDict = Dict[Secrets, bytes] App = TypeVar("App", bound=BaseApp) def _authorization_decorator( required_secrets: set[Secrets], ) -> Callable[ [Callable[Concatenate[App, Request, SecretsDict, P], T]], Callable[Concatenate[App, Request, P], T], ]: """ 1. Check the ``Authorization`` header matches server swissnum. 2. Extract ``X-Tahoe-Authorization`` headers and pass them in. 3. Log the request and response. """ def decorator( f: Callable[Concatenate[App, Request, SecretsDict, P], T] ) -> Callable[Concatenate[App, Request, P], T]: @wraps(f) def route( self: App, request: Request, *args: P.args, **kwargs: P.kwargs, ) -> T: # Don't set text/html content type by default. # None is actually supported, see https://github.com/twisted/twisted/issues/11902 request.defaultContentType = None # type: ignore[assignment] with start_action( action_type="allmydata:storage:http-server:handle-request", method=request.method, path=request.path, ) as ctx: try: # Check Authorization header: try: auth_header = request.requestHeaders.getRawHeaders( "Authorization", [""] )[0].encode("utf-8") except UnicodeError: raise _HTTPError(http.BAD_REQUEST, "Bad Authorization header") if not timing_safe_compare( auth_header, swissnum_auth_header(self._swissnum), ): raise _HTTPError( http.UNAUTHORIZED, "Wrong Authorization header" ) # Check secrets: authorization = request.requestHeaders.getRawHeaders( "X-Tahoe-Authorization", [] ) try: secrets = _extract_secrets(authorization, required_secrets) except ClientSecretsException as e: raise _HTTPError(http.BAD_REQUEST, str(e)) # Run the business logic: result = f(self, request, secrets, *args, **kwargs) except _HTTPError as e: # This isn't an error necessarily for logging purposes, # it's an implementation detail, an easier way to set # response codes. ctx.add_success_fields(response_code=e.code) ctx.finish() raise else: ctx.add_success_fields(response_code=request.code) return result return route return decorator def _authorized_route( klein_app: Klein, required_secrets: set[Secrets], url: str, *route_args: Any, branch: bool = False, **route_kwargs: Any, ) -> Callable[ [ Callable[ Concatenate[App, Request, SecretsDict, P], KleinRenderable, ] ], Callable[..., KleinRenderable], ]: """ Like Klein's @route, but with additional support for checking the ``Authorization`` header as well as ``X-Tahoe-Authorization`` headers. The latter will get passed in as second argument to wrapped functions, a dictionary mapping a ``Secret`` value to the uploaded secret. :param required_secrets: Set of required ``Secret`` types. 
""" def decorator( f: Callable[ Concatenate[App, Request, SecretsDict, P], KleinRenderable, ] ) -> Callable[..., KleinRenderable]: @klein_app.route(url, *route_args, branch=branch, **route_kwargs) # type: ignore[arg-type] @_authorization_decorator(required_secrets) @wraps(f) def handle_route( app: App, request: Request, secrets: SecretsDict, *args: P.args, **kwargs: P.kwargs, ) -> KleinRenderable: return f(app, request, secrets, *args, **kwargs) return handle_route return decorator @define class StorageIndexUploads(object): """ In-progress upload to storage index. """ # Map share number to BucketWriter shares: dict[int, BucketWriter] = Factory(dict) # Map share number to the upload secret (different shares might have # different upload secrets). upload_secrets: dict[int, bytes] = Factory(dict) @define class UploadsInProgress(object): """ Keep track of uploads for storage indexes. """ # Map storage index to corresponding uploads-in-progress _uploads: dict[bytes, StorageIndexUploads] = Factory(dict) # Map BucketWriter to (storage index, share number) _bucketwriters: dict[BucketWriter, tuple[bytes, int]] = Factory(dict) def add_write_bucket( self, storage_index: bytes, share_number: int, upload_secret: bytes, bucket: BucketWriter, ): """Add a new ``BucketWriter`` to be tracked.""" si_uploads = self._uploads.setdefault(storage_index, StorageIndexUploads()) si_uploads.shares[share_number] = bucket si_uploads.upload_secrets[share_number] = upload_secret self._bucketwriters[bucket] = (storage_index, share_number) def get_write_bucket( self, storage_index: bytes, share_number: int, upload_secret: bytes ) -> BucketWriter: """Get the given in-progress immutable share upload.""" self.validate_upload_secret(storage_index, share_number, upload_secret) try: return self._uploads[storage_index].shares[share_number] except (KeyError, IndexError): raise _HTTPError(http.NOT_FOUND) def remove_write_bucket(self, bucket: BucketWriter) -> None: """Stop tracking the given ``BucketWriter``.""" try: storage_index, share_number = self._bucketwriters.pop(bucket) except KeyError: # This is probably a BucketWriter created by Foolscap, so just # ignore it. return uploads_index = self._uploads[storage_index] uploads_index.shares.pop(share_number) uploads_index.upload_secrets.pop(share_number) if not uploads_index.shares: self._uploads.pop(storage_index) def validate_upload_secret( self, storage_index: bytes, share_number: int, upload_secret: bytes ) -> None: """ Raise an unauthorized-HTTP-response exception if the given storage_index+share_number have a different upload secret than the given one. If the given upload doesn't exist at all, nothing happens. """ if storage_index in self._uploads: in_progress = self._uploads[storage_index] # For pre-existing upload, make sure password matches. if share_number in in_progress.upload_secrets and not timing_safe_compare( in_progress.upload_secrets[share_number], upload_secret ): raise _HTTPError(http.UNAUTHORIZED) class StorageIndexConverter(BaseConverter): """Parser/validator for storage index URL path segments.""" regex = "[" + str(rfc3548_alphabet, "ascii") + "]{26}" def to_python(self, value: str) -> bytes: try: return si_a2b(value.encode("ascii")) except (AssertionError, binascii.Error, ValueError): raise ValidationError("Invalid storage index") class _HTTPError(Exception): """ Raise from ``HTTPServer`` endpoint to return the given HTTP response code. 
""" def __init__(self, code: int, body: Optional[str] = None): Exception.__init__(self, (code, body)) self.code = code self.body = body # CDDL schemas. # # Tags are of the form #6.nnn, where the number is documented at # https://www.iana.org/assignments/cbor-tags/cbor-tags.xhtml. Notably, #6.258 # indicates a set. # # Somewhat arbitrary limits are set to reduce e.g. number of shares, number of # vectors, etc.. These may need to be iterated on in future revisions of the # code. _SCHEMAS = { "allocate_buckets": Schema( """ request = { share-numbers: #6.258([0*256 uint]) allocated-size: uint } """ ), "advise_corrupt_share": Schema( """ request = { reason: tstr .size (1..32765) } """ ), "mutable_read_test_write": Schema( """ request = { "test-write-vectors": { 0*256 share_number : { "test": [0*30 {"offset": uint, "size": uint, "specimen": bstr}] "write": [* {"offset": uint, "data": bstr}] "new-length": uint / null } } "read-vector": [0*30 {"offset": uint, "size": uint}] } share_number = uint """ ), } # Callable that takes offset and length, returns the data at that range. ReadData = Callable[[int, int], bytes] @implementer(IPullProducer) @define class _ReadAllProducer: """ Producer that calls a read function repeatedly to read all the data, and writes to a request. """ request: Request read_data: ReadData result: Deferred = Factory(Deferred) start: int = field(default=0) @classmethod def produce_to(cls, request: Request, read_data: ReadData) -> Deferred[bytes]: """ Create and register the producer, returning ``Deferred`` that should be returned from a HTTP server endpoint. """ producer = cls(request, read_data) request.registerProducer(producer, False) return producer.result def resumeProducing(self) -> None: data = self.read_data(self.start, 65536) if not data: self.request.unregisterProducer() d = self.result del self.result d.callback(b"") return self.request.write(data) self.start += len(data) def pauseProducing(self) -> None: pass def stopProducing(self) -> None: pass @implementer(IPullProducer) @define class _ReadRangeProducer: """ Producer that calls a read function to read a range of data, and writes to a request. """ request: Optional[Request] read_data: ReadData result: Optional[Deferred[bytes]] start: int remaining: int def resumeProducing(self) -> None: if self.result is None or self.request is None: return to_read = min(self.remaining, 65536) data = self.read_data(self.start, to_read) assert len(data) <= to_read if not data and self.remaining > 0: d, self.result = self.result, None d.errback( ValueError( f"Should be {self.remaining} bytes left, but we got an empty read" ) ) self.stopProducing() return if len(data) > self.remaining: d, self.result = self.result, None d.errback( ValueError( f"Should be {self.remaining} bytes left, but we got more than that ({len(data)})!" ) ) self.stopProducing() return self.start += len(data) self.remaining -= len(data) assert self.remaining >= 0 self.request.write(data) if self.remaining == 0: self.stopProducing() def pauseProducing(self) -> None: pass def stopProducing(self) -> None: if self.request is not None: self.request.unregisterProducer() self.request = None if self.result is not None: d = self.result self.result = None d.callback(b"") def read_range( request: Request, read_data: ReadData, share_length: int ) -> Union[Deferred[bytes], bytes]: """ Read an optional ``Range`` header, reads data appropriately via the given callable, writes the data to the request. 
Only parses a subset of ``Range`` headers that we support: must be set, bytes only, only a single range, the end must be explicitly specified. Raises a ``_HTTPError(http.REQUESTED_RANGE_NOT_SATISFIABLE)`` if parsing is not possible or the header isn't set. Takes a function that will do the actual reading given the start offset and a length to read. The resulting data is written to the request. """ def read_data_with_error_handling(offset: int, length: int) -> bytes: try: return read_data(offset, length) except _HTTPError as e: request.setResponseCode(e.code) # Empty read means we're done. return b"" if request.getHeader("range") is None: return _ReadAllProducer.produce_to(request, read_data_with_error_handling) range_header = parse_range_header(request.getHeader("range")) if ( range_header is None # failed to parse or range_header.units != "bytes" or len(range_header.ranges) > 1 # more than one range or range_header.ranges[0][1] is None # range without end ): raise _HTTPError(http.REQUESTED_RANGE_NOT_SATISFIABLE) offset, end = range_header.ranges[0] assert end is not None # should've exited in block above this if so # If we're being ask to read beyond the length of the share, just read # less: end = min(end, share_length) if offset >= end: # Basically we'd need to return an empty body. However, the # Content-Range header can't actually represent empty lengths... so # (mis)use 204 response code to indicate that. raise _HTTPError(http.NO_CONTENT) request.setResponseCode(http.PARTIAL_CONTENT) # Actual conversion from Python's exclusive ranges to inclusive ranges is # handled by werkzeug. request.setHeader( "content-range", ContentRange("bytes", offset, end).to_header(), ) d: Deferred[bytes] = Deferred() request.registerProducer( _ReadRangeProducer( request, read_data_with_error_handling, d, offset, end - offset ), False, ) return d def _add_error_handling(app: Klein) -> None: """Add exception handlers to a Klein app.""" @app.handle_errors(_HTTPError) def _http_error(self: Any, request: IRequest, failure: Failure) -> KleinRenderable: """Handle ``_HTTPError`` exceptions.""" assert isinstance(failure.value, _HTTPError) request.setResponseCode(failure.value.code) if failure.value.body is not None: return failure.value.body else: return b"" @app.handle_errors(CDDLValidationError) def _cddl_validation_error( self: Any, request: IRequest, failure: Failure ) -> KleinRenderable: """Handle CDDL validation errors.""" request.setResponseCode(http.BAD_REQUEST) return str(failure.value).encode("utf-8") async def read_encoded( reactor, request, schema: Schema, max_size: int = 1024 * 1024 ) -> Any: """ Read encoded request body data, decoding it with CBOR by default. Somewhat arbitrarily, limit body size to 1MiB by default. """ content_type = get_content_type(request.requestHeaders) if content_type is None: content_type = CBOR_MIME_TYPE if content_type != CBOR_MIME_TYPE: raise _HTTPError(http.UNSUPPORTED_MEDIA_TYPE) # Make sure it's not too large: request.content.seek(0, SEEK_END) size = request.content.tell() if size > max_size: raise _HTTPError(http.REQUEST_ENTITY_TOO_LARGE) request.content.seek(0, SEEK_SET) # We don't want to load the whole message into memory, cause it might # be quite large. The CDDL validator takes a read-only bytes-like # thing. Luckily, for large request bodies twisted.web will buffer the # data in a file, so we can use mmap() to get a memory view. The CDDL # validator will not make a copy, so it won't increase memory usage # beyond that. 
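    # Note: for small bodies twisted.web typically keeps request.content in an
    # in-memory buffer whose fileno() raises io.UnsupportedOperation (a
    # subclass of both ValueError and OSError), in which case we fall back to
    # reading the whole body below instead of mmap()ing it.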
try: fd = request.content.fileno() except (ValueError, OSError): fd = -1 if fd >= 0: # It's a file, so we can use mmap() to save memory. message = mmap.mmap(fd, 0, access=mmap.ACCESS_READ) else: message = request.content.read() # Pycddl will release the GIL when validating larger documents, so # let's take advantage of multiple CPUs: decoded = await defer_to_thread(schema.validate_cbor, message, True) return decoded class HTTPServer(BaseApp): """ A HTTP interface to the storage server. """ _app = Klein() _app.url_map.converters["storage_index"] = StorageIndexConverter _add_error_handling(_app) def __init__( self, reactor: IReactorFromThreads, storage_server: StorageServer, swissnum: bytes, ): self._reactor = reactor self._storage_server = storage_server self._swissnum = swissnum # Maps storage index to StorageIndexUploads: self._uploads = UploadsInProgress() # When an upload finishes successfully, gets aborted, or times out, # make sure it gets removed from our tracking datastructure: self._storage_server.register_bucket_writer_close_handler( self._uploads.remove_write_bucket ) def get_resource(self) -> KleinResource: """Return twisted.web ``Resource`` for this object.""" return self._app.resource() def _send_encoded(self, request: Request, data: object) -> Deferred[bytes]: """ Return encoded data suitable for writing as the HTTP body response, by default using CBOR. Also sets the appropriate ``Content-Type`` header on the response. """ accept_headers = request.requestHeaders.getRawHeaders("accept") or [ CBOR_MIME_TYPE ] accept = parse_accept_header(accept_headers[0]) if accept.best == CBOR_MIME_TYPE: request.setHeader("Content-Type", CBOR_MIME_TYPE) f = TemporaryFile() cbor.dump(data, f) # type: ignore def read_data(offset: int, length: int) -> bytes: f.seek(offset) return f.read(length) return _ReadAllProducer.produce_to(request, read_data) else: # TODO Might want to optionally send JSON someday: # https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3861 raise _HTTPError(http.NOT_ACCEPTABLE) ##### Generic APIs ##### @_authorized_route(_app, set(), "/storage/v1/version", methods=["GET"]) def version(self, request: Request, authorization: SecretsDict) -> KleinRenderable: """Return version information.""" return self._send_encoded(request, self._get_version()) def _get_version(self) -> dict[bytes, Any]: """ Get the HTTP version of the storage server's version response. This differs from the Foolscap version by omitting certain obsolete fields. """ v = self._storage_server.get_version() v1_identifier = b"http://allmydata.org/tahoe/protocols/storage/v1" v1 = v[v1_identifier] return { v1_identifier: { b"maximum-immutable-share-size": v1[b"maximum-immutable-share-size"], b"maximum-mutable-share-size": v1[b"maximum-mutable-share-size"], b"available-space": v1[b"available-space"], }, b"application-version": v[b"application-version"], } ##### Immutable APIs ##### @_authorized_route( _app, {Secrets.LEASE_RENEW, Secrets.LEASE_CANCEL, Secrets.UPLOAD}, "/storage/v1/immutable/", methods=["POST"], ) @async_to_deferred async def allocate_buckets( self, request: Request, authorization: SecretsDict, storage_index: bytes ) -> KleinRenderable: """Allocate buckets.""" upload_secret = authorization[Secrets.UPLOAD] # It's just a list of up to ~256 shares, shouldn't use many bytes. info = await read_encoded( self._reactor, request, _SCHEMAS["allocate_buckets"], max_size=8192 ) # We do NOT validate the upload secret for existing bucket uploads. 
# Another upload may be happening in parallel, with a different upload # key. That's fine! If a client tries to _write_ to that upload, they # need to have an upload key. That does mean we leak the existence of # these parallel uploads, but if you know storage index you can # download them once upload finishes, so it's not a big deal to leak # that information. already_got, sharenum_to_bucket = self._storage_server.allocate_buckets( storage_index, renew_secret=authorization[Secrets.LEASE_RENEW], cancel_secret=authorization[Secrets.LEASE_CANCEL], sharenums=info["share-numbers"], allocated_size=info["allocated-size"], ) for share_number, bucket in sharenum_to_bucket.items(): self._uploads.add_write_bucket( storage_index, share_number, upload_secret, bucket ) return await self._send_encoded( request, {"already-have": set(already_got), "allocated": set(sharenum_to_bucket)}, ) @_authorized_route( _app, {Secrets.UPLOAD}, "/storage/v1/immutable///abort", methods=["PUT"], ) def abort_share_upload( self, request: Request, authorization: SecretsDict, storage_index: bytes, share_number: int, ) -> KleinRenderable: """Abort an in-progress immutable share upload.""" try: bucket = self._uploads.get_write_bucket( storage_index, share_number, authorization[Secrets.UPLOAD] ) except _HTTPError as e: if e.code == http.NOT_FOUND: # It may be we've already uploaded this, in which case error # should be method not allowed (405). try: self._storage_server.get_buckets(storage_index)[share_number] except KeyError: pass else: # Already uploaded, so we can't abort. raise _HTTPError(http.NOT_ALLOWED) raise # Abort the upload; this should close it which will eventually result # in self._uploads.remove_write_bucket() being called. bucket.abort() return b"" @_authorized_route( _app, {Secrets.UPLOAD}, "/storage/v1/immutable//", methods=["PATCH"], ) def write_share_data( self, request: Request, authorization: SecretsDict, storage_index: bytes, share_number: int, ) -> KleinRenderable: """Write data to an in-progress immutable upload.""" content_range = parse_content_range_header(request.getHeader("content-range")) if content_range is None or content_range.units != "bytes": request.setResponseCode(http.REQUESTED_RANGE_NOT_SATISFIABLE) return b"" bucket = self._uploads.get_write_bucket( storage_index, share_number, authorization[Secrets.UPLOAD] ) offset = content_range.start or 0 # We don't support an unspecified stop for the range: assert content_range.stop is not None # Missing body makes no sense: assert request.content is not None remaining = content_range.stop - offset finished = False while remaining > 0: data = request.content.read(min(remaining, 65536)) assert data, "uploaded data length doesn't match range" try: finished = bucket.write(offset, data) except ConflictingWriteError: request.setResponseCode(http.CONFLICT) return b"" remaining -= len(data) offset += len(data) if finished: bucket.close() request.setResponseCode(http.CREATED) else: request.setResponseCode(http.OK) required = [] for start, end, _ in bucket.required_ranges().ranges(): required.append({"begin": start, "end": end}) return self._send_encoded(request, {"required": required}) @_authorized_route( _app, set(), "/storage/v1/immutable//shares", methods=["GET"], ) def list_shares( self, request: Request, authorization: SecretsDict, storage_index: bytes ) -> KleinRenderable: """ List shares for the given storage index. 
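        The response body is a CBOR-encoded set of share numbers. For
        illustration, a hedged sketch of the wire shape using ``cbor2`` purely
        as a stand-in codec (the share numbers are made up)::

            import cbor2

            body = cbor2.dumps({0, 1, 5})   # sets travel as CBOR tag 258
            assert cbor2.loads(body) == {0, 1, 5}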
""" share_numbers = set(self._storage_server.get_buckets(storage_index).keys()) return self._send_encoded(request, share_numbers) @_authorized_route( _app, set(), "/storage/v1/immutable//", methods=["GET"], ) def read_share_chunk( self, request: Request, authorization: SecretsDict, storage_index: bytes, share_number: int, ) -> KleinRenderable: """Read a chunk for an already uploaded immutable.""" request.setHeader("content-type", "application/octet-stream") try: bucket = self._storage_server.get_buckets(storage_index)[share_number] except KeyError: request.setResponseCode(http.NOT_FOUND) return b"" return read_range(request, bucket.read, bucket.get_length()) @_authorized_route( _app, {Secrets.LEASE_RENEW, Secrets.LEASE_CANCEL}, "/storage/v1/lease/", methods=["PUT"], ) def add_or_renew_lease( self, request: Request, authorization: SecretsDict, storage_index: bytes ) -> KleinRenderable: """Update the lease for an immutable or mutable share.""" if not list(self._storage_server.get_shares(storage_index)): raise _HTTPError(http.NOT_FOUND) # Checking of the renewal secret is done by the backend. self._storage_server.add_lease( storage_index, authorization[Secrets.LEASE_RENEW], authorization[Secrets.LEASE_CANCEL], ) request.setResponseCode(http.NO_CONTENT) return b"" @_authorized_route( _app, set(), "/storage/v1/immutable///corrupt", methods=["POST"], ) @async_to_deferred async def advise_corrupt_share_immutable( self, request: Request, authorization: SecretsDict, storage_index: bytes, share_number: int, ) -> KleinRenderable: """Indicate that given share is corrupt, with a text reason.""" try: bucket = self._storage_server.get_buckets(storage_index)[share_number] except KeyError: raise _HTTPError(http.NOT_FOUND) # The reason can be a string with explanation, so in theory it could be # longish? 
info = await read_encoded( self._reactor, request, _SCHEMAS["advise_corrupt_share"], max_size=32768, ) bucket.advise_corrupt_share(info["reason"].encode("utf-8")) return b"" ##### Mutable APIs ##### @_authorized_route( _app, {Secrets.LEASE_RENEW, Secrets.LEASE_CANCEL, Secrets.WRITE_ENABLER}, "/storage/v1/mutable//read-test-write", methods=["POST"], ) @async_to_deferred async def mutable_read_test_write( self, request: Request, authorization: SecretsDict, storage_index: bytes ) -> KleinRenderable: """Read/test/write combined operation for mutables.""" rtw_request = await read_encoded( self._reactor, request, _SCHEMAS["mutable_read_test_write"], max_size=2**48, ) secrets = ( authorization[Secrets.WRITE_ENABLER], authorization[Secrets.LEASE_RENEW], authorization[Secrets.LEASE_CANCEL], ) try: success, read_data = self._storage_server.slot_testv_and_readv_and_writev( storage_index, secrets, { k: ( [ (d["offset"], d["size"], b"eq", d["specimen"]) for d in v["test"] ], [(d["offset"], d["data"]) for d in v["write"]], v["new-length"], ) for (k, v) in rtw_request["test-write-vectors"].items() }, [(d["offset"], d["size"]) for d in rtw_request["read-vector"]], ) except BadWriteEnablerError: raise _HTTPError(http.UNAUTHORIZED) return await self._send_encoded( request, {"success": success, "data": read_data} ) @_authorized_route( _app, set(), "/storage/v1/mutable//", methods=["GET"], ) def read_mutable_chunk( self, request: Request, authorization: SecretsDict, storage_index: bytes, share_number: int, ) -> KleinRenderable: """Read a chunk from a mutable.""" request.setHeader("content-type", "application/octet-stream") try: share_length = self._storage_server.get_mutable_share_length( storage_index, share_number ) except KeyError: raise _HTTPError(http.NOT_FOUND) def read_data(offset, length): try: return self._storage_server.slot_readv( storage_index, [share_number], [(offset, length)] )[share_number][0] except KeyError: raise _HTTPError(http.NOT_FOUND) return read_range(request, read_data, share_length) @_authorized_route( _app, set(), "/storage/v1/mutable//shares", methods=["GET"], ) def enumerate_mutable_shares(self, request, authorization, storage_index): """List mutable shares for a storage index.""" shares = self._storage_server.enumerate_mutable_shares(storage_index) return self._send_encoded(request, shares) @_authorized_route( _app, set(), "/storage/v1/mutable///corrupt", methods=["POST"], ) @async_to_deferred async def advise_corrupt_share_mutable( self, request: Request, authorization: SecretsDict, storage_index: bytes, share_number: int, ) -> KleinRenderable: """Indicate that given share is corrupt, with a text reason.""" if share_number not in { shnum for (shnum, _) in self._storage_server.get_shares(storage_index) }: raise _HTTPError(http.NOT_FOUND) # The reason can be a string with explanation, so in theory it could be # longish? info = await read_encoded( self._reactor, request, _SCHEMAS["advise_corrupt_share"], max_size=32768 ) self._storage_server.advise_corrupt_share( b"mutable", storage_index, share_number, info["reason"].encode("utf-8") ) return b"" @implementer(IStreamServerEndpoint) @define class _TLSEndpointWrapper(object): """ Wrap an existing endpoint with the server-side storage TLS policy. This is useful because not all Tahoe-LAFS endpoints might be plain TCP+TLS, for example there's Tor and i2p. 
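    For illustration, a hedged sketch of wrapping a plain TCP endpoint (the
    port and paths below are hypothetical)::

        from twisted.internet import reactor
        from twisted.internet.endpoints import TCP4ServerEndpoint
        from twisted.python.filepath import FilePath

        endpoint = _TLSEndpointWrapper.from_paths(
            TCP4ServerEndpoint(reactor, 8049),
            private_key_path=FilePath("private/storage.key"),
            cert_path=FilePath("private/storage.cert"),
        )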
""" endpoint: IStreamServerEndpoint context_factory: CertificateOptions @classmethod def from_paths( cls: type[_TLSEndpointWrapper], endpoint: IStreamServerEndpoint, private_key_path: FilePath, cert_path: FilePath, ) -> "_TLSEndpointWrapper": """ Create an endpoint with the given private key and certificate paths on the filesystem. """ certificate = Certificate.loadPEM(cert_path.getContent()).original private_key = PrivateCertificate.loadPEM( cert_path.getContent() + b"\n" + private_key_path.getContent() ).privateKey.original certificate_options = CertificateOptions( privateKey=private_key, certificate=certificate ) return cls(endpoint=endpoint, context_factory=certificate_options) def listen(self, factory: IProtocolFactory) -> Deferred[IListeningPort]: return self.endpoint.listen( TLSMemoryBIOFactory(self.context_factory, False, factory) ) def build_nurl( hostname: str, port: int, swissnum: str, certificate: CryptoCertificate, subscheme: Optional[str] = None, ) -> DecodedURL: """ Construct a HTTPS NURL, given the hostname, port, server swissnum, and x509 certificate for the server. Clients can then connect to the server using this NURL. """ scheme = "pb" if subscheme is not None: scheme = f"{scheme}+{subscheme}" return DecodedURL().replace( fragment="v=1", # how we know this NURL is HTTP-based (i.e. not Foolscap) host=hostname, port=port, path=(swissnum,), userinfo=( str( get_spki_hash(certificate), "ascii", ), ), scheme=scheme, ) def listen_tls( server: HTTPServer, hostname: str, endpoint: IStreamServerEndpoint, private_key_path: FilePath, cert_path: FilePath, ) -> Deferred[tuple[DecodedURL, IListeningPort]]: """ Start a HTTPS storage server on the given port, return the NURL and the listening port. The hostname is the external IP or hostname clients will connect to, used to constrtuct the NURL; it does not modify what interfaces the server listens on. This will likely need to be updated eventually to handle Tor/i2p. """ endpoint = _TLSEndpointWrapper.from_paths(endpoint, private_key_path, cert_path) def get_nurl(listening_port: IListeningPort) -> DecodedURL: address = cast(Union[IPv4Address, IPv6Address], listening_port.getHost()) return build_nurl( hostname, address.port, str(server._swissnum, "ascii"), load_pem_x509_certificate(cert_path.getContent()), ) return endpoint.listen(Site(server.get_resource())).addCallback( lambda listening_port: (get_nurl(listening_port), listening_port) ) tahoe_lafs-1.20.0/src/allmydata/storage/immutable.py0000644000000000000000000005465413615410400017371 0ustar00""" Ported to Python 3. """ import os, stat, struct, time from collections_extended import RangeMap from foolscap.api import Referenceable from zope.interface import implementer from allmydata.interfaces import ( RIBucketWriter, RIBucketReader, ConflictingWriteError, DataTooLargeError, NoSpace, ) from allmydata.util import base32, fileutil, log from allmydata.util.assertutil import precondition from allmydata.storage.common import UnknownImmutableContainerVersionError from .immutable_schema import ( NEWEST_SCHEMA_VERSION, schema_from_version, ) # each share file (in storage/shares/$SI/$SHNUM) contains lease information # and share data. The share data is accessed by RIBucketWriter.write and # RIBucketReader.read . The lease information is not accessible through these # interfaces. # The share file has the following layout: # 0x00: share file version number, four bytes, current version is 2 # 0x04: share data length, four bytes big-endian = A # See Footnote 1 below. 
# 0x08: number of leases, four bytes big-endian # 0x0c: beginning of share data (see immutable.layout.WriteBucketProxy) # A+0x0c = B: first lease. Lease format is: # B+0x00: owner number, 4 bytes big-endian, 0 is reserved for no-owner # B+0x04: renew secret, 32 bytes (SHA256 + blake2b) # See Footnote 2 below. # B+0x24: cancel secret, 32 bytes (SHA256 + blake2b) # B+0x44: expiration time, 4 bytes big-endian seconds-since-epoch # B+0x48: next lease, or end of record # Footnote 1: as of Tahoe v1.3.0 this field is not used by storage servers, # but it is still filled in by storage servers in case the storage server # software gets downgraded from >= Tahoe v1.3.0 to < Tahoe v1.3.0, or the # share file is moved from one storage server to another. The value stored in # this field is truncated, so if the actual share data length is >= 2**32, # then the value stored in this field will be the actual share data length # modulo 2**32. # Footnote 2: The change between share file version number 1 and 2 is that # storage of lease secrets is changed from plaintext to hashed. This change # protects the secrets from compromises of local storage on the server: if a # plaintext cancel secret is somehow exfiltrated from the storage server, an # attacker could use it to cancel that lease and potentially cause user data # to be discarded before intended by the real owner. As of this comment, # lease cancellation is disabled because there have been at least two bugs # which leak the persisted value of the cancellation secret. If lease secrets # were stored hashed instead of plaintext then neither of these bugs would # have allowed an attacker to learn a usable cancel secret. # # Clients are free to construct these secrets however they like. The # Tahoe-LAFS client uses a SHA256-based construction. The server then uses # blake2b to hash these values for storage so that it retains no persistent # copy of the original secret. # def _fix_lease_count_format(lease_count_format): """ Turn a single character struct format string into a format string suitable for use in encoding and decoding the lease count value inside a share file, if possible. :param str lease_count_format: A single character format string like ``"B"`` or ``"L"``. :raise ValueError: If the given format string is not suitable for use encoding and decoding a lease count. :return str: A complete format string which can safely be used to encode and decode lease counts in a share file. """ if len(lease_count_format) != 1: raise ValueError( "Cannot construct ShareFile with lease_count_format={!r}; " "format must accept a single value".format( lease_count_format, ), ) # Make it big-endian with standard size so all platforms agree on the # result. fixed = ">" + lease_count_format if struct.calcsize(fixed) > 4: # There is only room for at most 4 bytes in the share file format so # we can't allow any larger formats. raise ValueError( "Cannot construct ShareFile with lease_count_format={!r}; " "size must be smaller than size of '>L'".format( lease_count_format, ), ) return fixed class ShareFile(object): """ Support interaction with persistent storage of a share. :ivar str _lease_count_format: The format string which is used to encode and decode the lease count inside the share file. As stated in the comment in this module there is room for at most 4 bytes in this part of the file. A format string that works on fewer bytes is allowed to restrict the number of leases allowed in the share file to a smaller number than could be supported by using the full 4 bytes. 
This is mostly of interest for testing. """ LEASE_SIZE = struct.calcsize(">L32s32sL") sharetype = "immutable" @classmethod def is_valid_header(cls, header): # type: (bytes) -> bool """ Determine if the given bytes constitute a valid header for this type of container. :param header: Some bytes from the beginning of a container. :return: ``True`` if the bytes could belong to this container, ``False`` otherwise. """ (version,) = struct.unpack(">L", header[:4]) return schema_from_version(version) is not None def __init__( self, filename, max_size=None, create=False, lease_count_format="L", schema=NEWEST_SCHEMA_VERSION, ): """ Initialize a ``ShareFile``. :param Optional[int] max_size: If given, the maximum number of bytes that this ``ShareFile`` will accept to be stored. :param bool create: If ``True``, create the file (and fail if it exists already). ``max_size`` must not be ``None`` in this case. If ``False``, open an existing file for reading. :param str lease_count_format: A format character to use to encode and decode the number of leases in the share file. There are only 4 bytes available in the file so the format must be 4 bytes or smaller. If different formats are used at different times with the same share file, the result will likely be nonsense. This parameter is intended for the test suite to use to be able to exercise values near the maximum encodeable value without having to create billions of leases. :raise ValueError: If the encoding of ``lease_count_format`` is too large or if it is not a single format character. """ precondition((max_size is not None) or (not create), max_size, create) self._lease_count_format = _fix_lease_count_format(lease_count_format) self._lease_count_size = struct.calcsize(self._lease_count_format) self.home = filename self._max_size = max_size if create: # touch the file, so later callers will see that we're working on # it. Also construct the metadata. assert not os.path.exists(self.home) fileutil.make_dirs(os.path.dirname(self.home)) self._schema = schema with open(self.home, 'wb') as f: f.write(self._schema.header(max_size)) self._lease_offset = max_size + 0x0c self._num_leases = 0 else: with open(self.home, 'rb') as f: filesize = os.path.getsize(self.home) (version, unused, num_leases) = struct.unpack(">LLL", f.read(0xc)) self._schema = schema_from_version(version) if self._schema is None: raise UnknownImmutableContainerVersionError(filename, version) self._num_leases = num_leases self._lease_offset = filesize - (num_leases * self.LEASE_SIZE) self._length = filesize - 0xc - (num_leases * self.LEASE_SIZE) self._data_offset = 0xc def get_length(self): """ Return the length of the data in the share, if we're reading. """ return self._length def unlink(self): os.unlink(self.home) def read_share_data(self, offset, length): precondition(offset >= 0) # reads beyond the end of the data are truncated. Reads that start # beyond the end of the data return an empty string. 
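        # Note: with the 0x0c-byte header, share byte N lives at file offset
        # 0x0c + N, and everything at or beyond self._lease_offset is lease
        # data rather than share data, so the length computed below is clamped
        # to that boundary.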
seekpos = self._data_offset+offset actuallength = max(0, min(length, self._lease_offset-seekpos)) if actuallength == 0: return b"" with open(self.home, 'rb') as f: f.seek(seekpos) return f.read(actuallength) def write_share_data(self, offset, data): length = len(data) precondition(offset >= 0, offset) if self._max_size is not None and offset+length > self._max_size: raise DataTooLargeError(self._max_size, offset, length) with open(self.home, 'rb+') as f: real_offset = self._data_offset+offset f.seek(real_offset) assert f.tell() == real_offset f.write(data) def _write_lease_record(self, f, lease_number, lease_info): offset = self._lease_offset + lease_number * self.LEASE_SIZE f.seek(offset) assert f.tell() == offset f.write(self._schema.lease_serializer.serialize(lease_info)) def _read_num_leases(self, f): f.seek(0x08) (num_leases,) = struct.unpack( self._lease_count_format, f.read(self._lease_count_size), ) return num_leases def _write_num_leases(self, f, num_leases): self._write_encoded_num_leases( f, struct.pack(self._lease_count_format, num_leases), ) def _write_encoded_num_leases(self, f, encoded_num_leases): f.seek(0x08) f.write(encoded_num_leases) def _truncate_leases(self, f, num_leases): f.truncate(self._lease_offset + num_leases * self.LEASE_SIZE) def get_leases(self): """Yields a LeaseInfo instance for all leases.""" with open(self.home, 'rb') as f: (version, unused, num_leases) = struct.unpack(">LLL", f.read(0xc)) f.seek(self._lease_offset) for i in range(num_leases): data = f.read(self.LEASE_SIZE) if data: yield self._schema.lease_serializer.unserialize(data) def add_lease(self, lease_info): with open(self.home, 'rb+') as f: num_leases = self._read_num_leases(f) # Before we write the new lease record, make sure we can encode # the new lease count. new_lease_count = struct.pack(self._lease_count_format, num_leases + 1) self._write_lease_record(f, num_leases, lease_info) self._write_encoded_num_leases(f, new_lease_count) def renew_lease(self, renew_secret, new_expire_time, allow_backdate=False): # type: (bytes, int, bool) -> None """ Update the expiration time on an existing lease. :param allow_backdate: If ``True`` then allow the new expiration time to be before the current expiration time. Otherwise, make no change when this is the case. :raise IndexError: If there is no lease matching the given renew secret. """ for i,lease in enumerate(self.get_leases()): if lease.is_renew_secret(renew_secret): # yup. See if we need to update the owner time. if allow_backdate or new_expire_time > lease.get_expiration_time(): # yes lease = lease.renew(new_expire_time) with open(self.home, 'rb+') as f: self._write_lease_record(f, i, lease) return raise IndexError("unable to renew non-existent lease") def add_or_renew_lease(self, available_space, lease_info): """ Renew an existing lease if possible, otherwise allocate a new one. :param int available_space: The maximum number of bytes of storage to commit in this operation. If more than this number of bytes is required, raise ``NoSpace`` instead. :param LeaseInfo lease_info: The details of the lease to renew or add. :raise NoSpace: If more than ``available_space`` bytes is required to complete the operation. In this case, no lease is added. :return: ``None`` """ try: self.renew_lease(lease_info.renew_secret, lease_info.get_expiration_time()) except IndexError: if lease_info.immutable_size() > available_space: raise NoSpace() self.add_lease(lease_info) def cancel_lease(self, cancel_secret): """Remove a lease with the given cancel_secret. 
If the last lease is cancelled, the file will be removed. Return the number of bytes that were freed (by truncating the list of leases, and possibly by deleting the file. Raise IndexError if there was no lease with the given cancel_secret. """ leases = list(self.get_leases()) num_leases_removed = 0 for i,lease in enumerate(leases): if lease.is_cancel_secret(cancel_secret): leases[i] = None num_leases_removed += 1 if not num_leases_removed: raise IndexError("unable to find matching lease to cancel") if num_leases_removed: # pack and write out the remaining leases. We write these out in # the same order as they were added, so that if we crash while # doing this, we won't lose any non-cancelled leases. leases = [l for l in leases if l] # remove the cancelled leases with open(self.home, 'rb+') as f: for i, lease in enumerate(leases): self._write_lease_record(f, i, lease) self._write_num_leases(f, len(leases)) self._truncate_leases(f, len(leases)) space_freed = self.LEASE_SIZE * num_leases_removed if not len(leases): space_freed += os.stat(self.home)[stat.ST_SIZE] self.unlink() return space_freed class BucketWriter(object): """ Keep track of the process of writing to a ShareFile. """ def __init__(self, ss, incominghome, finalhome, max_size, lease_info, clock): self.ss = ss self.incominghome = incominghome self.finalhome = finalhome self._max_size = max_size # don't allow the client to write more than this self.closed = False self.throw_out_all_data = False self._sharefile = ShareFile(incominghome, create=True, max_size=max_size) # also, add our lease to the file now, so that other ones can be # added by simultaneous uploaders self._sharefile.add_lease(lease_info) self._already_written = RangeMap() self._clock = clock self._timeout = clock.callLater(30 * 60, self._abort_due_to_timeout) def required_ranges(self): # type: () -> RangeMap """ Return which ranges still need to be written. """ result = RangeMap() result.set(True, 0, self._max_size) for start, end, _ in self._already_written.ranges(): result.delete(start, end) return result def allocated_size(self): return self._max_size def write(self, offset, data): # type: (int, bytes) -> bool """ Write data at given offset, return whether the upload is complete. """ # Delay the timeout, since we received data; if we get an # AlreadyCancelled error, that means there's a bug in the client and # write() was called after close(). self._timeout.reset(30 * 60) start = self._clock.seconds() precondition(not self.closed) if self.throw_out_all_data: return False # Make sure we're not conflicting with existing data: end = offset + len(data) for (chunk_start, chunk_stop, _) in self._already_written.ranges(offset, end): chunk_len = chunk_stop - chunk_start actual_chunk = self._sharefile.read_share_data(chunk_start, chunk_len) writing_chunk = data[chunk_start - offset:chunk_stop - offset] if actual_chunk != writing_chunk: raise ConflictingWriteError( "Chunk {}-{} doesn't match already written data.".format(chunk_start, chunk_stop) ) self._sharefile.write_share_data(offset, data) self._already_written.set(True, offset, end) self.ss.add_latency("write", self._clock.seconds() - start) self.ss.count("write") return self._is_finished() def _is_finished(self): """ Return whether the whole thing has been written. """ return sum([mr.stop - mr.start for mr in self._already_written.ranges()]) == self._max_size def close(self): # This can't actually be enabled, because it's not backwards compatible # with old Foolscap clients. 
# assert self._is_finished() precondition(not self.closed) self._timeout.cancel() start = self._clock.seconds() fileutil.make_dirs(os.path.dirname(self.finalhome)) fileutil.rename(self.incominghome, self.finalhome) try: # self.incominghome is like storage/shares/incoming/ab/abcde/4 . # We try to delete the parent (.../ab/abcde) to avoid leaving # these directories lying around forever, but the delete might # fail if we're working on another share for the same storage # index (like ab/abcde/5). The alternative approach would be to # use a hierarchy of objects (PrefixHolder, BucketHolder, # ShareWriter), each of which is responsible for a single # directory on disk, and have them use reference counting of # their children to know when they should do the rmdir. This # approach is simpler, but relies on os.rmdir refusing to delete # a non-empty directory. Do *not* use fileutil.rm_dir() here! os.rmdir(os.path.dirname(self.incominghome)) # we also delete the grandparent (prefix) directory, .../ab , # again to avoid leaving directories lying around. This might # fail if there is another bucket open that shares a prefix (like # ab/abfff). os.rmdir(os.path.dirname(os.path.dirname(self.incominghome))) # we leave the great-grandparent (incoming/) directory in place. except EnvironmentError: # ignore the "can't rmdir because the directory is not empty" # exceptions, those are normal consequences of the # above-mentioned conditions. pass self._sharefile = None self.closed = True filelen = os.stat(self.finalhome)[stat.ST_SIZE] self.ss.bucket_writer_closed(self, filelen) self.ss.add_latency("close", self._clock.seconds() - start) self.ss.count("close") def disconnected(self): if not self.closed: self.abort() def _abort_due_to_timeout(self): """ Called if we run out of time. """ log.msg("storage: aborting sharefile %s due to timeout" % self.incominghome, facility="tahoe.storage", level=log.UNUSUAL) self.abort() def abort(self): log.msg("storage: aborting sharefile %s" % self.incominghome, facility="tahoe.storage", level=log.UNUSUAL) self.ss.count("abort") if self.closed: return os.remove(self.incominghome) # if we were the last share to be moved, remove the incoming/ # directory that was our parent parentdir = os.path.split(self.incominghome)[0] if not os.listdir(parentdir): os.rmdir(parentdir) self._sharefile = None # We are now considered closed for further writing. We must tell # the storage server about this so that it stops expecting us to # use the space it allocated for us earlier. self.closed = True self.ss.bucket_writer_closed(self, 0) # Cancel timeout if it wasn't already cancelled. if self._timeout.active(): self._timeout.cancel() @implementer(RIBucketWriter) class FoolscapBucketWriter(Referenceable): # type: ignore # warner/foolscap#78 """ Foolscap-specific BucketWriter. """ def __init__(self, bucket_writer): self._bucket_writer = bucket_writer def remote_write(self, offset, data): self._bucket_writer.write(offset, data) def remote_close(self): return self._bucket_writer.close() def remote_abort(self): return self._bucket_writer.abort() class BucketReader(object): """ Manage the process for reading from a ``ShareFile``. 
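    For illustration, a hedged sketch (the share path is hypothetical and
    ``ss`` stands in for the owning ``StorageServer``)::

        reader = BucketReader(ss, "storage/shares/ab/abcde/3")
        length = reader.get_length()
        first_chunk = reader.read(0, min(length, 65536))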
""" def __init__(self, ss, sharefname, storage_index=None, shnum=None): self.ss = ss self._share_file = ShareFile(sharefname) self.storage_index = storage_index self.shnum = shnum def __repr__(self): return "<%s %s %s>" % (self.__class__.__name__, base32.b2a(self.storage_index[:8])[:12].decode(), self.shnum) def read(self, offset, length): start = time.time() data = self._share_file.read_share_data(offset, length) self.ss.add_latency("read", time.time() - start) self.ss.count("read") return data def advise_corrupt_share(self, reason): return self.ss.advise_corrupt_share(b"immutable", self.storage_index, self.shnum, reason) def get_length(self): """ Return the length of the data in the share. """ return self._share_file.get_length() @implementer(RIBucketReader) class FoolscapBucketReader(Referenceable): # type: ignore # warner/foolscap#78 """ Foolscap wrapper for ``BucketReader`` """ def __init__(self, bucket_reader): self._bucket_reader = bucket_reader def remote_read(self, offset, length): return self._bucket_reader.read(offset, length) def remote_advise_corrupt_share(self, reason): return self._bucket_reader.advise_corrupt_share(reason) tahoe_lafs-1.20.0/src/allmydata/storage/immutable_schema.py0000644000000000000000000000371513615410400020701 0ustar00""" Ported to Python 3. """ import struct import attr from .lease_schema import ( v1_immutable, v2_immutable, ) @attr.s(frozen=True) class _Schema(object): """ Implement encoding and decoding for multiple versions of the immutable container schema. :ivar int version: the version number of the schema this object supports :ivar lease_serializer: an object that is responsible for lease serialization and unserialization """ version = attr.ib() lease_serializer = attr.ib() def header(self, max_size): # type: (int) -> bytes """ Construct a container header. :param max_size: the maximum size the container can hold :return: the header bytes """ # The second field -- the four-byte share data length -- is no longer # used as of Tahoe v1.3.0, but we continue to write it in there in # case someone downgrades a storage server from >= Tahoe-1.3.0 to < # Tahoe-1.3.0, or moves a share file from one server to another, # etc. We do saturation -- a share data length larger than 2**32-1 # (what can fit into the field) is marked as the largest length that # can fit into the field. That way, even if this does happen, the old # < v1.3.0 server will still allow clients to read the first part of # the share. return struct.pack(">LLL", self.version, min(2**32 - 1, max_size), 0) ALL_SCHEMAS = { _Schema(version=2, lease_serializer=v2_immutable), _Schema(version=1, lease_serializer=v1_immutable), } ALL_SCHEMA_VERSIONS = {schema.version for schema in ALL_SCHEMAS} NEWEST_SCHEMA_VERSION = max(ALL_SCHEMAS, key=lambda schema: schema.version) def schema_from_version(version): # (int) -> Optional[type] """ Find the schema object that corresponds to a certain version number. """ for schema in ALL_SCHEMAS: if schema.version == version: return schema return None tahoe_lafs-1.20.0/src/allmydata/storage/lease.py0000644000000000000000000003072413615410400016473 0ustar00""" Ported to Python 3. 
""" import struct, time import attr from zope.interface import ( Interface, implementer, ) from twisted.python.components import ( proxyForInterface, ) from allmydata.util.hashutil import timing_safe_compare from allmydata.util import base32 # struct format for representation of a lease in an immutable share IMMUTABLE_FORMAT = ">L32s32sL" # struct format for representation of a lease in a mutable share MUTABLE_FORMAT = ">LL32s32s20s" class ILeaseInfo(Interface): """ Represent a marker attached to a share that indicates that share should be retained for some amount of time. Typically clients will create and renew leases on their shares as a way to inform storage servers that there is still interest in those shares. A share may have more than one lease. If all leases on a share have expiration times in the past then the storage server may take this as a strong hint that no one is interested in the share anymore and therefore the share may be deleted to reclaim the space. """ def renew(new_expire_time): """ Create a new ``ILeaseInfo`` with the given expiration time. :param Union[int, float] new_expire_time: The expiration time the new ``ILeaseInfo`` will have. :return: The new ``ILeaseInfo`` provider with the new expiration time. """ def get_expiration_time(): """ :return Union[int, float]: this lease's expiration time """ def get_grant_renew_time_time(): """ :return Union[int, float]: a guess about the last time this lease was renewed """ def get_age(): """ :return Union[int, float]: a guess about how long it has been since this lease was renewed """ def to_immutable_data(): """ :return bytes: a serialized representation of this lease suitable for inclusion in an immutable container """ def to_mutable_data(): """ :return bytes: a serialized representation of this lease suitable for inclusion in a mutable container """ def immutable_size(): """ :return int: the size of the serialized representation of this lease in an immutable container """ def mutable_size(): """ :return int: the size of the serialized representation of this lease in a mutable container """ def is_renew_secret(candidate_secret): """ :return bool: ``True`` if the given byte string is this lease's renew secret, ``False`` otherwise """ def present_renew_secret(): """ :return str: Text which could reasonably be shown to a person representing this lease's renew secret. """ def is_cancel_secret(candidate_secret): """ :return bool: ``True`` if the given byte string is this lease's cancel secret, ``False`` otherwise """ def present_cancel_secret(): """ :return str: Text which could reasonably be shown to a person representing this lease's cancel secret. """ @implementer(ILeaseInfo) @attr.s(frozen=True) class LeaseInfo(object): """ Represent the details of one lease, a marker which is intended to inform the storage server how long to store a particular share. """ owner_num = attr.ib(default=None) # Don't put secrets into the default string representation. This makes it # slightly less likely the secrets will accidentally be leaked to # someplace they're not meant to be. 
renew_secret = attr.ib(default=None, repr=False) cancel_secret = attr.ib(default=None, repr=False) _expiration_time = attr.ib(default=None) nodeid = attr.ib(default=None) @nodeid.validator def _validate_nodeid(self, attribute, value): if value is not None: if not isinstance(value, bytes): raise ValueError( "nodeid value must be bytes, not {!r}".format(value), ) if len(value) != 20: raise ValueError( "nodeid value must be 20 bytes long, not {!r}".format(value), ) return None def get_expiration_time(self): # type: () -> float """ Retrieve a POSIX timestamp representing the time at which this lease is set to expire. """ return self._expiration_time def renew(self, new_expire_time): # type: (float) -> LeaseInfo """ Create a new lease the same as this one but with a new expiration time. :param new_expire_time: The new expiration time. :return: The new lease info. """ return attr.assoc( self, # MyPy is unhappy with this; long-term solution is likely switch to # new @frozen attrs API, with type annotations. _expiration_time=new_expire_time, # type: ignore[call-arg] ) def is_renew_secret(self, candidate_secret): # type: (bytes) -> bool """ Check a string to see if it is the correct renew secret. :return: ``True`` if it is the correct renew secret, ``False`` otherwise. """ return timing_safe_compare(self.renew_secret, candidate_secret) def present_renew_secret(self): # type: () -> str """ Return the renew secret, base32-encoded. """ return str(base32.b2a(self.renew_secret), "utf-8") def is_cancel_secret(self, candidate_secret): # type: (bytes) -> bool """ Check a string to see if it is the correct cancel secret. :return: ``True`` if it is the correct cancel secret, ``False`` otherwise. """ return timing_safe_compare(self.cancel_secret, candidate_secret) def present_cancel_secret(self): # type: () -> str """ Return the cancel secret, base32-encoded. """ return str(base32.b2a(self.cancel_secret), "utf-8") def get_grant_renew_time_time(self): # hack, based upon fixed 31day expiration period return self._expiration_time - 31*24*60*60 def get_age(self): return time.time() - self.get_grant_renew_time_time() @classmethod def from_immutable_data(cls, data): """ Create a new instance from the encoded data given. :param data: A lease serialized using the immutable-share-file format. """ names = [ "owner_num", "renew_secret", "cancel_secret", "expiration_time", ] values = struct.unpack(IMMUTABLE_FORMAT, data) return cls(nodeid=None, **dict(zip(names, values))) def immutable_size(self): """ :return int: The size, in bytes, of the representation of this lease in an immutable share file. """ return struct.calcsize(IMMUTABLE_FORMAT) def mutable_size(self): """ :return int: The size, in bytes, of the representation of this lease in a mutable share file. """ return struct.calcsize(MUTABLE_FORMAT) def to_immutable_data(self): return struct.pack(IMMUTABLE_FORMAT, self.owner_num, self.renew_secret, self.cancel_secret, int(self._expiration_time)) def to_mutable_data(self): return struct.pack(MUTABLE_FORMAT, self.owner_num, int(self._expiration_time), self.renew_secret, self.cancel_secret, self.nodeid) @classmethod def from_mutable_data(cls, data): """ Create a new instance from the encoded data given. :param data: A lease serialized using the mutable-share-file format. 
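        For illustration, a hedged round-trip sketch (the secrets and node id
        below are made-up placeholder bytes)::

            lease = LeaseInfo(
                owner_num=0,
                renew_secret=b"r" * 32,
                cancel_secret=b"c" * 32,
                expiration_time=1700000000,
                nodeid=b"n" * 20,
            )
            data = lease.to_mutable_data()
            assert LeaseInfo.from_mutable_data(data) == lease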
""" names = [ "owner_num", "expiration_time", "renew_secret", "cancel_secret", "nodeid", ] values = struct.unpack(MUTABLE_FORMAT, data) return cls(**dict(zip(names, values))) @attr.s(frozen=True) class HashedLeaseInfo(proxyForInterface(ILeaseInfo, "_lease_info")): # type: ignore # unsupported dynamic base class """ A ``HashedLeaseInfo`` wraps lease information in which the secrets have been hashed. """ _lease_info = attr.ib() _hash = attr.ib() # proxyForInterface will take care of forwarding all methods on ILeaseInfo # to `_lease_info`. Here we override a few of those methods to adjust # their behavior to make them suitable for use with hashed secrets. def renew(self, new_expire_time): # Preserve the HashedLeaseInfo wrapper around the renewed LeaseInfo. return attr.assoc( self, _lease_info=super(HashedLeaseInfo, self).renew(new_expire_time), ) def is_renew_secret(self, candidate_secret): # type: (bytes) -> bool """ Hash the candidate secret and compare the result to the stored hashed secret. """ return super(HashedLeaseInfo, self).is_renew_secret(self._hash(candidate_secret)) def present_renew_secret(self): # type: () -> str """ Present the hash of the secret with a marker indicating it is a hash. """ return u"hash:" + super(HashedLeaseInfo, self).present_renew_secret() def is_cancel_secret(self, candidate_secret): # type: (bytes) -> bool """ Hash the candidate secret and compare the result to the stored hashed secret. """ if isinstance(candidate_secret, _HashedCancelSecret): # Someone read it off of this object in this project - probably # the lease crawler - and is just trying to use it to identify # which lease it wants to operate on. Avoid re-hashing the value. # # It is important that this codepath is only availably internally # for this process to talk to itself. If it were to be exposed to # clients over the network, they could just provide the hashed # value to avoid having to ever learn the original value. hashed_candidate = candidate_secret.hashed_value else: # It is not yet hashed so hash it. hashed_candidate = self._hash(candidate_secret) return super(HashedLeaseInfo, self).is_cancel_secret(hashed_candidate) def present_cancel_secret(self): # type: () -> str """ Present the hash of the secret with a marker indicating it is a hash. """ return u"hash:" + super(HashedLeaseInfo, self).present_cancel_secret() @property def owner_num(self): return self._lease_info.owner_num @property def nodeid(self): return self._lease_info.nodeid @property def cancel_secret(self): """ Give back an opaque wrapper around the hashed cancel secret which can later be presented for a succesful equality comparison. """ # We don't *have* the cancel secret. We hashed it and threw away the # original. That's good. It does mean that some code that runs # in-process with the storage service (LeaseCheckingCrawler) runs into # some difficulty. That code wants to cancel leases and does so using # the same interface that faces storage clients (or would face them, # if lease cancellation were exposed). # # Since it can't use the hashed secret to cancel a lease (that's the # point of the hashing) and we don't have the unhashed secret to give # it, instead we give it a marker that `cancel_lease` will recognize. # On recognizing it, if the hashed value given matches the hashed # value stored it is considered a match and the lease can be # cancelled. # # This isn't great. Maybe the internal and external consumers of # cancellation should use different interfaces. 
return _HashedCancelSecret(self._lease_info.cancel_secret) @attr.s(frozen=True) class _HashedCancelSecret(object): """ ``_HashedCancelSecret`` is a marker type for an already-hashed lease cancel secret that lets internal lease cancellers bypass the hash-based protection that's imposed on external lease cancellers. :ivar bytes hashed_value: The already-hashed secret. """ hashed_value = attr.ib() tahoe_lafs-1.20.0/src/allmydata/storage/lease_schema.py0000644000000000000000000000734713615410400020020 0ustar00""" Ported to Python 3. """ from typing import Union import attr from nacl.hash import blake2b from nacl.encoding import RawEncoder from .lease import ( LeaseInfo, HashedLeaseInfo, ) @attr.s(frozen=True) class CleartextLeaseSerializer(object): """ Serialize and unserialize leases with cleartext secrets. """ _to_data = attr.ib() _from_data = attr.ib() def serialize(self, lease): # type: (LeaseInfo) -> bytes """ Represent the given lease as bytes with cleartext secrets. """ if isinstance(lease, LeaseInfo): return self._to_data(lease) raise ValueError( "ShareFile v1 schema only supports LeaseInfo, not {!r}".format( lease, ), ) def unserialize(self, data): # type: (bytes) -> LeaseInfo """ Load a lease with cleartext secrets from the given bytes representation. """ # In v1 of the immutable schema lease secrets are stored plaintext. # So load the data into a plain LeaseInfo which works on plaintext # secrets. return self._from_data(data) @attr.s(frozen=True) class HashedLeaseSerializer(object): _to_data = attr.ib() _from_data = attr.ib() @classmethod def _hash_secret(cls, secret): # type: (bytes) -> bytes """ Hash a lease secret for storage. """ return blake2b(secret, digest_size=32, encoder=RawEncoder) @classmethod def _hash_lease_info(cls, lease_info): # type: (LeaseInfo) -> HashedLeaseInfo """ Hash the cleartext lease info secrets into a ``HashedLeaseInfo``. """ if not isinstance(lease_info, LeaseInfo): # Provide a little safety against misuse, especially an attempt to # re-hash an already-hashed lease info which is represented as a # different type. raise TypeError( "Can only hash LeaseInfo, not {!r}".format(lease_info), ) # Hash the cleartext secrets in the lease info and wrap the result in # a new type. return HashedLeaseInfo( attr.assoc( lease_info, renew_secret=cls._hash_secret(lease_info.renew_secret), cancel_secret=cls._hash_secret(lease_info.cancel_secret), ), cls._hash_secret, ) def serialize(self, lease: Union[LeaseInfo, HashedLeaseInfo]) -> bytes: if isinstance(lease, LeaseInfo): # v2 of the immutable schema stores lease secrets hashed. If # we're given a LeaseInfo then it holds plaintext secrets. Hash # them before trying to serialize. lease = self._hash_lease_info(lease) if isinstance(lease, HashedLeaseInfo): return self._to_data(lease) raise ValueError( "ShareFile v2 schema cannot represent lease {!r}".format( lease, ), ) def unserialize(self, data): # type: (bytes) -> HashedLeaseInfo # In v2 of the immutable schema lease secrets are stored hashed. Wrap # a LeaseInfo in a HashedLeaseInfo so it can supply the correct # interpretation for those values. 
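# --- Illustrative sketch, not part of the original source: the v2 schemas
# never store lease secrets in the clear; they store the 32-byte BLAKE2b
# digest computed the same way as HashedLeaseSerializer._hash_secret above
# (requires PyNaCl).  The secret value below is a placeholder.
from nacl.hash import blake2b
from nacl.encoding import RawEncoder

def hash_lease_secret(secret):
    # type: (bytes) -> bytes
    """Return the raw 32-byte digest that is stored instead of the secret."""
    return blake2b(secret, digest_size=32, encoder=RawEncoder)

cleartext_renew_secret = b"\x01" * 32
stored_digest = hash_lease_secret(cleartext_renew_secret)
assert len(stored_digest) == 32
# A client later proves knowledge of the secret by presenting the cleartext;
# the server hashes the candidate and compares digests, so the on-disk value
# is useless for renewing or cancelling a lease on its own.
assert stored_digest == hash_lease_secret(cleartext_renew_secret)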
return HashedLeaseInfo(self._from_data(data), self._hash_secret) v1_immutable = CleartextLeaseSerializer( LeaseInfo.to_immutable_data, LeaseInfo.from_immutable_data, ) v2_immutable = HashedLeaseSerializer( HashedLeaseInfo.to_immutable_data, LeaseInfo.from_immutable_data, ) v1_mutable = CleartextLeaseSerializer( LeaseInfo.to_mutable_data, LeaseInfo.from_mutable_data, ) v2_mutable = HashedLeaseSerializer( HashedLeaseInfo.to_mutable_data, LeaseInfo.from_mutable_data, ) tahoe_lafs-1.20.0/src/allmydata/storage/mutable.py0000644000000000000000000004474313615410400017041 0ustar00""" Ported to Python 3. """ import os, stat, struct from allmydata.interfaces import ( BadWriteEnablerError, NoSpace, ) from allmydata.util import idlib, log from allmydata.util.assertutil import precondition from allmydata.util.hashutil import timing_safe_compare from allmydata.storage.lease import LeaseInfo from allmydata.storage.common import UnknownMutableContainerVersionError, \ DataTooLargeError from allmydata.mutable.layout import MAX_MUTABLE_SHARE_SIZE from .mutable_schema import ( NEWEST_SCHEMA_VERSION, schema_from_header, ) # the MutableShareFile is like the ShareFile, but used for mutable data. It # has a different layout. See docs/mutable.txt for more details. # # offset size name # 1 0 32 magic verstr "tahoe mutable container v1" plus binary # 2 32 20 write enabler's nodeid # 3 52 32 write enabler # 4 84 8 data size (actual share data present) (a) # 5 92 8 offset of (8) count of extra leases (after data) # 6 100 368 four leases, 92 bytes each # 0 4 ownerid (0 means "no lease here") # 4 4 expiration timestamp # 8 32 renewal token # 40 32 cancel token # 72 20 nodeid which accepted the tokens # 7 468 (a) data # 8 ?? 4 count of extra leases # 9 ?? n*92 extra leases # The struct module doc says that L's are 4 bytes in size., and that Q's are # 8 bytes in size. Since compatibility depends upon this, double-check it. assert struct.calcsize(">L") == 4, struct.calcsize(">L") assert struct.calcsize(">Q") == 8, struct.calcsize(">Q") class MutableShareFile(object): sharetype = "mutable" DATA_LENGTH_OFFSET = struct.calcsize(">32s20s32s") EXTRA_LEASE_OFFSET = DATA_LENGTH_OFFSET + 8 HEADER_SIZE = struct.calcsize(">32s20s32sQQ") # doesn't include leases LEASE_SIZE = struct.calcsize(">LL32s32s20s") assert LEASE_SIZE == 92 DATA_OFFSET = HEADER_SIZE + 4*LEASE_SIZE assert DATA_OFFSET == 468, DATA_OFFSET # our sharefiles share with a recognizable string, plus some random # binary data to reduce the chance that a regular text file will look # like a sharefile. MAX_SIZE = MAX_MUTABLE_SHARE_SIZE # TODO: decide upon a policy for max share size @classmethod def is_valid_header(cls, header): # type: (bytes) -> bool """ Determine if the given bytes constitute a valid header for this type of container. :param header: Some bytes from the beginning of a container. :return: ``True`` if the bytes could belong to this container, ``False`` otherwise. 
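# --- Illustrative sketch, not part of the original source: checking the
# fixed offsets of the mutable container layout documented in the comment
# above, using only the struct format strings that appear in this file.
import struct
header_size = struct.calcsize(">32s20s32sQQ")  # magic, nodeid, write enabler, data size, extra-lease offset
lease_size = struct.calcsize(">LL32s32s20s")   # one lease record
assert struct.calcsize(">32s20s32s") == 84     # DATA_LENGTH_OFFSET
assert 84 + 8 == 92                            # EXTRA_LEASE_OFFSET
assert header_size == 100                      # the four built-in lease slots start here
assert lease_size == 92
assert header_size + 4 * lease_size == 468     # DATA_OFFSET: share data starts here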
""" return schema_from_header(header) is not None def __init__(self, filename, parent=None, schema=NEWEST_SCHEMA_VERSION): self.home = filename if os.path.exists(self.home): # we don't cache anything, just check the magic with open(self.home, 'rb') as f: header = f.read(self.HEADER_SIZE) self._schema = schema_from_header(header) if self._schema is None: raise UnknownMutableContainerVersionError(filename, header) else: self._schema = schema self.parent = parent # for logging def log(self, *args, **kwargs): return self.parent.log(*args, **kwargs) def create(self, my_nodeid, write_enabler): assert not os.path.exists(self.home) with open(self.home, 'wb') as f: f.write(self._schema.header(my_nodeid, write_enabler)) def unlink(self): os.unlink(self.home) def _read_data_length(self, f): f.seek(self.DATA_LENGTH_OFFSET) (data_length,) = struct.unpack(">Q", f.read(8)) return data_length def _write_data_length(self, f, data_length): f.seek(self.DATA_LENGTH_OFFSET) f.write(struct.pack(">Q", data_length)) def _read_share_data(self, f, offset, length): precondition(offset >= 0) precondition(length >= 0) data_length = self._read_data_length(f) if offset+length > data_length: # reads beyond the end of the data are truncated. Reads that # start beyond the end of the data return an empty string. length = max(0, data_length-offset) if length == 0: return b"" precondition(offset+length <= data_length) f.seek(self.DATA_OFFSET+offset) data = f.read(length) return data def _read_extra_lease_offset(self, f): f.seek(self.EXTRA_LEASE_OFFSET) (extra_lease_offset,) = struct.unpack(">Q", f.read(8)) return extra_lease_offset def _write_extra_lease_offset(self, f, offset): f.seek(self.EXTRA_LEASE_OFFSET) f.write(struct.pack(">Q", offset)) def _read_num_extra_leases(self, f): offset = self._read_extra_lease_offset(f) f.seek(offset) (num_extra_leases,) = struct.unpack(">L", f.read(4)) return num_extra_leases def _write_num_extra_leases(self, f, num_leases): extra_lease_offset = self._read_extra_lease_offset(f) f.seek(extra_lease_offset) f.write(struct.pack(">L", num_leases)) def _change_container_size(self, f, new_container_size): if new_container_size > self.MAX_SIZE: raise DataTooLargeError() old_extra_lease_offset = self._read_extra_lease_offset(f) new_extra_lease_offset = self.DATA_OFFSET + new_container_size if new_extra_lease_offset < old_extra_lease_offset: # TODO: allow containers to shrink. For now they remain large. return num_extra_leases = self._read_num_extra_leases(f) f.seek(old_extra_lease_offset) leases_size = 4 + num_extra_leases * self.LEASE_SIZE extra_lease_data = f.read(leases_size) # Zero out the old lease info (in order to minimize the chance that # it could accidentally be exposed to a reader later, re #1528). f.seek(old_extra_lease_offset) f.write(b'\x00' * leases_size) f.flush() # An interrupt here will corrupt the leases. f.seek(new_extra_lease_offset) f.write(extra_lease_data) self._write_extra_lease_offset(f, new_extra_lease_offset) def _write_share_data(self, f, offset, data): length = len(data) precondition(offset >= 0) data_length = self._read_data_length(f) extra_lease_offset = self._read_extra_lease_offset(f) if offset+length >= data_length: # They are expanding their data size. if self.DATA_OFFSET+offset+length > extra_lease_offset: # TODO: allow containers to shrink. For now, they remain # large. # Their new data won't fit in the current container, so we # have to move the leases. 
With luck, they're expanding it # more than the size of the extra lease block, which will # minimize the corrupt-the-share window self._change_container_size(f, offset+length) extra_lease_offset = self._read_extra_lease_offset(f) # an interrupt here is ok.. the container has been enlarged # but the data remains untouched assert self.DATA_OFFSET+offset+length <= extra_lease_offset # Their data now fits in the current container. We must write # their new data and modify the recorded data size. # Fill any newly exposed empty space with 0's. if offset > data_length: f.seek(self.DATA_OFFSET+data_length) f.write(b'\x00'*(offset - data_length)) f.flush() new_data_length = offset+length self._write_data_length(f, new_data_length) # an interrupt here will result in a corrupted share # now all that's left to do is write out their data f.seek(self.DATA_OFFSET+offset) f.write(data) return def _write_lease_record(self, f, lease_number, lease_info): extra_lease_offset = self._read_extra_lease_offset(f) num_extra_leases = self._read_num_extra_leases(f) if lease_number < 4: offset = self.HEADER_SIZE + lease_number * self.LEASE_SIZE elif (lease_number-4) < num_extra_leases: offset = (extra_lease_offset + 4 + (lease_number-4)*self.LEASE_SIZE) else: # must add an extra lease record self._write_num_extra_leases(f, num_extra_leases+1) offset = (extra_lease_offset + 4 + (lease_number-4)*self.LEASE_SIZE) f.seek(offset) assert f.tell() == offset f.write(self._schema.lease_serializer.serialize(lease_info)) def _read_lease_record(self, f, lease_number): # returns a LeaseInfo instance, or None extra_lease_offset = self._read_extra_lease_offset(f) num_extra_leases = self._read_num_extra_leases(f) if lease_number < 4: offset = self.HEADER_SIZE + lease_number * self.LEASE_SIZE elif (lease_number-4) < num_extra_leases: offset = (extra_lease_offset + 4 + (lease_number-4)*self.LEASE_SIZE) else: raise IndexError("No such lease number %d" % lease_number) f.seek(offset) assert f.tell() == offset data = f.read(self.LEASE_SIZE) lease_info = self._schema.lease_serializer.unserialize(data) if lease_info.owner_num == 0: return None return lease_info def _get_num_lease_slots(self, f): # how many places do we have allocated for leases? Not all of them # are filled. num_extra_leases = self._read_num_extra_leases(f) return 4+num_extra_leases def _get_first_empty_lease_slot(self, f): # return an int with the index of an empty slot, or None if we do not # currently have an empty slot for i in range(self._get_num_lease_slots(f)): if self._read_lease_record(f, i) is None: return i return None def get_leases(self): """Yields a LeaseInfo instance for all leases.""" with open(self.home, 'rb') as f: for i, lease in self._enumerate_leases(f): yield lease def _enumerate_leases(self, f): for i in range(self._get_num_lease_slots(f)): try: data = self._read_lease_record(f, i) if data is not None: yield i,data except IndexError: return def add_lease(self, available_space, lease_info): """ Add a new lease to this share. :param int available_space: The maximum number of bytes of storage to commit in this operation. If more than this number of bytes is required, raise ``NoSpace`` instead. :raise NoSpace: If more than ``available_space`` bytes is required to complete the operation. In this case, no lease is added. 
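# --- Illustrative sketch, not part of the original source: the offset
# arithmetic used by _write_lease_record()/_read_lease_record() above.
# The first four lease slots live in the fixed header area; any further
# slots live after the share data, 4 bytes past the extra-lease count.
# lease_slot_offset is a hypothetical helper, not an API of this class.
HEADER_SIZE = 100  # struct.calcsize(">32s20s32sQQ")
LEASE_SIZE = 92    # struct.calcsize(">LL32s32s20s")

def lease_slot_offset(lease_number, extra_lease_offset):
    if lease_number < 4:
        return HEADER_SIZE + lease_number * LEASE_SIZE
    return extra_lease_offset + 4 + (lease_number - 4) * LEASE_SIZE

assert lease_slot_offset(0, 468) == 100
assert lease_slot_offset(3, 468) == 100 + 3 * 92
assert lease_slot_offset(4, 468) == 468 + 4  # first extra lease of an empty container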
:return: ``None`` """ precondition(lease_info.owner_num != 0) # 0 means "no lease here" with open(self.home, 'rb+') as f: num_lease_slots = self._get_num_lease_slots(f) empty_slot = self._get_first_empty_lease_slot(f) if empty_slot is not None: self._write_lease_record(f, empty_slot, lease_info) else: if lease_info.mutable_size() > available_space: raise NoSpace() self._write_lease_record(f, num_lease_slots, lease_info) def renew_lease(self, renew_secret, new_expire_time, allow_backdate=False): # type: (bytes, int, bool) -> None """ Update the expiration time on an existing lease. :param allow_backdate: If ``True`` then allow the new expiration time to be before the current expiration time. Otherwise, make no change when this is the case. :raise IndexError: If there is no lease matching the given renew secret. """ accepting_nodeids = set() with open(self.home, 'rb+') as f: for (leasenum,lease) in self._enumerate_leases(f): if lease.is_renew_secret(renew_secret): # yup. See if we need to update the owner time. if allow_backdate or new_expire_time > lease.get_expiration_time(): # yes lease = lease.renew(new_expire_time) self._write_lease_record(f, leasenum, lease) return accepting_nodeids.add(lease.nodeid) # Return the accepting_nodeids set, to give the client a chance to # update the leases on a share which has been migrated from its # original server to a new one. msg = ("Unable to renew non-existent lease. I have leases accepted by" " nodeids: ") msg += ",".join([("'%s'" % idlib.nodeid_b2a(anid)) for anid in accepting_nodeids]) msg += " ." raise IndexError(msg) def add_or_renew_lease(self, available_space, lease_info): precondition(lease_info.owner_num != 0) # 0 means "no lease here" try: self.renew_lease(lease_info.renew_secret, lease_info.get_expiration_time()) except IndexError: self.add_lease(available_space, lease_info) def cancel_lease(self, cancel_secret): """Remove any leases with the given cancel_secret. If the last lease is cancelled, the file will be removed. Return the number of bytes that were freed (by truncating the list of leases, and possibly by deleting the file. Raise IndexError if there was no lease with the given cancel_secret.""" accepting_nodeids = set() modified = 0 remaining = 0 blank_lease = LeaseInfo(owner_num=0, renew_secret=b"\x00"*32, cancel_secret=b"\x00"*32, expiration_time=0, nodeid=b"\x00"*20) with open(self.home, 'rb+') as f: for (leasenum,lease) in self._enumerate_leases(f): accepting_nodeids.add(lease.nodeid) if lease.is_cancel_secret(cancel_secret): self._write_lease_record(f, leasenum, blank_lease) modified += 1 else: remaining += 1 if modified: freed_space = self._pack_leases(f) f.close() if not remaining: freed_space += os.stat(self.home)[stat.ST_SIZE] self.unlink() return freed_space msg = ("Unable to cancel non-existent lease. I have leases " "accepted by nodeids: ") msg += ",".join([("'%s'" % idlib.nodeid_b2a(anid)) for anid in accepting_nodeids]) msg += " ." 
raise IndexError(msg) def _pack_leases(self, f): # TODO: reclaim space from cancelled leases return 0 def _read_write_enabler_and_nodeid(self, f): f.seek(0) data = f.read(self.HEADER_SIZE) (magic, write_enabler_nodeid, write_enabler, data_length, extra_least_offset) = \ struct.unpack(">32s20s32sQQ", data) assert self.is_valid_header(data) return (write_enabler, write_enabler_nodeid) def readv(self, readv): datav = [] with open(self.home, 'rb') as f: for (offset, length) in readv: datav.append(self._read_share_data(f, offset, length)) return datav def get_length(self): """ Return the length of the data in the share. """ f = open(self.home, 'rb') data_length = self._read_data_length(f) f.close() return data_length def check_write_enabler(self, write_enabler, si_s): with open(self.home, 'rb+') as f: (real_write_enabler, write_enabler_nodeid) = \ self._read_write_enabler_and_nodeid(f) # avoid a timing attack #if write_enabler != real_write_enabler: if not timing_safe_compare(write_enabler, real_write_enabler): # accomodate share migration by reporting the nodeid used for the # old write enabler. self.log(format="bad write enabler on SI %(si)s," " recorded by nodeid %(nodeid)s", facility="tahoe.storage", level=log.WEIRD, umid="cE1eBQ", si=si_s, nodeid=idlib.nodeid_b2a(write_enabler_nodeid)) msg = "The write enabler was recorded by nodeid '%s'." % \ (idlib.nodeid_b2a(write_enabler_nodeid),) raise BadWriteEnablerError(msg) def check_testv(self, testv): test_good = True with open(self.home, 'rb+') as f: for (offset, length, operator, specimen) in testv: data = self._read_share_data(f, offset, length) if not testv_compare(data, operator, specimen): test_good = False break return test_good def writev(self, datav, new_length): with open(self.home, 'rb+') as f: for (offset, data) in datav: self._write_share_data(f, offset, data) if new_length is not None: cur_length = self._read_data_length(f) if new_length < cur_length: self._write_data_length(f, new_length) # TODO: if we're going to shrink the share file when the # share data has shrunk, then call # self._change_container_size() here. def testv_compare(a, op, b): assert op == b"eq" return a == b class EmptyShare(object): def check_testv(self, testv): test_good = True for (offset, length, operator, specimen) in testv: data = b"" if not testv_compare(data, operator, specimen): test_good = False break return test_good def create_mutable_sharefile(filename, my_nodeid, write_enabler, parent): ms = MutableShareFile(filename, parent) ms.create(my_nodeid, write_enabler) del ms return MutableShareFile(filename, parent) tahoe_lafs-1.20.0/src/allmydata/storage/mutable_schema.py0000644000000000000000000000751413615410400020354 0ustar00""" Ported to Python 3. """ import struct import attr from ..util.hashutil import ( tagged_hash, ) from .lease import ( LeaseInfo, ) from .lease_schema import ( v1_mutable, v2_mutable, ) def _magic(version): # type: (int) -> bytes """ Compute a "magic" header string for a container of the given version. :param version: The version number of the container. """ # Make it easy for people to recognize human_readable = u"Tahoe mutable container v{:d}\n".format(version).encode("ascii") # But also keep the chance of accidental collision low if version == 1: # It's unclear where this byte sequence came from. It may have just # been random. In any case, preserve it since it is the magic marker # in all v1 share files. random_bytes = b"\x75\x09\x44\x03\x8e" else: # For future versions, use a reproducable scheme. 
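# --- Illustrative sketch, not part of the original source: the v1 magic
# marker is the human-readable string plus the five fixed trailing bytes
# shown above, giving the 32-byte value that magic_matches() later checks.
human_readable = u"Tahoe mutable container v1\n".encode("ascii")
v1_trailer = b"\x75\x09\x44\x03\x8e"
assert len(human_readable) == 27
assert len(human_readable + v1_trailer) == 32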
random_bytes = tagged_hash( b"allmydata_mutable_container_header", human_readable, truncate_to=5, ) magic = human_readable + random_bytes assert len(magic) == 32 if version > 1: # The chance of collision is pretty low but let's just be sure about # it. assert magic != _magic(version - 1) return magic def _header(magic, extra_lease_offset, nodeid, write_enabler): # type: (bytes, int, bytes, bytes) -> bytes """ Construct a container header. :param nodeid: A unique identifier for the node holding this container. :param write_enabler: A secret shared with the client used to authorize changes to the contents of this container. """ fixed_header = struct.pack( ">32s20s32sQQ", magic, nodeid, write_enabler, # data length, initially the container is empty 0, extra_lease_offset, ) blank_leases = b"\x00" * LeaseInfo().mutable_size() * 4 extra_lease_count = struct.pack(">L", 0) return b"".join([ fixed_header, # share data will go in between the next two items eventually but # for now there is none. blank_leases, extra_lease_count, ]) _HEADER_FORMAT = ">32s20s32sQQ" # This size excludes leases _HEADER_SIZE = struct.calcsize(_HEADER_FORMAT) _EXTRA_LEASE_OFFSET = _HEADER_SIZE + 4 * LeaseInfo().mutable_size() @attr.s(frozen=True) class _Schema(object): """ Implement encoding and decoding for the mutable container. :ivar int version: the version number of the schema this object supports :ivar lease_serializer: an object that is responsible for lease serialization and unserialization """ version = attr.ib() lease_serializer = attr.ib() _magic = attr.ib() @classmethod def for_version(cls, version, lease_serializer): return cls(version, lease_serializer, magic=_magic(version)) def magic_matches(self, candidate_magic): # type: (bytes) -> bool """ Return ``True`` if a candidate string matches the expected magic string from a mutable container header, ``False`` otherwise. """ return candidate_magic[:len(self._magic)] == self._magic def header(self, nodeid, write_enabler): return _header(self._magic, _EXTRA_LEASE_OFFSET, nodeid, write_enabler) ALL_SCHEMAS = { _Schema.for_version(version=2, lease_serializer=v2_mutable), _Schema.for_version(version=1, lease_serializer=v1_mutable), } ALL_SCHEMA_VERSIONS = {schema.version for schema in ALL_SCHEMAS} NEWEST_SCHEMA_VERSION = max(ALL_SCHEMAS, key=lambda schema: schema.version) def schema_from_header(header): # (int) -> Optional[type] """ Find the schema object that corresponds to a certain version number. """ for schema in ALL_SCHEMAS: if schema.magic_matches(header): return schema return None tahoe_lafs-1.20.0/src/allmydata/storage/server.py0000644000000000000000000011343713615410400016713 0ustar00""" Ported to Python 3. 
""" from __future__ import annotations from typing import Iterable, Any import os, re from foolscap.api import Referenceable from foolscap.ipb import IRemoteReference from twisted.application import service from twisted.internet import reactor from zope.interface import implementer from allmydata.interfaces import RIStorageServer, IStatsProducer from allmydata.util import fileutil, idlib, log, time_format import allmydata # for __full_version__ from allmydata.storage.common import si_b2a, si_a2b, storage_index_to_dir _pyflakes_hush = [si_b2a, si_a2b, storage_index_to_dir] # re-exported from allmydata.storage.lease import LeaseInfo from allmydata.storage.mutable import MutableShareFile, EmptyShare, \ create_mutable_sharefile from allmydata.mutable.layout import MAX_MUTABLE_SHARE_SIZE from allmydata.storage.immutable import ( ShareFile, BucketWriter, BucketReader, FoolscapBucketWriter, FoolscapBucketReader, ) from allmydata.storage.crawler import BucketCountingCrawler from allmydata.storage.expirer import LeaseCheckingCrawler # storage/ # storage/shares/incoming # incoming/ holds temp dirs named $START/$STORAGEINDEX/$SHARENUM which will # be moved to storage/shares/$START/$STORAGEINDEX/$SHARENUM upon success # storage/shares/$START/$STORAGEINDEX # storage/shares/$START/$STORAGEINDEX/$SHARENUM # Where "$START" denotes the first 10 bits worth of $STORAGEINDEX (that's 2 # base-32 chars). # $SHARENUM matches this regex: NUM_RE=re.compile("^[0-9]+$") # Number of seconds to add to expiration time on lease renewal. # For now it's not actually configurable, but maybe someday. DEFAULT_RENEWAL_TIME = 31 * 24 * 60 * 60 @implementer(IStatsProducer) class StorageServer(service.MultiService): """ Implement the business logic for the storage server. """ # The type in Twisted for services is wrong in 22.10... 
# https://github.com/twisted/twisted/issues/10135 name = 'storage' # type: ignore[assignment] # only the tests change this to anything else LeaseCheckerClass = LeaseCheckingCrawler def __init__(self, storedir, nodeid, reserved_space=0, discard_storage=False, readonly_storage=False, stats_provider=None, expiration_enabled=False, expiration_mode="age", expiration_override_lease_duration=None, expiration_cutoff_date=None, expiration_sharetypes=("mutable", "immutable"), clock=reactor): service.MultiService.__init__(self) assert isinstance(nodeid, bytes) assert len(nodeid) == 20 assert isinstance(nodeid, bytes) self.my_nodeid = nodeid self.storedir = storedir sharedir = os.path.join(storedir, "shares") fileutil.make_dirs(sharedir) self.sharedir = sharedir self.corruption_advisory_dir = os.path.join(storedir, "corruption-advisories") fileutil.make_dirs(self.corruption_advisory_dir) self.reserved_space = int(reserved_space) self.no_storage = discard_storage self.readonly_storage = readonly_storage self.stats_provider = stats_provider if self.stats_provider: self.stats_provider.register_producer(self) self.incomingdir = os.path.join(sharedir, 'incoming') self._clean_incomplete() fileutil.make_dirs(self.incomingdir) log.msg("StorageServer created", facility="tahoe.storage") if reserved_space: if self.get_available_space() is None: log.msg("warning: [storage]reserved_space= is set, but this platform does not support an API to get disk statistics (statvfs(2) or GetDiskFreeSpaceEx), so this reservation cannot be honored", umin="0wZ27w", level=log.UNUSUAL) self.latencies = {"allocate": [], # immutable "write": [], "close": [], "read": [], "get": [], "writev": [], # mutable "readv": [], "add-lease": [], # both "renew": [], "cancel": [], } self.add_bucket_counter() statefile = os.path.join(self.storedir, "lease_checker.state") historyfile = os.path.join(self.storedir, "lease_checker.history") klass = self.LeaseCheckerClass self.lease_checker = klass(self, statefile, historyfile, expiration_enabled, expiration_mode, expiration_override_lease_duration, expiration_cutoff_date, expiration_sharetypes) self.lease_checker.setServiceParent(self) self._clock = clock # Map in-progress filesystem path -> BucketWriter: self._bucket_writers = {} # type: Dict[str,BucketWriter] # These callables will be called with BucketWriters that closed: self._call_on_bucket_writer_close = [] def stopService(self): # Cancel any in-progress uploads: for bw in list(self._bucket_writers.values()): bw.disconnected() return service.MultiService.stopService(self) def __repr__(self): return "" % (idlib.shortnodeid_b2a(self.my_nodeid),) def have_shares(self): # quick test to decide if we need to commit to an implicit # permutation-seed or if we should use a new one return bool(set(os.listdir(self.sharedir)) - set(["incoming"])) def add_bucket_counter(self): statefile = os.path.join(self.storedir, "bucket_counter.state") self.bucket_counter = BucketCountingCrawler(self, statefile) self.bucket_counter.setServiceParent(self) def count(self, name, delta=1): if self.stats_provider: self.stats_provider.count("storage_server." + name, delta) def add_latency(self, category, latency): a = self.latencies[category] a.append(latency) if len(a) > 1000: self.latencies[category] = a[-1000:] def get_latencies(self): """Return a dict, indexed by category, that contains a dict of latency numbers for each category. 
If there are sufficient samples for unambiguous interpretation, each dict will contain the following keys: mean, 01_0_percentile, 10_0_percentile, 50_0_percentile (median), 90_0_percentile, 95_0_percentile, 99_0_percentile, 99_9_percentile. If there are insufficient samples for a given percentile to be interpreted unambiguously that percentile will be reported as None. If no samples have been collected for the given category, then that category name will not be present in the return value. """ # note that Amazon's Dynamo paper says they use 99.9% percentile. output = {} for category in self.latencies: if not self.latencies[category]: continue stats = {} samples = self.latencies[category][:] count = len(samples) stats["samplesize"] = count samples.sort() if count > 1: stats["mean"] = sum(samples) / count else: stats["mean"] = None orderstatlist = [(0.01, "01_0_percentile", 100), (0.1, "10_0_percentile", 10),\ (0.50, "50_0_percentile", 10), (0.90, "90_0_percentile", 10),\ (0.95, "95_0_percentile", 20), (0.99, "99_0_percentile", 100),\ (0.999, "99_9_percentile", 1000)] for percentile, percentilestring, minnumtoobserve in orderstatlist: if count >= minnumtoobserve: stats[percentilestring] = samples[int(percentile*count)] else: stats[percentilestring] = None output[category] = stats return output def log(self, *args, **kwargs): if "facility" not in kwargs: kwargs["facility"] = "tahoe.storage" return log.msg(*args, **kwargs) def _clean_incomplete(self): fileutil.rm_dir(self.incomingdir) def get_stats(self): # remember: RIStatsProvider requires that our return dict # contains numeric values. stats = { 'storage_server.allocated': self.allocated_size(), } stats['storage_server.reserved_space'] = self.reserved_space for category,ld in self.get_latencies().items(): for name,v in ld.items(): stats['storage_server.latencies.%s.%s' % (category, name)] = v try: disk = fileutil.get_disk_stats(self.sharedir, self.reserved_space) writeable = disk['avail'] > 0 # spacetime predictors should use disk_avail / (d(disk_used)/dt) stats['storage_server.disk_total'] = disk['total'] stats['storage_server.disk_used'] = disk['used'] stats['storage_server.disk_free_for_root'] = disk['free_for_root'] stats['storage_server.disk_free_for_nonroot'] = disk['free_for_nonroot'] stats['storage_server.disk_avail'] = disk['avail'] except AttributeError: writeable = True except EnvironmentError: log.msg("OS call to get disk statistics failed", level=log.UNUSUAL) writeable = False if self.readonly_storage: stats['storage_server.disk_avail'] = 0 writeable = False stats['storage_server.accepting_immutable_shares'] = int(writeable) s = self.bucket_counter.get_state() bucket_count = s.get("last-complete-bucket-count") if bucket_count: stats['storage_server.total_bucket_count'] = bucket_count return stats def get_available_space(self): """Returns available space for share storage in bytes, or None if no API to get this information is available.""" if self.readonly_storage: return 0 return fileutil.get_available_space(self.sharedir, self.reserved_space) def allocated_size(self): space = 0 for bw in self._bucket_writers.values(): space += bw.allocated_size() return space def get_version(self): remaining_space = self.get_available_space() if remaining_space is None: # We're on a platform that has no API to get disk stats. remaining_space = 2**64 # Unicode strings might be nicer, but for now sticking to bytes since # this is what the wire protocol has always been. 
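# --- Illustrative sketch, not part of the original source: how a single
# percentile entry in get_latencies() above is derived -- sort the samples
# and index at int(percentile * count), reporting None when there are too
# few samples for that percentile to be meaningful.  The sample values are
# placeholders.
samples = sorted(0.01 * i for i in range(1, 101))  # 100 fake latencies
count = len(samples)
percentile, minnumtoobserve = 0.99, 100
if count >= minnumtoobserve:
    p99 = samples[int(percentile * count)]
else:
    p99 = None
assert p99 is not None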
version = { b"http://allmydata.org/tahoe/protocols/storage/v1" : { b"maximum-immutable-share-size": remaining_space, b"maximum-mutable-share-size": MAX_MUTABLE_SHARE_SIZE, b"available-space": remaining_space, b"tolerates-immutable-read-overrun": True, b"delete-mutable-shares-with-zero-length-writev": True, b"fills-holes-with-zero-bytes": True, b"prevents-read-past-end-of-share-data": True, }, b"application-version": allmydata.__full_version__.encode("utf-8"), } return version def allocate_buckets(self, storage_index, renew_secret, cancel_secret, sharenums, allocated_size, owner_num=0, renew_leases=True): """ Generic bucket allocation API. :param bool renew_leases: If and only if this is ``True`` then renew a secret-matching lease on (or, if none match, add a new lease to) existing shares in this bucket. Any *new* shares are given a new lease regardless. """ # owner_num is not for clients to set, but rather it should be # curried into the PersonalStorageServer instance that is dedicated # to a particular owner. start = self._clock.seconds() self.count("allocate") alreadygot = {} bucketwriters = {} # k: shnum, v: BucketWriter si_dir = storage_index_to_dir(storage_index) si_s = si_b2a(storage_index) log.msg("storage: allocate_buckets %r" % si_s) # in this implementation, the lease information (including secrets) # goes into the share files themselves. It could also be put into a # separate database. Note that the lease should not be added until # the BucketWriter has been closed. expire_time = self._clock.seconds() + DEFAULT_RENEWAL_TIME lease_info = LeaseInfo(owner_num, renew_secret, cancel_secret, expire_time, self.my_nodeid) max_space_per_bucket = allocated_size remaining_space = self.get_available_space() limited = remaining_space is not None if limited: # this is a bit conservative, since some of this allocated_size() # has already been written to disk, where it will show up in # get_available_space. remaining_space -= self.allocated_size() # self.readonly_storage causes remaining_space <= 0 # fill alreadygot with all shares that we have, not just the ones # they asked about: this will save them a lot of work. Add or update # leases for all of them: if they want us to hold shares for this # file, they'll want us to hold leases for this file. for (shnum, fn) in self.get_shares(storage_index): alreadygot[shnum] = ShareFile(fn) if renew_leases: self._add_or_renew_leases(alreadygot.values(), lease_info) for shnum in sharenums: incominghome = os.path.join(self.incomingdir, si_dir, "%d" % shnum) finalhome = os.path.join(self.sharedir, si_dir, "%d" % shnum) if os.path.exists(finalhome): # great! we already have it. easy. pass elif os.path.exists(incominghome): # For Foolscap we don't create BucketWriters for shnums that # have a partial share (in incoming/), so if a second upload # occurs while the first is still in progress, the second # uploader will use different storage servers. pass elif (not limited) or (remaining_space >= max_space_per_bucket): # ok! we need to create the new share file. bw = BucketWriter(self, incominghome, finalhome, max_space_per_bucket, lease_info, clock=self._clock) if self.no_storage: # Really this should be done by having a separate class for # this situation; see # https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3862 bw.throw_out_all_data = True bucketwriters[shnum] = bw self._bucket_writers[incominghome] = bw if limited: remaining_space -= max_space_per_bucket else: # bummer! 
not enough space to accept this bucket pass if bucketwriters: fileutil.make_dirs(os.path.join(self.sharedir, si_dir)) self.add_latency("allocate", self._clock.seconds() - start) return set(alreadygot), bucketwriters def _iter_share_files(self, storage_index): for shnum, filename in self.get_shares(storage_index): with open(filename, 'rb') as f: header = f.read(32) if MutableShareFile.is_valid_header(header): sf = MutableShareFile(filename, self) # note: if the share has been migrated, the renew_lease() # call will throw an exception, with information to help the # client update the lease. elif ShareFile.is_valid_header(header): sf = ShareFile(filename) else: continue # non-sharefile yield sf def add_lease(self, storage_index, renew_secret, cancel_secret, owner_num=1): start = self._clock.seconds() self.count("add-lease") new_expire_time = self._clock.seconds() + DEFAULT_RENEWAL_TIME lease_info = LeaseInfo(owner_num, renew_secret, cancel_secret, new_expire_time, self.my_nodeid) self._add_or_renew_leases( self._iter_share_files(storage_index), lease_info, ) self.add_latency("add-lease", self._clock.seconds() - start) return None def renew_lease(self, storage_index, renew_secret): start = self._clock.seconds() self.count("renew") new_expire_time = self._clock.seconds() + DEFAULT_RENEWAL_TIME found_buckets = False for sf in self._iter_share_files(storage_index): found_buckets = True sf.renew_lease(renew_secret, new_expire_time) self.add_latency("renew", self._clock.seconds() - start) if not found_buckets: raise IndexError("no such lease to renew") def bucket_writer_closed(self, bw, consumed_size): if self.stats_provider: self.stats_provider.count('storage_server.bytes_added', consumed_size) del self._bucket_writers[bw.incominghome] for handler in self._call_on_bucket_writer_close: handler(bw) def register_bucket_writer_close_handler(self, handler): """ The handler will be called with any ``BucketWriter`` that closes. """ self._call_on_bucket_writer_close.append(handler) def get_shares(self, storage_index) -> Iterable[tuple[int, str]]: """ Return an iterable of (shnum, pathname) tuples for files that hold shares for this storage_index. In each tuple, 'shnum' will always be the integer form of the last component of 'pathname'. """ storagedir = os.path.join(self.sharedir, storage_index_to_dir(storage_index)) try: for f in os.listdir(storagedir): if NUM_RE.match(f): filename = os.path.join(storagedir, f) yield (int(f), filename) except OSError: # Commonly caused by there being no buckets at all. pass def get_buckets(self, storage_index): """ Get ``BucketReaders`` for an immutable. """ start = self._clock.seconds() self.count("get") si_s = si_b2a(storage_index) log.msg("storage: get_buckets %r" % si_s) bucketreaders = {} # k: sharenum, v: BucketReader for shnum, filename in self.get_shares(storage_index): bucketreaders[shnum] = BucketReader(self, filename, storage_index, shnum) self.add_latency("get", self._clock.seconds() - start) return bucketreaders def get_leases(self, storage_index): """Provide an iterator that yields all of the leases attached to this bucket. Each lease is returned as a LeaseInfo instance. This method is not for client use. :note: Only for immutable shares. """ # since all shares get the same lease data, we just grab the leases # from the first share try: shnum, filename = next(self.get_shares(storage_index)) sf = ShareFile(filename) return sf.get_leases() except StopIteration: return iter([]) def get_slot_leases(self, storage_index): """ This method is not for client use. 
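# --- Illustrative sketch, not part of the original source: the dispatch
# used by _iter_share_files() above (and by get_share_file() in
# storage/shares.py): read the first 32 bytes of a share file and let each
# container class decide whether the header belongs to it.  classify_share
# is a hypothetical helper and assumes the MutableShareFile/ShareFile
# imports at the top of this module.
def classify_share(filename):
    with open(filename, "rb") as f:
        header = f.read(32)
    if MutableShareFile.is_valid_header(header):
        return "mutable"
    elif ShareFile.is_valid_header(header):
        return "immutable"
    return None  # not a share file at all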
:note: Only for mutable shares. :return: An iterable of the leases attached to this slot. """ for _, share_filename in self.get_shares(storage_index): share = MutableShareFile(share_filename) return share.get_leases() return [] def _collect_mutable_shares_for_storage_index(self, bucketdir, write_enabler, si_s): """ Gather up existing mutable shares for the given storage index. :param bytes bucketdir: The filesystem path containing shares for the given storage index. :param bytes write_enabler: The write enabler secret for the shares. :param bytes si_s: The storage index in encoded (base32) form. :raise BadWriteEnablerError: If the write enabler is not correct for any of the collected shares. :return dict[int, MutableShareFile]: The collected shares in a mapping from integer share numbers to ``MutableShareFile`` instances. """ shares = {} if os.path.isdir(bucketdir): # shares exist if there is a file for them for sharenum_s in os.listdir(bucketdir): try: sharenum = int(sharenum_s) except ValueError: continue filename = os.path.join(bucketdir, sharenum_s) msf = MutableShareFile(filename, self) msf.check_write_enabler(write_enabler, si_s) shares[sharenum] = msf return shares def _evaluate_test_vectors(self, test_and_write_vectors, shares): """ Execute test vectors against share data. :param test_and_write_vectors: See ``allmydata.interfaces.TestAndWriteVectorsForShares``. :param dict[int, MutableShareFile] shares: The shares against which to execute the vectors. :return bool: ``True`` if and only if all of the test vectors succeed against the given shares. """ for sharenum in test_and_write_vectors: (testv, datav, new_length) = test_and_write_vectors[sharenum] if sharenum in shares: if not shares[sharenum].check_testv(testv): self.log("testv failed: [%d]: %r" % (sharenum, testv)) return False else: # compare the vectors against an empty share, in which all # reads return empty strings. if not EmptyShare().check_testv(testv): self.log("testv failed (empty): [%d] %r" % (sharenum, testv)) return False return True def _evaluate_read_vectors(self, read_vector, shares): """ Execute read vectors against share data. :param read_vector: See ``allmydata.interfaces.ReadVector``. :param dict[int, MutableShareFile] shares: The shares against which to execute the vector. :return dict[int, bytes]: The data read from the shares. """ read_data = {} for sharenum, share in shares.items(): read_data[sharenum] = share.readv(read_vector) return read_data def _evaluate_write_vectors(self, bucketdir, secrets, test_and_write_vectors, shares): """ Execute write vectors against share data. :param bytes bucketdir: The parent directory holding the shares. This is removed if the last share is removed from it. If shares are created, they are created in it. :param secrets: A tuple of ``WriteEnablerSecret``, ``LeaseRenewSecret``, and ``LeaseCancelSecret``. These secrets are used to initialize new shares. :param test_and_write_vectors: See ``allmydata.interfaces.TestAndWriteVectorsForShares``. :param dict[int, MutableShareFile]: The shares against which to execute the vectors. :return dict[int, MutableShareFile]: The shares which still exist after applying the vectors. 
""" remaining_shares = {} for sharenum in test_and_write_vectors: (testv, datav, new_length) = test_and_write_vectors[sharenum] if new_length == 0: if sharenum in shares: shares[sharenum].unlink() else: if sharenum not in shares: # allocate a new share share = self._allocate_slot_share(bucketdir, secrets, sharenum, owner_num=0) shares[sharenum] = share shares[sharenum].writev(datav, new_length) remaining_shares[sharenum] = shares[sharenum] if new_length == 0: # delete bucket directories that exist but are empty. They # might not exist if a client showed up and asked us to # truncate a share we weren't even holding. if os.path.exists(bucketdir) and [] == os.listdir(bucketdir): os.rmdir(bucketdir) return remaining_shares def _make_lease_info(self, renew_secret, cancel_secret): """ :return LeaseInfo: Information for a new lease for a share. """ ownerid = 1 # TODO expire_time = self._clock.seconds() + DEFAULT_RENEWAL_TIME lease_info = LeaseInfo(ownerid, renew_secret, cancel_secret, expire_time, self.my_nodeid) return lease_info def _add_or_renew_leases(self, shares, lease_info): """ Put the given lease onto the given shares. :param Iterable[Union[MutableShareFile, ShareFile]] shares: The shares to put the lease onto. :param LeaseInfo lease_info: The lease to put on the shares. """ for share in shares: share.add_or_renew_lease(self.get_available_space(), lease_info) def slot_testv_and_readv_and_writev( # type: ignore # warner/foolscap#78 self, storage_index, secrets, test_and_write_vectors, read_vector, renew_leases=True, ): """ Read data from shares and conditionally write some data to them. :param bool renew_leases: If and only if this is ``True`` and the test vectors pass then shares mentioned in ``test_and_write_vectors`` that still exist after the changes are made will also have a secret-matching lease renewed (or, if none match, a new lease added). See ``allmydata.interfaces.RIStorageServer`` for details about other parameters and return value. """ start = self._clock.seconds() self.count("writev") si_s = si_b2a(storage_index) log.msg("storage: slot_writev %r" % si_s) si_dir = storage_index_to_dir(storage_index) (write_enabler, renew_secret, cancel_secret) = secrets bucketdir = os.path.join(self.sharedir, si_dir) # If collection succeeds we know the write_enabler is good for all # existing shares. shares = self._collect_mutable_shares_for_storage_index( bucketdir, write_enabler, si_s, ) # Now evaluate test vectors. 
testv_is_good = self._evaluate_test_vectors( test_and_write_vectors, shares, ) # now gather the read vectors, before we do any writes read_data = self._evaluate_read_vectors( read_vector, shares, ) if testv_is_good: # now apply the write vectors remaining_shares = self._evaluate_write_vectors( bucketdir, secrets, test_and_write_vectors, shares, ) if renew_leases: lease_info = self._make_lease_info(renew_secret, cancel_secret) self._add_or_renew_leases(remaining_shares.values(), lease_info) # all done self.add_latency("writev", self._clock.seconds() - start) return (testv_is_good, read_data) def _allocate_slot_share(self, bucketdir, secrets, sharenum, owner_num=0): (write_enabler, renew_secret, cancel_secret) = secrets my_nodeid = self.my_nodeid fileutil.make_dirs(bucketdir) filename = os.path.join(bucketdir, "%d" % sharenum) share = create_mutable_sharefile(filename, my_nodeid, write_enabler, self) return share def enumerate_mutable_shares(self, storage_index: bytes) -> set[int]: """Return all share numbers for the given mutable.""" si_dir = storage_index_to_dir(storage_index) # shares exist if there is a file for them bucketdir = os.path.join(self.sharedir, si_dir) if not os.path.isdir(bucketdir): return set() result = set() for sharenum_s in os.listdir(bucketdir): try: result.add(int(sharenum_s)) except ValueError: continue return result def slot_readv(self, storage_index, shares, readv): start = self._clock.seconds() self.count("readv") si_s = si_b2a(storage_index) lp = log.msg("storage: slot_readv %r %r" % (si_s, shares), facility="tahoe.storage", level=log.OPERATIONAL) si_dir = storage_index_to_dir(storage_index) # shares exist if there is a file for them bucketdir = os.path.join(self.sharedir, si_dir) if not os.path.isdir(bucketdir): self.add_latency("readv", self._clock.seconds() - start) return {} datavs = {} for sharenum_s in os.listdir(bucketdir): try: sharenum = int(sharenum_s) except ValueError: continue if sharenum in shares or not shares: filename = os.path.join(bucketdir, sharenum_s) msf = MutableShareFile(filename, self) datavs[sharenum] = msf.readv(readv) log.msg("returning shares %s" % (list(datavs.keys()),), facility="tahoe.storage", level=log.NOISY, parent=lp) self.add_latency("readv", self._clock.seconds() - start) return datavs def _share_exists(self, storage_index, shnum): """ Check local share storage to see if a matching share exists. :param bytes storage_index: The storage index to inspect. :param int shnum: The share number to check for. :return bool: ``True`` if a share with the given number exists at the given storage index, ``False`` otherwise. """ for existing_sharenum, ignored in self.get_shares(storage_index): if existing_sharenum == shnum: return True return False def advise_corrupt_share(self, share_type, storage_index, shnum, reason): # Previously this had to be bytes for legacy protocol backwards # compatibility reasons. Now that Foolscap layer has been abstracted # out, we can probably refactor this to be unicode... 
assert isinstance(share_type, bytes) assert isinstance(reason, bytes), "%r is not bytes" % (reason,) si_s = si_b2a(storage_index) if not self._share_exists(storage_index, shnum): log.msg( format=( "discarding client corruption claim for %(si)s/%(shnum)d " "which I do not have" ), si=si_s, shnum=shnum, ) return log.msg(format=("client claims corruption in (%(share_type)s) " + "%(si)s-%(shnum)d: %(reason)s"), share_type=share_type, si=si_s, shnum=shnum, reason=reason, level=log.SCARY, umid="SGx2fA") report = render_corruption_report(share_type, si_s, shnum, reason) if len(report) > self.get_available_space(): return None now = time_format.iso_utc(sep="T") report_path = get_corruption_report_path( self.corruption_advisory_dir, now, si_s.decode("utf8"), shnum, ) with open(report_path, "w", encoding="utf-8") as f: f.write(report) return None def get_immutable_share_length(self, storage_index: bytes, share_number: int) -> int: """Returns the length (in bytes) of an immutable.""" si_dir = storage_index_to_dir(storage_index) path = os.path.join(self.sharedir, si_dir, str(share_number)) return ShareFile(path).get_length() def get_mutable_share_length(self, storage_index: bytes, share_number: int) -> int: """Returns the length (in bytes) of a mutable.""" si_dir = storage_index_to_dir(storage_index) path = os.path.join(self.sharedir, si_dir, str(share_number)) if not os.path.exists(path): raise KeyError("No such storage index or share number") return MutableShareFile(path).get_length() @implementer(RIStorageServer) class FoolscapStorageServer(Referenceable): # type: ignore # warner/foolscap#78 """ A filesystem-based implementation of ``RIStorageServer``. For Foolscap, BucketWriter lifetime is tied to connection: when disconnection happens, the BucketWriters are removed. """ name = 'storage' def __init__(self, storage_server): # type: (StorageServer) -> None self._server = storage_server # Canaries and disconnect markers for BucketWriters created via Foolscap: self._bucket_writer_disconnect_markers : dict[BucketWriter, tuple[IRemoteReference, Any]] = {} self._server.register_bucket_writer_close_handler(self._bucket_writer_closed) def _bucket_writer_closed(self, bw): if bw in self._bucket_writer_disconnect_markers: canary, disconnect_marker = self._bucket_writer_disconnect_markers.pop(bw) canary.dontNotifyOnDisconnect(disconnect_marker) def remote_get_version(self): return self._server.get_version() def remote_allocate_buckets(self, storage_index, renew_secret, cancel_secret, sharenums, allocated_size, canary, owner_num=0): """Foolscap-specific ``allocate_buckets()`` API.""" alreadygot, bucketwriters = self._server.allocate_buckets( storage_index, renew_secret, cancel_secret, sharenums, allocated_size, owner_num=owner_num, renew_leases=True, ) # Abort BucketWriters if disconnection happens. 
for bw in bucketwriters.values(): disconnect_marker = canary.notifyOnDisconnect(bw.disconnected) self._bucket_writer_disconnect_markers[bw] = (canary, disconnect_marker) # Wrap BucketWriters with Foolscap adapter: bucketwriters = { k: FoolscapBucketWriter(bw) for (k, bw) in bucketwriters.items() } return alreadygot, bucketwriters def remote_add_lease(self, storage_index, renew_secret, cancel_secret, owner_num=1): return self._server.add_lease(storage_index, renew_secret, cancel_secret) def remote_renew_lease(self, storage_index, renew_secret): return self._server.renew_lease(storage_index, renew_secret) def remote_get_buckets(self, storage_index): return { k: FoolscapBucketReader(bucket) for (k, bucket) in self._server.get_buckets(storage_index).items() } def remote_slot_testv_and_readv_and_writev(self, storage_index, secrets, test_and_write_vectors, read_vector): return self._server.slot_testv_and_readv_and_writev( storage_index, secrets, test_and_write_vectors, read_vector, renew_leases=True, ) def remote_slot_readv(self, storage_index, shares, readv): return self._server.slot_readv(storage_index, shares, readv) def remote_advise_corrupt_share(self, share_type, storage_index, shnum, reason): return self._server.advise_corrupt_share(share_type, storage_index, shnum, reason) CORRUPTION_REPORT_FORMAT = """\ report: Share Corruption type: {type} storage_index: {storage_index} share_number: {share_number} {reason} """ def render_corruption_report( share_type: bytes, si_s: bytes, shnum: int, reason: bytes ) -> str: """ Create a string that explains a corruption report using freeform text. :param bytes share_type: The type of the share which the report is about. :param bytes si_s: The encoded representation of the storage index which the report is about. :param int shnum: The share number which the report is about. :param bytes reason: The reason given by the client for the corruption report. """ return CORRUPTION_REPORT_FORMAT.format( type=share_type.decode(), storage_index=si_s.decode(), share_number=shnum, reason=reason.decode(), ) def get_corruption_report_path( base_dir: str, now: str, si_s: str, shnum: int ) -> str: """ Determine the path to which a certain corruption report should be written. :param str base_dir: The directory beneath which to construct the path. :param str now: The time of the report. :param str si_s: The encoded representation of the storage index which the report is about. :param int shnum: The share number which the report is about. :return str: A path to which the report can be written. """ # windows can't handle colons in the filename return os.path.join( base_dir, ("%s--%s-%d" % (now, si_s, shnum)).replace(":","") ) tahoe_lafs-1.20.0/src/allmydata/storage/shares.py0000644000000000000000000000060513615410400016662 0ustar00""" Ported to Python 3. """ from allmydata.storage.mutable import MutableShareFile from allmydata.storage.immutable import ShareFile def get_share_file(filename): with open(filename, "rb") as f: prefix = f.read(32) if MutableShareFile.is_valid_header(prefix): return MutableShareFile(filename) # otherwise assume it's immutable return ShareFile(filename) tahoe_lafs-1.20.0/src/allmydata/test/__init__.py0000644000000000000000000001004413615410400016445 0ustar00# -*- coding: utf-8 -*- # Tahoe-LAFS -- secure, distributed storage grid # # Copyright © 2020 The Tahoe-LAFS Software Foundation # # This file is part of Tahoe-LAFS. # # See the docs/about.rst file for licensing information. """ Some setup that should apply across the entire test suite. 
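# --- Illustrative sketch, not part of the original source: how
# get_corruption_report_path() above names report files -- an ISO-8601
# timestamp plus the encoded storage index and share number, with ":"
# stripped so the name is valid on Windows.  The timestamp and storage
# index below are placeholders.
import os
now = "2024-01-02T03:04:05"  # placeholder time_format.iso_utc(sep="T") value
si_s = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"  # placeholder base32 storage index
report_path = os.path.join("corruption-advisories", ("%s--%s-%d" % (now, si_s, 7)).replace(":", ""))
assert ":" not in os.path.basename(report_path)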
Rather than defining interesting APIs for other code to use, this just causes some side-effects which make things better when the test suite runs. Ported to Python 3. """ from traceback import extract_stack, format_list from foolscap.pb import Listener from twisted.python.log import err from twisted.application import service from foolscap.logging.incident import IncidentQualifier class NonQualifier(IncidentQualifier, object): def check_event(self, ev): return False def disable_foolscap_incidents(): # Foolscap-0.2.9 (at least) uses "trailing delay" in its default incident # reporter: after a severe log event is recorded (thus triggering an # "incident" in which recent events are dumped to a file), a few seconds # of subsequent events are also recorded in the incident file. The timer # that this leaves running will cause "Unclean Reactor" unit test # failures. The simplest workaround is to disable this timer. Note that # this disables the timer for the entire process: do not call this from # regular runtime code; only use it for unit tests that are running under # Trial. #IncidentReporter.TRAILING_DELAY = None # # Also, using Incidents more than doubles the test time. So we just # disable them entirely. from foolscap.logging.log import theLogger iq = NonQualifier() theLogger.setIncidentQualifier(iq) # we disable incident reporting for all unit tests. disable_foolscap_incidents() def _configure_hypothesis(): from os import environ from hypothesis import ( HealthCheck, settings, ) settings.register_profile( "ci", suppress_health_check=[ # CPU resources available to CI builds typically varies # significantly from run to run making it difficult to determine # if "too slow" data generation is a result of the code or the # execution environment. Prevent these checks from # (intermittently) failing tests that are otherwise fine. HealthCheck.too_slow, ], # With the same reasoning, disable the test deadline. deadline=None, ) profile_name = environ.get("TAHOE_LAFS_HYPOTHESIS_PROFILE", "default") settings.load_profile(profile_name) _configure_hypothesis() def logging_for_pb_listener(): """ Make Foolscap listen error reports include Listener creation stack information. """ original__init__ = Listener.__init__ def _listener__init__(self, *a, **kw): original__init__(self, *a, **kw) # Capture the stack here, where Listener is instantiated. This is # likely to explain what code is responsible for this Listener, useful # information to have when the Listener eventually fails to listen. self._creation_stack = extract_stack() # Override the Foolscap implementation with one that has an errback def _listener_startService(self): service.Service.startService(self) d = self._ep.listen(self) def _listening(lp): self._lp = lp d.addCallbacks( _listening, # Make sure that this listen failure is reported promptly and with # the creation stack. 
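# --- Illustrative sketch, not part of the original source: selecting a
# Hypothesis settings profile the way _configure_hypothesis() above does --
# register a named profile, then load whichever one the environment asks
# for.  "ci-example" is a placeholder profile name.
import os
from hypothesis import HealthCheck, settings

settings.register_profile(
    "ci-example",
    suppress_health_check=[HealthCheck.too_slow],
    deadline=None,
)
settings.load_profile(os.environ.get("TAHOE_LAFS_HYPOTHESIS_PROFILE", "default"))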
err, errbackArgs=( "Listener created at {}".format( "".join(format_list(self._creation_stack)), ), ), ) Listener.__init__ = _listener__init__ Listener.startService = _listener_startService logging_for_pb_listener() import sys if sys.platform == "win32": from allmydata.windows.fixups import initialize initialize() from eliot import to_file from allmydata.util.jsonbytes import AnyBytesJSONEncoder to_file(open("eliot.log", "wb"), encoder=AnyBytesJSONEncoder) tahoe_lafs-1.20.0/src/allmydata/test/blocking.py0000644000000000000000000000207413615410400016502 0ustar00import sys import traceback import signal import threading from twisted.internet import reactor def print_stacks(): print("Uh oh, something is blocking the event loop!") current_thread = threading.get_ident() for thread_id, frame in sys._current_frames().items(): if thread_id == current_thread: traceback.print_stack(frame, limit=10) break def catch_blocking_in_event_loop(test=None): """ Print tracebacks if the event loop is blocked for more than a short amount of time. """ signal.signal(signal.SIGALRM, lambda *args: print_stacks()) current_scheduled = [None] def cancel_and_rerun(): signal.setitimer(signal.ITIMER_REAL, 0) signal.setitimer(signal.ITIMER_REAL, 0.015) current_scheduled[0] = reactor.callLater(0.01, cancel_and_rerun) cancel_and_rerun() def cleanup(): signal.signal(signal.SIGALRM, signal.SIG_DFL) signal.setitimer(signal.ITIMER_REAL, 0) current_scheduled[0].cancel() if test is not None: test.addCleanup(cleanup) tahoe_lafs-1.20.0/src/allmydata/test/certs.py0000644000000000000000000000402413615410400016027 0ustar00"""Utilities for generating TLS certificates.""" import datetime from cryptography import x509 from cryptography.x509.oid import NameOID from cryptography.hazmat.primitives.asymmetric import rsa from cryptography.hazmat.primitives import serialization, hashes from twisted.python.filepath import FilePath def cert_to_file(path: FilePath, cert) -> FilePath: """ Write the given certificate to a file on disk. Returns the path. """ path.setContent(cert.public_bytes(serialization.Encoding.PEM)) return path def private_key_to_file(path: FilePath, private_key) -> FilePath: """ Write the given key to a file on disk. Returns the path. """ path.setContent( private_key.private_bytes( encoding=serialization.Encoding.PEM, format=serialization.PrivateFormat.TraditionalOpenSSL, encryption_algorithm=serialization.NoEncryption(), ) ) return path def generate_private_key(): """Create a RSA private key.""" return rsa.generate_private_key(public_exponent=65537, key_size=2048) def generate_certificate( private_key, expires_days: int = 10, valid_in_days: int = 0, org_name: str = "Yoyodyne", ): """Generate a certificate from a RSA private key.""" subject = issuer = x509.Name( [x509.NameAttribute(NameOID.ORGANIZATION_NAME, org_name)] ) starts = datetime.datetime.utcnow() + datetime.timedelta(days=valid_in_days) expires = datetime.datetime.utcnow() + datetime.timedelta(days=expires_days) return ( x509.CertificateBuilder() .subject_name(subject) .issuer_name(issuer) .public_key(private_key.public_key()) .serial_number(x509.random_serial_number()) .not_valid_before(min(starts, expires)) .not_valid_after(expires) .add_extension( x509.SubjectAlternativeName([x509.DNSName("localhost")]), critical=False, # Sign our certificate with our private key ) .sign(private_key, hashes.SHA256()) ) tahoe_lafs-1.20.0/src/allmydata/test/cli_node_api.py0000644000000000000000000001325513615410400017322 0ustar00""" Ported to Python 3. 
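# --- Illustrative sketch, not part of the original source: typical use of
# the helpers in test/certs.py above -- generate a throwaway RSA key and a
# self-signed certificate and write both to a temporary directory.  The
# temporary directory and file names are placeholders.
import tempfile
from twisted.python.filepath import FilePath
from allmydata.test.certs import (
    cert_to_file,
    generate_certificate,
    generate_private_key,
    private_key_to_file,
)

tmp = FilePath(tempfile.mkdtemp())
key = generate_private_key()
cert = generate_certificate(key, expires_days=5)
key_path = private_key_to_file(tmp.child(u"private.key"), key)
cert_path = cert_to_file(tmp.child(u"cert.pem"), cert)
assert cert_path.getContent().startswith(b"-----BEGIN CERTIFICATE-----")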
""" __all__ = [ "CLINodeAPI", "Expect", "on_stdout", "on_stdout_and_stderr", "on_different", ] import os import sys from errno import ENOENT import attr from eliot import ( log_call, ) from twisted.internet.error import ( ProcessTerminated, ProcessExitedAlready, ) from twisted.internet.interfaces import ( IProcessProtocol, ) from twisted.python.log import ( msg, ) from twisted.python.filepath import ( FilePath, ) from twisted.internet.protocol import ( Protocol, ProcessProtocol, ) from twisted.internet.defer import ( Deferred, succeed, ) from twisted.internet.task import ( deferLater, ) from ..client import ( _Client, ) from ..util.eliotutil import ( inline_callbacks, log_call_deferred, ) class Expect(Protocol, object): def __init__(self): self._expectations = [] def get_buffered_output(self): return self._buffer def expect(self, expectation): if expectation in self._buffer: return succeed(None) d = Deferred() self._expectations.append((expectation, d)) return d def connectionMade(self): self._buffer = b"" def dataReceived(self, data): self._buffer += data for i in range(len(self._expectations) - 1, -1, -1): expectation, d = self._expectations[i] if expectation in self._buffer: del self._expectations[i] d.callback(None) def connectionLost(self, reason): for ignored, d in self._expectations: d.errback(reason) class _ProcessProtocolAdapter(ProcessProtocol, object): def __init__(self, fds): self._fds = fds def connectionMade(self): for proto in list(self._fds.values()): proto.makeConnection(self.transport) def childDataReceived(self, childFD, data): try: proto = self._fds[childFD] except KeyError: msg(format="Received unhandled output on %(fd)s: %(output)s", fd=childFD, output=data, ) else: proto.dataReceived(data) def processEnded(self, reason): notified = set() for proto in list(self._fds.values()): if proto not in notified: proto.connectionLost(reason) notified.add(proto) def on_stdout(protocol): return _ProcessProtocolAdapter({1: protocol}) def on_stdout_and_stderr(protocol): return _ProcessProtocolAdapter({1: protocol, 2: protocol}) def on_different(fd_mapping): return _ProcessProtocolAdapter(fd_mapping) @attr.s class CLINodeAPI(object): reactor = attr.ib() basedir = attr.ib(type=FilePath) process = attr.ib(default=None) @property def twistd_pid_file(self): return self.basedir.child(u"running.process") @property def node_url_file(self): return self.basedir.child(u"node.url") @property def storage_furl_file(self): return self.basedir.child(u"private").child(u"storage.furl") @property def introducer_furl_file(self): return self.basedir.child(u"private").child(u"introducer.furl") @property def config_file(self): return self.basedir.child(u"tahoe.cfg") @property def exit_trigger_file(self): return self.basedir.child(_Client.EXIT_TRIGGER_FILE) def _execute(self, process_protocol, argv): exe = sys.executable argv = [ exe, "-b", u"-m", u"allmydata.scripts.runner", ] + argv msg(format="Executing %(argv)s", argv=argv, ) return self.reactor.spawnProcess( processProtocol=process_protocol, executable=exe, args=argv, env=os.environ, ) @log_call(action_type="test:cli-api:run", include_args=["extra_tahoe_args"]) def run(self, protocol, extra_tahoe_args=()): """ Start the node running. :param IProcessProtocol protocol: This protocol will be hooked up to the node process and can handle output or generate input. 
""" if not IProcessProtocol.providedBy(protocol): raise TypeError("run requires process protocol, got {}".format(protocol)) self.process = self._execute( protocol, list(extra_tahoe_args) + [u"run", self.basedir.asTextMode().path], ) # Don't let the process run away forever. try: self.active() except OSError as e: if ENOENT != e.errno: raise @log_call_deferred(action_type="test:cli-api:stop") def stop(self): return self.stop_and_wait() @log_call_deferred(action_type="test:cli-api:stop-and-wait") @inline_callbacks def stop_and_wait(self): if self.process is not None: while True: try: self.process.signalProcess("TERM") except ProcessExitedAlready: break else: yield deferLater(self.reactor, 0.1, lambda: None) def active(self): # By writing this file, we get two minutes before the client will # exit. This ensures that even if the 'stop' command doesn't work (and # the test fails), the client should still terminate. self.exit_trigger_file.touch() def _check_cleanup_reason(self, reason): # Let it fail because the process has already exited. reason.trap(ProcessTerminated) return None def cleanup(self): stopping = self.stop_and_wait() stopping.addErrback(self._check_cleanup_reason) return stopping tahoe_lafs-1.20.0/src/allmydata/test/common.py0000644000000000000000000015100513615410400016201 0ustar00""" Functionality related to a lot of the test suite. """ from __future__ import annotations from past.builtins import chr as byteschr __all__ = [ "SyncTestCase", "AsyncTestCase", "AsyncBrokenTestCase", "TrialTestCase", "flush_logged_errors", "skip", "skipIf", # Selected based on platform and re-exported for convenience. "Popen", "PIPE", ] import sys import os, random, struct from contextlib import contextmanager import six import tempfile from tempfile import mktemp from functools import partial from unittest import case as _case from socket import ( AF_INET, SOCK_STREAM, SOMAXCONN, socket, error as socket_error, ) from errno import ( EADDRINUSE, ) import attr import treq from zope.interface import implementer from testtools import ( TestCase, skip, skipIf, ) from testtools.twistedsupport import ( SynchronousDeferredRunTest, AsynchronousDeferredRunTest, AsynchronousDeferredRunTestForBrokenTwisted, flush_logged_errors, ) from twisted.application import service from twisted.plugin import IPlugin from twisted.internet import defer from twisted.internet.defer import inlineCallbacks, returnValue from twisted.internet.interfaces import IPullProducer from twisted.python import failure from twisted.python.filepath import FilePath from twisted.web.error import Error as WebError from twisted.internet.interfaces import ( IStreamServerEndpointStringParser, IReactorSocket, ) from twisted.internet.endpoints import AdoptedStreamServerEndpoint from twisted.trial.unittest import TestCase as _TrialTestCase from allmydata import uri from allmydata.interfaces import ( IMutableFileNode, IImmutableFileNode, NotEnoughSharesError, ICheckable, IMutableUploadable, SDMF_VERSION, MDMF_VERSION, IAddressFamily, NoSpace, ) from allmydata.check_results import CheckResults, CheckAndRepairResults, \ DeepCheckResults, DeepCheckAndRepairResults from allmydata.storage_client import StubServer from allmydata.mutable.layout import unpack_header from allmydata.mutable.publish import MutableData from allmydata.storage.mutable import MutableShareFile from allmydata.util import hashutil, log, iputil from allmydata.util.assertutil import precondition from allmydata.util.consumer import download_to_data import allmydata.test.common_util as 
testutil from allmydata.immutable.upload import Uploader from allmydata.client import ( config_from_string, create_client_from_config, ) from allmydata.scripts.common import ( write_introducer, ) from ..crypto import ( ed25519, rsa, ) from .eliotutil import ( EliotLoggedRunTest, ) from .common_util import ShouldFailMixin # noqa: F401 from subprocess import ( Popen, PIPE, ) # Is the process running as an OS user with elevated privileges (ie, root)? # We only know how to determine this for POSIX systems. superuser = getattr(os, "getuid", lambda: -1)() == 0 EMPTY_CLIENT_CONFIG = config_from_string( "/dev/null", "tub.port", "" ) @attr.s class FakeDisk(object): """ Just enough of a disk to be able to report free / used information. """ total = attr.ib() used = attr.ib() def use(self, num_bytes): """ Mark some amount of available bytes as used (and no longer available). :param int num_bytes: The number of bytes to use. :raise NoSpace: If there are fewer bytes available than ``num_bytes``. :return: ``None`` """ if num_bytes > self.total - self.used: raise NoSpace() self.used += num_bytes @property def available(self): return self.total - self.used def get_disk_stats(self, whichdir, reserved_space): avail = self.available return { 'total': self.total, 'free_for_root': avail, 'free_for_nonroot': avail, 'used': self.used, 'avail': avail - reserved_space, } @attr.s class MemoryIntroducerClient(object): """ A model-only (no behavior) stand-in for ``IntroducerClient``. """ tub = attr.ib() introducer_furl = attr.ib() nickname = attr.ib() my_version = attr.ib() oldest_supported = attr.ib() sequencer = attr.ib() cache_filepath = attr.ib() subscribed_to : list[Subscription] = attr.ib(default=attr.Factory(list)) published_announcements : list[Announcement] = attr.ib(default=attr.Factory(list)) def setServiceParent(self, parent): pass def subscribe_to(self, service_name, cb, *args, **kwargs): self.subscribed_to.append(Subscription(service_name, cb, args, kwargs)) def publish(self, service_name, ann, signing_key): self.published_announcements.append(Announcement( service_name, ann, ed25519.string_from_signing_key(signing_key), )) @attr.s class Subscription(object): """ A model of an introducer subscription. """ service_name = attr.ib() cb = attr.ib() args = attr.ib() kwargs = attr.ib() @attr.s class Announcement(object): """ A model of an introducer announcement. """ service_name = attr.ib() ann = attr.ib() signing_key_bytes = attr.ib(type=bytes) @property def signing_key(self): return ed25519.signing_keypair_from_string(self.signing_key_bytes)[0] def get_published_announcements(client): """ Get a flattened list of all announcements sent using all introducer clients. """ return list( announcement for introducer_client in client.introducer_clients for announcement in introducer_client.published_announcements ) class UseTestPlugins(object): """ A fixture which enables loading Twisted plugins from the Tahoe-LAFS test suite. """ def setUp(self): """ Add the testing package ``plugins`` directory to the ``twisted.plugins`` aggregate package. """ import twisted.plugins testplugins = FilePath(__file__).sibling("plugins") twisted.plugins.__path__.insert(0, testplugins.path) def cleanUp(self): """ Remove the testing package ``plugins`` directory from the ``twisted.plugins`` aggregate package. 
""" import twisted.plugins testplugins = FilePath(__file__).sibling("plugins") twisted.plugins.__path__.remove(testplugins.path) def getDetails(self): return {} @attr.s class UseNode(object): """ A fixture which creates a client node. :ivar dict[bytes, bytes] plugin_config: Configuration items to put in the node's configuration. :ivar bytes storage_plugin: The name of a storage plugin to enable. :ivar FilePath basedir: The base directory of the node. :ivar str introducer_furl: The introducer furl with which to configure the client. :ivar dict[bytes, bytes] node_config: Configuration items for the *node* section of the configuration. :ivar _Config config: The complete resulting configuration. """ plugin_config = attr.ib() storage_plugin = attr.ib() basedir = attr.ib(validator=attr.validators.instance_of(FilePath)) introducer_furl = attr.ib(validator=attr.validators.instance_of(str), converter=six.ensure_str) node_config : dict[bytes,bytes] = attr.ib(default=attr.Factory(dict)) config = attr.ib(default=None) reactor = attr.ib(default=None) def setUp(self): self.assigner = SameProcessStreamEndpointAssigner() self.assigner.setUp() def format_config_items(config): return "\n".join( " = ".join((key, value)) for (key, value) in list(config.items()) ) if self.plugin_config is None: plugin_config_section = "" else: plugin_config_section = ( "[storageclient.plugins.{storage_plugin}]\n" "{config}\n").format( storage_plugin=self.storage_plugin, config=format_config_items(self.plugin_config), ) if self.storage_plugin is None: plugins = "" else: plugins = "storage.plugins = {}".format(self.storage_plugin) write_introducer( self.basedir, "default", self.introducer_furl, ) node_config = self.node_config.copy() if "tub.port" not in node_config: if "tub.location" in node_config: raise ValueError( "UseNode fixture does not support specifying tub.location " "without tub.port" ) # Don't use the normal port auto-assignment logic. It produces # collisions and makes tests fail spuriously. tub_location, tub_endpoint = self.assigner.assign(self.reactor) node_config.update({ "tub.port": tub_endpoint, "tub.location": tub_location, }) self.config = config_from_string( self.basedir.asTextMode().path, "tub.port", "[node]\n" "{node_config}\n" "\n" "[client]\n" "{plugins}\n" "{plugin_config_section}\n" .format( plugins=plugins, node_config=format_config_items(node_config), plugin_config_section=plugin_config_section, ) ) def create_node(self): return create_client_from_config( self.config, _introducer_factory=MemoryIntroducerClient, ) def cleanUp(self): self.assigner.tearDown() def getDetails(self): return {} @implementer(IPlugin, IStreamServerEndpointStringParser) class AdoptedServerPort(object): """ Parse an ``adopt-socket:`` endpoint description by adopting ``fd`` as a listening TCP port. """ prefix = "adopt-socket" def parseStreamServer(self, reactor, fd): # type: ignore # https://twistedmatrix.com/trac/ticket/10134 log.msg("Adopting {}".format(fd)) # AdoptedStreamServerEndpoint wants to own the file descriptor. It # will duplicate it and then close the one we pass in. This means it # is really only possible to adopt a particular file descriptor once. # # This wouldn't matter except one of the tests wants to stop one of # the nodes and start it up again. This results in exactly an attempt # to adopt a particular file descriptor twice. # # So we'll dup it ourselves. AdoptedStreamServerEndpoint can do # whatever it wants to the result - the original will still be valid # and reusable. 
return AdoptedStreamServerEndpoint(reactor, os.dup(int(fd)), AF_INET) def really_bind(s, addr): # Arbitrarily decide we'll try 100 times. We don't want to try forever in # case this is a persistent problem. Trying is cheap, though, so we may # as well try a lot. Hopefully the OS isn't so bad at allocating a port # for us that it takes more than 2 iterations. for i in range(100): try: s.bind(addr) except socket_error as e: if e.errno == EADDRINUSE: continue raise else: return raise Exception("Many bind attempts failed with EADDRINUSE") class SameProcessStreamEndpointAssigner(object): """ A fixture which can assign streaming server endpoints for use *in this process only*. An effort is made to avoid address collisions for this port but the logic for doing so is platform-dependent (sorry, Windows). This is more reliable than trying to listen on a hard-coded non-zero port number. It is at least as reliable as trying to listen on port number zero on Windows and more reliable than doing that on other platforms. """ def setUp(self): self._cleanups = [] # Make sure the `adopt-socket` endpoint is recognized. We do this # instead of providing a dropin because we don't want to make this # endpoint available to random other applications. f = UseTestPlugins() f.setUp() self._cleanups.append(f.cleanUp) def tearDown(self): for c in self._cleanups: c() def assign(self, reactor): """ Make a new streaming server endpoint and return its string description. This is intended to help write config files that will then be read and used in this process. :param reactor: The reactor which will be used to listen with the resulting endpoint. If it provides ``IReactorSocket`` then resulting reliability will be extremely high. If it doesn't, resulting reliability will be pretty alright. :return: A two-tuple of (location hint, port endpoint description) as strings. """ if sys.platform != "win32" and IReactorSocket.providedBy(reactor): # On this platform, we can reliable pre-allocate a listening port. # Once it is bound we know it will not fail later with EADDRINUSE. s = socket(AF_INET, SOCK_STREAM) # We need to keep ``s`` alive as long as the file descriptor we put in # this string might still be used. We could dup() the descriptor # instead but then we've only inverted the cleanup problem: gone from # don't-close-too-soon to close-just-late-enough. So we'll leave # ``s`` alive and use it as the cleanup mechanism. self._cleanups.append(s.close) s.setblocking(False) really_bind(s, ("127.0.0.1", 0)) s.listen(SOMAXCONN) host, port = s.getsockname() location_hint = "tcp:%s:%d" % (host, port) port_endpoint = "adopt-socket:fd=%d" % (s.fileno(),) else: # On other platforms, we blindly guess and hope we get lucky. 
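            # Whichever branch produced it, callers paste the returned pair
            # straight into a node's config; roughly (a sketch, port number
            # illustrative):
            #
            #   location, endpoint = assigner.assign(reactor)
            #   # location == "tcp:127.0.0.1:52347"
            #   # endpoint == "tcp:52347:interface=127.0.0.1" (or "adopt-socket:fd=N")
            #   node_config["tub.port"] = endpoint
            #   node_config["tub.location"] = location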
portnum = iputil.allocate_tcp_port() location_hint = "tcp:127.0.0.1:%d" % (portnum,) port_endpoint = "tcp:%d:interface=127.0.0.1" % (portnum,) return location_hint, port_endpoint @implementer(IPullProducer) class DummyProducer(object): def resumeProducing(self): pass def stopProducing(self): pass @implementer(IImmutableFileNode) class FakeCHKFileNode(object): # type: ignore # incomplete implementation """I provide IImmutableFileNode, but all of my data is stored in a class-level dictionary.""" def __init__(self, filecap, all_contents): precondition(isinstance(filecap, (uri.CHKFileURI, uri.LiteralFileURI)), filecap) self.all_contents = all_contents self.my_uri = filecap self.storage_index = self.my_uri.get_storage_index() def get_uri(self): return self.my_uri.to_string() def get_write_uri(self): return None def get_readonly_uri(self): return self.my_uri.to_string() def get_cap(self): return self.my_uri def get_verify_cap(self): return self.my_uri.get_verify_cap() def get_repair_cap(self): return self.my_uri.get_verify_cap() def get_storage_index(self): return self.storage_index def check(self, monitor, verify=False, add_lease=False): s = StubServer(b"\x00"*20) r = CheckResults(self.my_uri, self.storage_index, healthy=True, recoverable=True, count_happiness=10, count_shares_needed=3, count_shares_expected=10, count_shares_good=10, count_good_share_hosts=10, count_recoverable_versions=1, count_unrecoverable_versions=0, servers_responding=[s], sharemap={1: [s]}, count_wrong_shares=0, list_corrupt_shares=[], count_corrupt_shares=0, list_incompatible_shares=[], count_incompatible_shares=0, summary="", report=[], share_problems=[], servermap=None) return defer.succeed(r) def check_and_repair(self, monitor, verify=False, add_lease=False): d = self.check(verify) def _got(cr): r = CheckAndRepairResults(self.storage_index) r.pre_repair_results = r.post_repair_results = cr return r d.addCallback(_got) return d def is_mutable(self): return False def is_readonly(self): return True def is_unknown(self): return False def is_allowed_in_immutable_directory(self): return True def raise_error(self): pass def get_size(self): if isinstance(self.my_uri, uri.LiteralFileURI): return self.my_uri.get_size() try: data = self.all_contents[self.my_uri.to_string()] except KeyError as le: raise NotEnoughSharesError(le, 0, 3) return len(data) def get_current_size(self): return defer.succeed(self.get_size()) def read(self, consumer, offset=0, size=None): # we don't bother to call registerProducer/unregisterProducer, # because it's a hassle to write a dummy Producer that does the right # thing (we have to make sure that DummyProducer.resumeProducing # writes the data into the consumer immediately, otherwise it will # loop forever). 
d = defer.succeed(None) d.addCallback(self._read, consumer, offset, size) return d def _read(self, ignored, consumer, offset, size): if isinstance(self.my_uri, uri.LiteralFileURI): data = self.my_uri.data else: if self.my_uri.to_string() not in self.all_contents: raise NotEnoughSharesError(None, 0, 3) data = self.all_contents[self.my_uri.to_string()] start = offset if size is not None: end = offset + size else: end = len(data) consumer.write(data[start:end]) return consumer def get_best_readable_version(self): return defer.succeed(self) def download_to_data(self): return download_to_data(self) download_best_version = download_to_data def get_size_of_best_version(self): return defer.succeed(self.get_size) def make_chk_file_cap(size): return uri.CHKFileURI(key=os.urandom(16), uri_extension_hash=os.urandom(32), needed_shares=3, total_shares=10, size=size) def make_chk_file_uri(size): return make_chk_file_cap(size).to_string() def create_chk_filenode(contents, all_contents): filecap = make_chk_file_cap(len(contents)) n = FakeCHKFileNode(filecap, all_contents) all_contents[filecap.to_string()] = contents return n @implementer(IMutableFileNode, ICheckable) class FakeMutableFileNode(object): # type: ignore # incomplete implementation """I provide IMutableFileNode, but all of my data is stored in a class-level dictionary.""" MUTABLE_SIZELIMIT = 10000 _public_key: rsa.PublicKey | None _private_key: rsa.PrivateKey | None def __init__(self, storage_broker, secret_holder, default_encoding_parameters, history, all_contents, keypair: tuple[rsa.PublicKey, rsa.PrivateKey] | None ): self.all_contents = all_contents self.file_types: dict[bytes, int] = {} # storage index => MDMF_VERSION or SDMF_VERSION self.init_from_cap(make_mutable_file_cap(keypair)) self._k = default_encoding_parameters['k'] self._segsize = default_encoding_parameters['max_segment_size'] if keypair is None: self._public_key = self._private_key = None else: self._public_key, self._private_key = keypair def create(self, contents, version=SDMF_VERSION): if version == MDMF_VERSION and \ isinstance(self.my_uri, (uri.ReadonlySSKFileURI, uri.WriteableSSKFileURI)): self.init_from_cap(make_mdmf_mutable_file_cap()) self.file_types[self.storage_index] = version initial_contents = self._get_initial_contents(contents) data = initial_contents.read(initial_contents.get_size()) data = b"".join(data) self.all_contents[self.storage_index] = data return defer.succeed(self) def _get_initial_contents(self, contents): if contents is None: return MutableData(b"") if IMutableUploadable.providedBy(contents): return contents assert callable(contents), "%s should be callable, not %s" % \ (contents, type(contents)) return contents(self) def init_from_cap(self, filecap): assert isinstance(filecap, (uri.WriteableSSKFileURI, uri.ReadonlySSKFileURI, uri.WriteableMDMFFileURI, uri.ReadonlyMDMFFileURI)) self.my_uri = filecap self.storage_index = self.my_uri.get_storage_index() if isinstance(filecap, (uri.WriteableMDMFFileURI, uri.ReadonlyMDMFFileURI)): self.file_types[self.storage_index] = MDMF_VERSION else: self.file_types[self.storage_index] = SDMF_VERSION return self def get_cap(self): return self.my_uri def get_readcap(self): return self.my_uri.get_readonly() def get_uri(self): return self.my_uri.to_string() def get_write_uri(self): if self.is_readonly(): return None return self.my_uri.to_string() def get_readonly(self): return self.my_uri.get_readonly() def get_readonly_uri(self): return self.my_uri.get_readonly().to_string() def get_verify_cap(self): return 
self.my_uri.get_verify_cap() def get_repair_cap(self): if self.my_uri.is_readonly(): return None return self.my_uri def is_readonly(self): return self.my_uri.is_readonly() def is_mutable(self): return self.my_uri.is_mutable() def is_unknown(self): return False def is_allowed_in_immutable_directory(self): return not self.my_uri.is_mutable() def raise_error(self): pass def get_writekey(self): return b"\x00"*16 def get_size(self): return len(self.all_contents[self.storage_index]) def get_current_size(self): return self.get_size_of_best_version() def get_size_of_best_version(self): return defer.succeed(len(self.all_contents[self.storage_index])) def get_storage_index(self): return self.storage_index def get_servermap(self, mode): return defer.succeed(None) def get_version(self): assert self.storage_index in self.file_types return self.file_types[self.storage_index] def check(self, monitor, verify=False, add_lease=False): s = StubServer(b"\x00"*20) r = CheckResults(self.my_uri, self.storage_index, healthy=True, recoverable=True, count_happiness=10, count_shares_needed=3, count_shares_expected=10, count_shares_good=10, count_good_share_hosts=10, count_recoverable_versions=1, count_unrecoverable_versions=0, servers_responding=[s], sharemap={b"seq1-abcd-sh0": [s]}, count_wrong_shares=0, list_corrupt_shares=[], count_corrupt_shares=0, list_incompatible_shares=[], count_incompatible_shares=0, summary="", report=[], share_problems=[], servermap=None) return defer.succeed(r) def check_and_repair(self, monitor, verify=False, add_lease=False): d = self.check(verify) def _got(cr): r = CheckAndRepairResults(self.storage_index) r.pre_repair_results = r.post_repair_results = cr return r d.addCallback(_got) return d def deep_check(self, verify=False, add_lease=False): d = self.check(verify) def _done(r): dr = DeepCheckResults(self.storage_index) dr.add_check(r, []) return dr d.addCallback(_done) return d def deep_check_and_repair(self, verify=False, add_lease=False): d = self.check_and_repair(verify) def _done(r): dr = DeepCheckAndRepairResults(self.storage_index) dr.add_check(r, []) return dr d.addCallback(_done) return d def download_best_version(self): return defer.succeed(self._download_best_version()) def _download_best_version(self, ignored=None): if isinstance(self.my_uri, uri.LiteralFileURI): return self.my_uri.data if self.storage_index not in self.all_contents: raise NotEnoughSharesError(None, 0, 3) return self.all_contents[self.storage_index] def overwrite(self, new_contents): assert not self.is_readonly() new_data = new_contents.read(new_contents.get_size()) new_data = b"".join(new_data) self.all_contents[self.storage_index] = new_data return defer.succeed(None) def modify(self, modifier): # this does not implement FileTooLargeError, but the real one does return defer.maybeDeferred(self._modify, modifier) def _modify(self, modifier): assert not self.is_readonly() old_contents = self.all_contents[self.storage_index] new_data = modifier(old_contents, None, True) self.all_contents[self.storage_index] = new_data return None # As actually implemented, MutableFilenode and MutableFileVersion # are distinct. However, nothing in the webapi uses (yet) that # distinction -- it just uses the unified download interface # provided by get_best_readable_version and read. When we start # doing cooler things like LDMF, we will want to revise this code to # be less simplistic. 
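    # From a caller's point of view that unified interface looks roughly
    # like this (a sketch; ``consumer`` is any IConsumer-ish object with a
    # ``write`` method):
    #
    #   d = node.get_best_readable_version()
    #   d.addCallback(lambda version: version.read(consumer, offset=0, size=None))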
def get_best_readable_version(self): return defer.succeed(self) def get_best_mutable_version(self): return defer.succeed(self) # Ditto for this, which is an implementation of IWriteable. # XXX: Declare that the same is implemented. def update(self, data, offset): assert not self.is_readonly() def modifier(old, servermap, first_time): new = old[:offset] + b"".join(data.read(data.get_size())) new += old[len(new):] return new return self.modify(modifier) def read(self, consumer, offset=0, size=None): data = self._download_best_version() if size: data = data[offset:offset+size] consumer.write(data) return defer.succeed(consumer) def make_mutable_file_cap( keypair: tuple[rsa.PublicKey, rsa.PrivateKey] | None = None, ) -> uri.WriteableSSKFileURI: """ Create a local representation of a mutable object. :param keypair: If None, a random keypair will be generated for the new object. Otherwise, this is the keypair for that object. """ if keypair is None: writekey = os.urandom(16) fingerprint = os.urandom(32) else: pubkey, privkey = keypair pubkey_s = rsa.der_string_from_verifying_key(pubkey) privkey_s = rsa.der_string_from_signing_key(privkey) writekey = hashutil.ssk_writekey_hash(privkey_s) fingerprint = hashutil.ssk_pubkey_fingerprint_hash(pubkey_s) return uri.WriteableSSKFileURI( writekey=writekey, fingerprint=fingerprint, ) def make_mdmf_mutable_file_cap(): return uri.WriteableMDMFFileURI(writekey=os.urandom(16), fingerprint=os.urandom(32)) def make_mutable_file_uri(mdmf=False): if mdmf: uri = make_mdmf_mutable_file_cap() else: uri = make_mutable_file_cap() return uri.to_string() def make_verifier_uri(): return uri.SSKVerifierURI(storage_index=os.urandom(16), fingerprint=os.urandom(32)).to_string() def create_mutable_filenode(contents, mdmf=False, all_contents=None): # XXX: All of these arguments are kind of stupid. if mdmf: cap = make_mdmf_mutable_file_cap() else: cap = make_mutable_file_cap() encoding_params = {} encoding_params['k'] = 3 encoding_params['max_segment_size'] = 128*1024 filenode = FakeMutableFileNode(None, None, encoding_params, None, all_contents, None) filenode.init_from_cap(cap) if mdmf: filenode.create(MutableData(contents), version=MDMF_VERSION) else: filenode.create(MutableData(contents), version=SDMF_VERSION) return filenode class LoggingServiceParent(service.MultiService): def log(self, *args, **kwargs): return log.msg(*args, **kwargs) TEST_DATA=b"\x02"*(Uploader.URI_LIT_SIZE_THRESHOLD+1) class WebErrorMixin(object): def explain_web_error(self, f): # an error on the server side causes the client-side getPage() to # return a failure(t.web.error.Error), and its str() doesn't show the # response body, which is where the useful information lives. Attach # this method as an errback handler, and it will reveal the hidden # message. 
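        # For example (a sketch; the request helper is a placeholder):
        #
        #   d = do_some_web_request(...)
        #   d.addErrback(self.explain_web_error)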
f.trap(WebError) print("Web Error:", f.value, ":", f.value.response) return f def _shouldHTTPError(self, res, which, validator): if isinstance(res, failure.Failure): res.trap(WebError) return validator(res) else: self.fail("%s was supposed to Error, not get '%s'" % (which, res)) def shouldHTTPError(self, which, code=None, substring=None, response_substring=None, callable=None, *args, **kwargs): # returns a Deferred with the response body if isinstance(substring, bytes): substring = str(substring, "ascii") if isinstance(response_substring, str): response_substring = response_substring.encode("ascii") assert substring is None or isinstance(substring, str) assert response_substring is None or isinstance(response_substring, bytes) assert callable def _validate(f): if code is not None: self.failUnlessEqual(f.value.status, b"%d" % code, which) if substring: code_string = str(f) self.failUnless(substring in code_string, "%s: substring '%s' not in '%s'" % (which, substring, code_string)) response_body = f.value.response if response_substring: self.failUnless(response_substring in response_body, "%r: response substring %r not in %r" % (which, response_substring, response_body)) return response_body d = defer.maybeDeferred(callable, *args, **kwargs) d.addBoth(self._shouldHTTPError, which, _validate) return d @inlineCallbacks def assertHTTPError(self, url, code, response_substring, method="get", persistent=False, **args): response = yield treq.request(method, url, persistent=persistent, **args) body = yield response.content() self.assertEquals(response.code, code) if response_substring is not None: if isinstance(response_substring, str): response_substring = response_substring.encode("utf-8") self.assertIn(response_substring, body) returnValue(body) class ErrorMixin(WebErrorMixin): def explain_error(self, f): if f.check(defer.FirstError): print("First Error:", f.value.subFailure) return f def corrupt_field(data, offset, size, debug=False): if random.random() < 0.5: newdata = testutil.flip_one_bit(data, offset, size) if debug: log.msg("testing: corrupting offset %d, size %d flipping one bit orig: %r, newdata: %r" % (offset, size, data[offset:offset+size], newdata[offset:offset+size])) return newdata else: newval = testutil.insecurerandstr(size) if debug: log.msg("testing: corrupting offset %d, size %d randomizing field, orig: %r, newval: %r" % (offset, size, data[offset:offset+size], newval)) return data[:offset]+newval+data[offset+size:] def _corrupt_nothing(data, debug=False): """Leave the data pristine. """ return data def _corrupt_file_version_number(data, debug=False): """Scramble the file data -- the share file version number have one bit flipped or else will be changed to a random value.""" return corrupt_field(data, 0x00, 4) def _corrupt_size_of_file_data(data, debug=False): """Scramble the file data -- the field showing the size of the share data within the file will be set to one smaller.""" return corrupt_field(data, 0x04, 4) def _corrupt_sharedata_version_number(data, debug=False): """Scramble the file data -- the share data version number will have one bit flipped or else will be changed to a random value, but not 1 or 2.""" return corrupt_field(data, 0x0c, 4) sharevernum = struct.unpack(">L", data[0x0c:0x0c+4])[0] assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways." 
newsharevernum = sharevernum while newsharevernum in (1, 2): newsharevernum = random.randrange(0, 2**32) newsharevernumbytes = struct.pack(">L", newsharevernum) return data[:0x0c] + newsharevernumbytes + data[0x0c+4:] def _corrupt_sharedata_version_number_to_plausible_version(data, debug=False): """Scramble the file data -- the share data version number will be changed to 2 if it is 1 or else to 1 if it is 2.""" sharevernum = struct.unpack(">L", data[0x0c:0x0c+4])[0] assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways." if sharevernum == 1: newsharevernum = 2 else: newsharevernum = 1 newsharevernumbytes = struct.pack(">L", newsharevernum) return data[:0x0c] + newsharevernumbytes + data[0x0c+4:] def _corrupt_segment_size(data, debug=False): """Scramble the file data -- the field showing the size of the segment will have one bit flipped or else be changed to a random value.""" sharevernum = struct.unpack(">L", data[0x0c:0x0c+4])[0] assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways." if sharevernum == 1: return corrupt_field(data, 0x0c+0x04, 4, debug=False) else: return corrupt_field(data, 0x0c+0x04, 8, debug=False) def _corrupt_size_of_sharedata(data, debug=False): """Scramble the file data -- the field showing the size of the data within the share data will have one bit flipped or else will be changed to a random value.""" sharevernum = struct.unpack(">L", data[0x0c:0x0c+4])[0] assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways." if sharevernum == 1: return corrupt_field(data, 0x0c+0x08, 4) else: return corrupt_field(data, 0x0c+0x0c, 8) def _corrupt_offset_of_sharedata(data, debug=False): """Scramble the file data -- the field showing the offset of the data within the share data will have one bit flipped or else be changed to a random value.""" sharevernum = struct.unpack(">L", data[0x0c:0x0c+4])[0] assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways." if sharevernum == 1: return corrupt_field(data, 0x0c+0x0c, 4) else: return corrupt_field(data, 0x0c+0x14, 8) def _corrupt_offset_of_ciphertext_hash_tree(data, debug=False): """Scramble the file data -- the field showing the offset of the ciphertext hash tree within the share data will have one bit flipped or else be changed to a random value. """ sharevernum = struct.unpack(">L", data[0x0c:0x0c+4])[0] assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways." if sharevernum == 1: return corrupt_field(data, 0x0c+0x14, 4, debug=False) else: return corrupt_field(data, 0x0c+0x24, 8, debug=False) def _corrupt_offset_of_block_hashes(data, debug=False): """Scramble the file data -- the field showing the offset of the block hash tree within the share data will have one bit flipped or else will be changed to a random value.""" sharevernum = struct.unpack(">L", data[0x0c:0x0c+4])[0] assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways." 
if sharevernum == 1: return corrupt_field(data, 0x0c+0x18, 4) else: return corrupt_field(data, 0x0c+0x2c, 8) def _corrupt_offset_of_block_hashes_to_truncate_crypttext_hashes(data, debug=False): """Scramble the file data -- the field showing the offset of the block hash tree within the share data will have a multiple of hash size subtracted from it, thus causing the downloader to download an incomplete crypttext hash tree.""" sharevernum = struct.unpack(">L", data[0x0c:0x0c+4])[0] assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways." if sharevernum == 1: curval = struct.unpack(">L", data[0x0c+0x18:0x0c+0x18+4])[0] newval = random.randrange(0, max(1, (curval//hashutil.CRYPTO_VAL_SIZE)//2))*hashutil.CRYPTO_VAL_SIZE newvalstr = struct.pack(">L", newval) return data[:0x0c+0x18]+newvalstr+data[0x0c+0x18+4:] else: curval = struct.unpack(">Q", data[0x0c+0x2c:0x0c+0x2c+8])[0] newval = random.randrange(0, max(1, (curval//hashutil.CRYPTO_VAL_SIZE)//2))*hashutil.CRYPTO_VAL_SIZE newvalstr = struct.pack(">Q", newval) return data[:0x0c+0x2c]+newvalstr+data[0x0c+0x2c+8:] def _corrupt_offset_of_share_hashes(data, debug=False): """Scramble the file data -- the field showing the offset of the share hash tree within the share data will have one bit flipped or else will be changed to a random value.""" sharevernum = struct.unpack(">L", data[0x0c:0x0c+4])[0] assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways." if sharevernum == 1: return corrupt_field(data, 0x0c+0x1c, 4) else: return corrupt_field(data, 0x0c+0x34, 8) def _corrupt_offset_of_uri_extension(data, debug=False): """Scramble the file data -- the field showing the offset of the uri extension will have one bit flipped or else will be changed to a random value.""" sharevernum = struct.unpack(">L", data[0x0c:0x0c+4])[0] assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways." if sharevernum == 1: return corrupt_field(data, 0x0c+0x20, 4) else: return corrupt_field(data, 0x0c+0x3c, 8) def _corrupt_offset_of_uri_extension_to_force_short_read(data, debug=False): """Scramble the file data -- the field showing the offset of the uri extension will be set to the size of the file minus 3. This means when the client tries to read the length field from that location it will get a short read -- the result string will be only 3 bytes long, not the 4 or 8 bytes necessary to do a successful struct.unpack.""" sharevernum = struct.unpack(">L", data[0x0c:0x0c+4])[0] assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways." # The "-0x0c" in here is to skip the server-side header in the share # file, which the client doesn't see when seeking and reading. 
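    # Worked example: for a v1 share file with len(data) == 0x1000, the
    # offset field is rewritten to 0x1000 - 0x0c - 3 == 0xff1.  The client
    # only sees len(data) - 0x0c == 0xff4 bytes, so reading the 4-byte
    # length field at client-side offset 0xff1 yields just 3 bytes and the
    # subsequent struct.unpack fails, as intended.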
if sharevernum == 1: if debug: log.msg("testing: corrupting offset %d, size %d, changing %d to %d (len(data) == %d)" % (0x2c, 4, struct.unpack(">L", data[0x2c:0x2c+4])[0], len(data)-0x0c-3, len(data))) return data[:0x2c] + struct.pack(">L", len(data)-0x0c-3) + data[0x2c+4:] else: if debug: log.msg("testing: corrupting offset %d, size %d, changing %d to %d (len(data) == %d)" % (0x48, 8, struct.unpack(">Q", data[0x48:0x48+8])[0], len(data)-0x0c-3, len(data))) return data[:0x48] + struct.pack(">Q", len(data)-0x0c-3) + data[0x48+8:] def _corrupt_mutable_share_data(data, debug=False): prefix = data[:32] assert MutableShareFile.is_valid_header(prefix), "This function is designed to corrupt mutable shares of v1, and the magic number doesn't look right: %r vs %r" % (prefix, MutableShareFile.MAGIC) data_offset = MutableShareFile.DATA_OFFSET sharetype = data[data_offset:data_offset+1] assert sharetype == b"\x00", "non-SDMF mutable shares not supported" (version, ig_seqnum, ig_roothash, ig_IV, ig_k, ig_N, ig_segsize, ig_datalen, offsets) = unpack_header(data[data_offset:]) assert version == 0, "this function only handles v0 SDMF files" start = data_offset + offsets["share_data"] length = data_offset + offsets["enc_privkey"] - start return corrupt_field(data, start, length) def _corrupt_share_data(data, debug=False): """Scramble the file data -- the field containing the share data itself will have one bit flipped or else will be changed to a random value.""" sharevernum = struct.unpack(">L", data[0x0c:0x0c+4])[0] assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways, not v%d." % sharevernum if sharevernum == 1: sharedatasize = struct.unpack(">L", data[0x0c+0x08:0x0c+0x08+4])[0] return corrupt_field(data, 0x0c+0x24, sharedatasize) else: sharedatasize = struct.unpack(">Q", data[0x0c+0x08:0x0c+0x0c+8])[0] return corrupt_field(data, 0x0c+0x44, sharedatasize) def _corrupt_share_data_last_byte(data, debug=False): """Scramble the file data -- flip all bits of the last byte.""" sharevernum = struct.unpack(">L", data[0x0c:0x0c+4])[0] assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways, not v%d." % sharevernum if sharevernum == 1: sharedatasize = struct.unpack(">L", data[0x0c+0x08:0x0c+0x08+4])[0] offset = 0x0c+0x24+sharedatasize-1 else: sharedatasize = struct.unpack(">Q", data[0x0c+0x08:0x0c+0x0c+8])[0] offset = 0x0c+0x44+sharedatasize-1 newdata = data[:offset] + byteschr(ord(data[offset:offset+1])^0xFF) + data[offset+1:] if debug: log.msg("testing: flipping all bits of byte at offset %d: %r, newdata: %r" % (offset, data[offset], newdata[offset])) return newdata def _corrupt_crypttext_hash_tree(data, debug=False): """Scramble the file data -- the field containing the crypttext hash tree will have one bit flipped or else will be changed to a random value. """ sharevernum = struct.unpack(">L", data[0x0c:0x0c+4])[0] assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways." 
if sharevernum == 1: crypttexthashtreeoffset = struct.unpack(">L", data[0x0c+0x14:0x0c+0x14+4])[0] blockhashesoffset = struct.unpack(">L", data[0x0c+0x18:0x0c+0x18+4])[0] else: crypttexthashtreeoffset = struct.unpack(">Q", data[0x0c+0x24:0x0c+0x24+8])[0] blockhashesoffset = struct.unpack(">Q", data[0x0c+0x2c:0x0c+0x2c+8])[0] return corrupt_field(data, 0x0c+crypttexthashtreeoffset, blockhashesoffset-crypttexthashtreeoffset, debug=debug) def _corrupt_crypttext_hash_tree_byte_x221(data, debug=False): """Scramble the file data -- the byte at offset 0x221 will have its 7th (b1) bit flipped. """ sharevernum = struct.unpack(">L", data[0x0c:0x0c+4])[0] assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways." if debug: log.msg("original data: %r" % (data,)) return data[:0x0c+0x221] + byteschr(ord(data[0x0c+0x221:0x0c+0x221+1])^0x02) + data[0x0c+0x2210+1:] def _corrupt_block_hashes(data, debug=False): """Scramble the file data -- the field containing the block hash tree will have one bit flipped or else will be changed to a random value. """ sharevernum = struct.unpack(">L", data[0x0c:0x0c+4])[0] assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways." if sharevernum == 1: blockhashesoffset = struct.unpack(">L", data[0x0c+0x18:0x0c+0x18+4])[0] sharehashesoffset = struct.unpack(">L", data[0x0c+0x1c:0x0c+0x1c+4])[0] else: blockhashesoffset = struct.unpack(">Q", data[0x0c+0x2c:0x0c+0x2c+8])[0] sharehashesoffset = struct.unpack(">Q", data[0x0c+0x34:0x0c+0x34+8])[0] return corrupt_field(data, 0x0c+blockhashesoffset, sharehashesoffset-blockhashesoffset) def _corrupt_share_hashes(data, debug=False): """Scramble the file data -- the field containing the share hash chain will have one bit flipped or else will be changed to a random value. """ sharevernum = struct.unpack(">L", data[0x0c:0x0c+4])[0] assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways." if sharevernum == 1: sharehashesoffset = struct.unpack(">L", data[0x0c+0x1c:0x0c+0x1c+4])[0] uriextoffset = struct.unpack(">L", data[0x0c+0x20:0x0c+0x20+4])[0] else: sharehashesoffset = struct.unpack(">Q", data[0x0c+0x34:0x0c+0x34+8])[0] uriextoffset = struct.unpack(">Q", data[0x0c+0x3c:0x0c+0x3c+8])[0] return corrupt_field(data, 0x0c+sharehashesoffset, uriextoffset-sharehashesoffset) def _corrupt_length_of_uri_extension(data, debug=False): """Scramble the file data -- the field showing the length of the uri extension will have one bit flipped or else will be changed to a random value.""" sharevernum = struct.unpack(">L", data[0x0c:0x0c+4])[0] assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways." if sharevernum == 1: uriextoffset = struct.unpack(">L", data[0x0c+0x20:0x0c+0x20+4])[0] return corrupt_field(data, uriextoffset, 4) else: uriextoffset = struct.unpack(">Q", data[0x0c+0x3c:0x0c+0x3c+8])[0] return corrupt_field(data, 0x0c+uriextoffset, 8) def _corrupt_uri_extension(data, debug=False): """Scramble the file data -- the field containing the uri extension will have one bit flipped or else will be changed to a random value.""" sharevernum = struct.unpack(">L", data[0x0c:0x0c+4])[0] assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways." 
if sharevernum == 1: uriextoffset = struct.unpack(">L", data[0x0c+0x20:0x0c+0x20+4])[0] uriextlen = struct.unpack(">L", data[0x0c+uriextoffset:0x0c+uriextoffset+4])[0] else: uriextoffset = struct.unpack(">Q", data[0x0c+0x3c:0x0c+0x3c+8])[0] uriextlen = struct.unpack(">Q", data[0x0c+uriextoffset:0x0c+uriextoffset+8])[0] return corrupt_field(data, 0x0c+uriextoffset, uriextlen) @attr.s @implementer(IAddressFamily) class ConstantAddresses(object): """ Pretend to provide support for some address family but just hand out canned responses. """ _listener = attr.ib(default=None) _handler = attr.ib(default=None) def get_listener(self): if self._listener is None: raise Exception("{!r} has no listener.") return self._listener def get_client_endpoint(self): if self._handler is None: raise Exception("{!r} has no client endpoint.") return self._handler @contextmanager def disable_modules(*names): """ A context manager which makes modules appear to be missing while it is active. :param *names: The names of the modules to disappear. Only top-level modules are supported (that is, "." is not allowed in any names). This is an implementation shortcoming which could be lifted if desired. """ if any("." in name for name in names): raise ValueError("Names containing '.' are not supported.") missing = object() modules = list(sys.modules.get(n, missing) for n in names) for n in names: sys.modules[n] = None yield for n, original in zip(names, modules): if original is missing: del sys.modules[n] else: sys.modules[n] = original class _TestCaseMixin(object): """ A mixin for ``TestCase`` which collects helpful behaviors for subclasses. Those behaviors are: * All of the features of testtools TestCase. * Each test method will be run in a unique Eliot action context which identifies the test and collects all Eliot log messages emitted by that test (including setUp and tearDown messages). * trial-compatible mktemp method * unittest2-compatible assertRaises helper * Automatic cleanup of tempfile.tempdir mutation (once pervasive through the Tahoe-LAFS test suite, perhaps gone now but someone should verify this). """ def setUp(self): # Restore the original temporary directory. Node ``init_tempdir`` # mangles it and many tests manage to get that method called. self.addCleanup( partial(setattr, tempfile, "tempdir", tempfile.tempdir), ) return super(_TestCaseMixin, self).setUp() class _DummyCase(_case.TestCase): def dummy(self): pass _dummyCase = _DummyCase("dummy") def mktemp(self): return mktemp() def assertRaises(self, *a, **kw): return self._dummyCase.assertRaises(*a, **kw) def failUnless(self, *args, **kwargs): """Backwards compatibility method.""" self.assertTrue(*args, **kwargs) def failIf(self, *args, **kwargs): """Backwards compatibility method.""" self.assertFalse(*args, **kwargs) def failIfEqual(self, *args, **kwargs): """Backwards compatibility method.""" self.assertNotEqual(*args, **kwargs) def failUnlessEqual(self, *args, **kwargs): """Backwards compatibility method.""" self.assertEqual(*args, **kwargs) def failUnlessReallyEqual(self, *args, **kwargs): """Backwards compatibility method.""" self.assertReallyEqual(*args, **kwargs) class SyncTestCase(_TestCaseMixin, TestCase): """ A ``TestCase`` which can run tests that may return an already-fired ``Deferred``. """ run_tests_with = EliotLoggedRunTest.make_factory( SynchronousDeferredRunTest, ) class AsyncTestCase(_TestCaseMixin, TestCase): """ A ``TestCase`` which can run tests that may return a Deferred that will only fire if the global reactor is running. 
""" run_tests_with = EliotLoggedRunTest.make_factory( AsynchronousDeferredRunTest.make_factory(timeout=60.0), ) class AsyncBrokenTestCase(_TestCaseMixin, TestCase): """ A ``TestCase`` like ``AsyncTestCase`` but which spins the reactor a little longer than apparently necessary to clean out lingering unaccounted for event sources. Tests which require this behavior are broken and should be fixed so they pass with ``AsyncTestCase``. """ run_tests_with = EliotLoggedRunTest.make_factory( AsynchronousDeferredRunTestForBrokenTwisted.make_factory(timeout=60.0), ) class TrialTestCase(_TrialTestCase): """ A twisted.trial.unittest.TestCaes with Tahoe required fixes applied. Currently these are: - ensure that .fail() passes a bytes msg on Python2 """ def fail(self, msg): """ Ensure our msg is a native string on Python2. If it was Unicode, we encode it as utf8 and hope for the best. On Python3 we take no action. This is necessary because Twisted passes the 'msg' argument along to the constructor of an exception; on Python2, Exception will accept a `unicode` instance but will fail if you try to turn that Exception instance into a string. """ return super(TrialTestCase, self).fail(msg) tahoe_lafs-1.20.0/src/allmydata/test/common_storage.py0000644000000000000000000000357113615410400017731 0ustar00 def upload_immutable(storage_server, storage_index, renew_secret, cancel_secret, shares): """ Synchronously upload some immutable shares to a ``StorageServer``. :param allmydata.storage.server.StorageServer storage_server: The storage server object to use to perform the upload. :param bytes storage_index: The storage index for the immutable shares. :param bytes renew_secret: The renew secret for the implicitly created lease. :param bytes cancel_secret: The cancel secret for the implicitly created lease. :param dict[int, bytes] shares: A mapping from share numbers to share data to upload. The data for all shares must be of the same length. :return: ``None`` """ already, writers = storage_server.allocate_buckets( storage_index, renew_secret, cancel_secret, shares.keys(), len(next(iter(shares.values()))), ) for shnum, writer in writers.items(): writer.write(0, shares[shnum]) writer.close() def upload_mutable(storage_server, storage_index, secrets, shares): """ Synchronously upload some mutable shares to a ``StorageServer``. :param allmydata.storage.server.StorageServer storage_server: The storage server object to use to perform the upload. :param bytes storage_index: The storage index for the immutable shares. :param secrets: A three-tuple of a write enabler, renew secret, and cancel secret. :param dict[int, bytes] shares: A mapping from share numbers to share data to upload. :return: ``None`` """ test_and_write_vectors = { sharenum: ([], [(0, data)], None) for sharenum, data in shares.items() } read_vector = [] storage_server.slot_testv_and_readv_and_writev( storage_index, secrets, test_and_write_vectors, read_vector, ) tahoe_lafs-1.20.0/src/allmydata/test/common_system.py0000644000000000000000000013733213615410400017614 0ustar00""" Test infrastructure for integration-y tests that run actual nodes, like those in ``allmydata.test.test_system``. Ported to Python 3. 
""" from typing import Optional import os from functools import partial from twisted.internet import reactor from twisted.internet import defer from twisted.internet.defer import inlineCallbacks from twisted.internet.task import deferLater from twisted.application import service from foolscap.api import flushEventualQueue from allmydata import client from allmydata.introducer.server import create_introducer from allmydata.util import fileutil, log, pollmixin from allmydata.util.deferredutil import async_to_deferred from allmydata.storage import http_client from allmydata.storage_client import ( NativeStorageServer, HTTPNativeStorageServer, ) from twisted.python.filepath import ( FilePath, ) from .common import ( SameProcessStreamEndpointAssigner, ) from . import common_util as testutil from ..scripts.common import ( write_introducer, ) # our system test uses the same Tub certificates each time, to avoid the # overhead of key generation SYSTEM_TEST_CERTS = [ """-----BEGIN CERTIFICATE----- MIICojCCAYoCAQEwDQYJKoZIhvcNAQELBQAwFzEVMBMGA1UEAwwMbmV3cGJfdGhp bmd5MB4XDTIwMDEwMjAxNDAzM1oXDTIxMDEwMTAxNDAzM1owFzEVMBMGA1UEAwwM bmV3cGJfdGhpbmd5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA1iNV z07PYwZwucl87QlL2TFZvDxD4flZ/p3BZE3DCT5Efn9w2NT4sHXL1e+R/qsDFuNG bw1y1TRM0DGK6Wr0XRT2mLQULNgB8y/HrhcSdONsYRyWdj+LimyECKjwh0iSkApv Yj/7IOuq6dOoh67YXPdf75OHLShm4+8q8fuwhBL+nuuO4NhZDJKupYHcnuCkcF88 LN77HKrrgbpyVmeghUkwJMLeJCewvYVlambgWRiuGGexFgAm6laS3rWetOcdm9eg FoA9PKNN6xvPatbj99MPoLpBbzsI64M0yT/wTSw1pj/Nom3rwfMa2OH8Kk7c8R/r U3xj4ZY1DTlGERvejQIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQAwyQjQ3ZgtJ3JW r3/EPdqSUBamTfXIpOh9rXmRjPpbe+MvenqIzl4q+GnkL5mdEb1e1hdKQZgFQ5Q5 tbcNIz6h5C07KaNtbqhZCx5c/RUEH87VeXuAuOqZHbZWJ18q0tnk+YgWER2TOkgE RI2AslcsJBt88UUOjHX6/7J3KjPFaAjW1QV3TTsHxk14aYDYJwPdz+ijchgbOPQ0 i+ilhzcB+qQnOC1s4xQSFo+zblTO7EgqM9KpupYfOVFh46P1Mak2W8EDvhz0livl OROXJ6nR/13lmQdfVX6T45d+ITBwtmW2nGAh3oI3JlArGKHaW+7qnuHR72q9FSES cEYA/wmk -----END CERTIFICATE----- -----BEGIN PRIVATE KEY----- MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDWI1XPTs9jBnC5 yXztCUvZMVm8PEPh+Vn+ncFkTcMJPkR+f3DY1PiwdcvV75H+qwMW40ZvDXLVNEzQ MYrpavRdFPaYtBQs2AHzL8euFxJ042xhHJZ2P4uKbIQIqPCHSJKQCm9iP/sg66rp 06iHrthc91/vk4ctKGbj7yrx+7CEEv6e647g2FkMkq6lgdye4KRwXzws3vscquuB unJWZ6CFSTAkwt4kJ7C9hWVqZuBZGK4YZ7EWACbqVpLetZ605x2b16AWgD08o03r G89q1uP30w+gukFvOwjrgzTJP/BNLDWmP82ibevB8xrY4fwqTtzxH+tTfGPhljUN OUYRG96NAgMBAAECggEAJ5xztBx0+nFnisZ9yG8uy6d4XPyc5gE1J4dRDdfgmyYc j3XNjx6ePi4cHZ/qVryVnrc+AS7wrgW1q9FuS81QFKPbFdZB4SW3/p85BbgY3uxu 0Ovz3T3V9y4polx12eCP0/tKLVd+gdF2VTik9Sxfs5rC8VNN7wmJNuK4A/k15sgy BIu/R8NlMNGQySNhtccp+dzB8uTyKx5zFZhVvnAK/3YX9BC2V4QBW9JxO4S8N0/9 48e9Sw/fGCfQ/EFPKGCvTvfuRqJ+4t5k10FygXJ+s+y70ifYi+aSsjJBuranbLJp g5TwhuKnTWs8Nth3YRLbcJL4VBIOehjAWy8pDMMtlQKBgQD0O8cHb8cOTGW0BijC NDofhA2GooQUUR3WL324PXWZq0DXuBDQhJVBKWO3AYonivhhd/qWO8lea9MEmU41 nKZ7maS4B8AJLJC08P8GL1uCIE/ezEXEi9JwC1zJiyl595Ap4lSAozH0DwjNvmGL 5mIdYg0BliqFXbloNJkNlb7INwKBgQDgdGEIWXc5Y1ncWNs6iDIV/t2MlL8vLrP0 hpkl/QiMndOQyD6JBo0+ZqvOQTSS4NTSxBROjPxvFbEJ3eH8Pmn8gHOf46fzP1OJ wlYv0gYzkN4FE/tN6JnO2u9pN0euyyZLM1fnEcrMWColMN8JlWjtA7Gbxm8lkfa4 3vicaJtlWwKBgQCQYL4ZgVR0+Wit8W4qz+EEPHYafvwBXqp6sXxqa7qXawtb+q3F 9nqdGLCfwMNA+QA37ksugI1byfXmpBH902r/aiZbvAkj4zpwHH9F0r0PwbY1iSA9 PkLahX0Gj8OnHFgWynsVyGOBWVnk9oSHxVt+7zWtGG5uhKdUGLPZugocJQKBgB61 7bzduOFiRZ5PjhdxISE/UQL2Kz6Cbl7rt7Kp72yF/7eUnnHTMqoyFBnRdCcQmi4I ZBrnUXbFigamlFAWHhxNWwSqeoVeychUjcRXQT/291nMhRsA02KpNA66YJV6+E9b xBA6r/vLqGCUUkAWcFfVpIyC1xxV32MmJvAHpBN3AoGAPF3MUFiO0iKNZfst6Tm3 rzrldLawDo98DRZ7Yb2kWlWZYqUk/Nvryvo2cns75WGSMDYVbbRp+BY7kZmNYa9K 
iQzKDL54ZRu6V+getJdeAO8yXoCmnZKxt5OHvOSrQMfAmFKSwLwxBbZBfXEyuune yfusXLtCgajpreoVIa0xWdQ= -----END PRIVATE KEY----- """, # 0 """-----BEGIN CERTIFICATE----- MIICojCCAYoCAQEwDQYJKoZIhvcNAQELBQAwFzEVMBMGA1UEAwwMbmV3cGJfdGhp bmd5MB4XDTIwMDEwMjAxNDAzM1oXDTIxMDEwMTAxNDAzM1owFzEVMBMGA1UEAwwM bmV3cGJfdGhpbmd5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEApDzW 4ZBeK9w4xpRaed6lXzeCO0Xmr3f0ynbueSdiZ89FWoAMgK+SiBIOViYV6hfm0Wah lemSNzFGx5LvDSg2uwSqEP23DeM9O/SQPgIAiLeeEsYZJcgg2jz92YfFEaahsGdI 6qSP4XI2/5dgKRpPOYDGyw6R5PQR6w22Xq1WD1jBvImk/k09I9jHRn40pYbaJzbg U2aIjvOruo2kqe4f6iDqE0piYimAZJUvemu1UoyV5NG590hGkDuWsMD77+d2FxCj 9Nzb+iuuG3ksnanHPyXi1hQmzp5OmzVWaevCHinNjWgsuSuLGO9H2SLf3wwp2UCs EpKtzoKrnZdEg/anNwIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQChxtr67o1aZZMJ A6gESPtFjZLw6wG0j50JsrWKLvoXVts1ToJ9u2nx01aFKjBwb4Yg+vdJfDgIIAEm jS56h6H2DfJlkTWHmi8Vx1wuusWnrNwYMI53tdlRIpD2+Ne7yeoLQZcVN2wuPmxD Mbksg4AI4csmbkU/NPX5DtMy4EzM/pFvIcxNIVRUMVTFzn5zxhKfhyPqrMI4fxw1 UhUbEKO+QgIqTNp/dZ0lTbFs5HJQn6yirWyyvQKBPmaaK+pKd0RST/T38OU2oJ/J LojRs7ugCJ+bxJqegmQrdcVqZZGbpYeK4O/5eIn8KOlgh0nUza1MyjJJemgBBWf7 HoXB8Fge -----END CERTIFICATE----- -----BEGIN PRIVATE KEY----- MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQCkPNbhkF4r3DjG lFp53qVfN4I7Reavd/TKdu55J2Jnz0VagAyAr5KIEg5WJhXqF+bRZqGV6ZI3MUbH ku8NKDa7BKoQ/bcN4z079JA+AgCIt54SxhklyCDaPP3Zh8URpqGwZ0jqpI/hcjb/ l2ApGk85gMbLDpHk9BHrDbZerVYPWMG8iaT+TT0j2MdGfjSlhtonNuBTZoiO86u6 jaSp7h/qIOoTSmJiKYBklS96a7VSjJXk0bn3SEaQO5awwPvv53YXEKP03Nv6K64b eSydqcc/JeLWFCbOnk6bNVZp68IeKc2NaCy5K4sY70fZIt/fDCnZQKwSkq3Ogqud l0SD9qc3AgMBAAECggEBAIu55uaIOFYASZ1IYaEFNpRHWVisI5Js76nAfSo9w46l 3E8eWYSx2mxBUEkipco/A3RraFVuHaMvHRR1gUMkT0vUsAs8jxwVk+cKLh1S/rlR 3f4C4yotlSWWdjE3PQXDShQWCwb1ciNPVFMmqfzOEVDOqlHe12h97TCYverWdT0f 3LZICLQsZd1WPKnPNXrsRRDCBuRLapdg+M0oJ+y6IiCdm+qM7Qvaoef6hlvm5ECz LCM92db5BKTuPOQXMx2J8mjaBgU3aHxRV08IFgs7mI6q0t0FM7LlytIAJq1Hg5QU 36zDKo8tblkPijWZWlqlZCnlarrd3Ar/BiLEiuOGDMECgYEA1GOp0KHy0mbJeN13 +TDsgP7zhmqFcuJREu2xziNJPK2S06NfGYE8vuVqBGzBroLTQ3dK7rOJs9C6IjCE mH7ZeHzfcKohpZnl443vHMSpgdh/bXTEO1aQZNbJ2hLYs8ie/VqqHR0u6YtpUqZL LgaUA0U8GnlsO55B8kyCelckmDkCgYEAxfYQMPEEzg1tg2neqEfyoeY0qQTEJTeh CPMztowSJpIyF1rQH6TaG0ZchkiAkw3W58RVDfvK72TuVlC5Kz00C2/uPnrqm0dX iMPeML5rFlG3VGCrSTnAPI+az6P65q8zodqcTtA8xoxgPOlc/lINOxiTEMxLyeGF 8GyP+sCM2u8CgYEAvMBR05OJnEky9hJEpBZBqSZrQGL8dCwDh0HtCdi8JovPd/yx 8JW1aaWywXnx6uhjXoru8hJm54IxWV8rB+d716OKY7MfMfACqWejQDratgW0wY7L MjztGGD2hLLJGYXLHjfsBPHBllaKZKRbHe1Er19hWdndQWKVEwPB1X4KjKkCgYEA nWHmN3K2djbYtRyLR1CEBtDlVuaSJmCWp23q1BuCJqYeKtEpG69NM1f6IUws5Dyh eXtuf4KKMU8V6QueW1D6OomPaJ8CO9c5MWM/F5ObwY/P58Y/ByVhvwQQeToONC5g JzKNCF+nodZigKqrIwoKuMvtx/IT4vloKd+1jA5fLYMCgYBoT3HLCyATVdDSt1TZ SbEDoLSYt23KRjQV93+INP949dYCagtgh/kTzxBopw5FljISLfdYizIRo2AzhhfP WWpILlnt19kD+sNirJVqxJacfEZsu5baWTedI/yrCuVsAs/s3/EEY6q0Qywknxtp Fwh1/8y5t14ib5fxOVhi8X1nEA== -----END PRIVATE KEY----- """, # 1 """-----BEGIN CERTIFICATE----- MIICojCCAYoCAQEwDQYJKoZIhvcNAQELBQAwFzEVMBMGA1UEAwwMbmV3cGJfdGhp bmd5MB4XDTIwMDEwMjAxNDAzM1oXDTIxMDEwMTAxNDAzM1owFzEVMBMGA1UEAwwM bmV3cGJfdGhpbmd5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAwMTn hXnpKHGAir3WYbOxefVrMA07OZNAsNa29nBwLA+NVIJNUFgquibMj7QYo8+M45oY 6LKr4yRcBryZVvyxfdr92xp8+kLeVApk2WLjkdBTRagHh9qdrY0hQmagCBN6/hLG Xug8VksQUdhX3vu6ZyMvTLfKRkDOMRVkRGRGg/dOcvom7zpqMCGYenMG2FStr6UV 3s3dlCSZZTdTX5Uoq6yfUUJE3nITGKjpnpJKqIs3PWCIxdj7INIcjJKvIdUcavIV 2hEhh60A8ltmtdpQAXVBE+U7aZgS1fGAWS2A0a3UwuP2pkQp6OyKCUVHpZatbl9F ahDN2QBzegv/rdJ1zwIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQAl4OQZ+FB9ZSUv FL/KwLNt+ONU8Sve/xiX+8vKAvgKm2FrjyK+AZPwibnu+FSt2G4ndZBx4Wvpe5V+ 
gCsbzSXlh9cDn2SRXyprt2l/8Fj4eUMaThmLKOK200/N/s2SpmBtnuflBrhNaJpw DEi2KEPuXsgvkuVzXN06j75cUHwn5LeWDAh0RalkVuGbEWBoFx9Hq8WECdlCy0YS y09+yO01qz70y88C2rPThKw8kP4bX8aFZbvsnRHsLu/8nEQNlrELcfBarPVHjJ/9 imxOdymJkV152V58voiXP/PwXhynctQbF7e+0UZ+XEGdbAbZA0BMl7z+b09Z+jF2 afm4mVox -----END CERTIFICATE----- -----BEGIN PRIVATE KEY----- MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDAxOeFeekocYCK vdZhs7F59WswDTs5k0Cw1rb2cHAsD41Ugk1QWCq6JsyPtBijz4zjmhjosqvjJFwG vJlW/LF92v3bGnz6Qt5UCmTZYuOR0FNFqAeH2p2tjSFCZqAIE3r+EsZe6DxWSxBR 2Ffe+7pnIy9Mt8pGQM4xFWREZEaD905y+ibvOmowIZh6cwbYVK2vpRXezd2UJJll N1NflSirrJ9RQkTechMYqOmekkqoizc9YIjF2Psg0hyMkq8h1Rxq8hXaESGHrQDy W2a12lABdUET5TtpmBLV8YBZLYDRrdTC4/amRCno7IoJRUellq1uX0VqEM3ZAHN6 C/+t0nXPAgMBAAECggEAF+2ZK4lZdsq4AQDVhqUuh4v+NSW/T0NHCWxto6OLWPzJ N09BV5LKIvdD9yaM1HCj9XCgXOooyfYuciuhARo20f+H+VWNY+c+/8GWiSFsTCJG 4+Oao7NwVSWqljp07Ou2Hamo9AjxzGhe6znmlmg62CiW63f45MWQkqksHA0yb5jg /onJ2//I+OI+aTKNfjt1G6h2x7oxeGTU1jJ0Hb2xSh+Mpqx9NDfb/KZyOndhSG5N xRVosQ6uV+9mqHxTTwTZurTG31uhZzarkMuqxhcHS94ub7berEc/OlqvbyMKNZ3A lzuvq0NBZhEUhAVgORAIS17r/q2BvyG4u5LFbG2p0QKBgQDeyyOl+A7xc4lPE2OL Z3KHJPP4RuUnHnWFC+bNdr5Ag8K7jcjZIcasyUom9rOR0Fpuw9wmXpp3+6fyp9bJ y6Bi5VioR0ZFP5X+nXxIN3yvgypu6AZvkhHrEFer+heGHxPlbwNKCKMbPzDZPBTZ vlC7g7xUUcpNmGhrOKr3Qq5FlwKBgQDdgCmRvsHUyzicn8TI3IJBAOcaQG0Yr/R2 FzBqNfHHx7fUZlJfKJsnu9R9VRZmBi4B7MA2xcvz4QrdZWEtY8uoYp8TAGILfW1u CP4ZHrzfDo/67Uzk2uTMTd0+JOqSm/HiVNguRPvC8EWBoFls+h129GKThMvKR1hP 1oarfAGIiQKBgQCIMAq5gHm59JMhqEt4QqMKo3cS9FtNX1wdGRpbzFMd4q0dstzs ha4Jnv3Z9YHtBzzQap9fQQMRht6yARDVx8hhy6o3K2J0IBtTSfdXubtZGkfNBb4x Y0vaseG1uam5jbO+0u5iygbSN/1nPUfNln2JMkzkCh8s8ZYavMgdX0BiPwKBgChR QL/Hog5yoy5XIoGRKaBdYrNzkKgStwObuvNKOGUt5DckHNA3Wu6DkOzzRO1zKIKv LlmJ7VLJ3qln36VcaeCPevcBddczkGyb9GxsHOLZCroY4YsykLzjW2cJXy0qd3/E A8mAQvc7ttsebciZSi2x1BOX82QxUlDN8ptaKglJAoGBAMnLN1TQB0xtWYDPGcGV 2IvgX7OTRRlMVrTvIOvP5Julux9z1r0x0cesl/jaXupsBUlLLicPyBMSBJrXlr24 mrgkodk4TdqO1VtBCZBqak97DHVezstMrbpCGlUD5jBnsHVRLERvS09QlGhqMeNL jpNQbWH9VhutzbvpYquKrhvK -----END PRIVATE KEY----- """, # 2 """-----BEGIN CERTIFICATE----- MIICojCCAYoCAQEwDQYJKoZIhvcNAQELBQAwFzEVMBMGA1UEAwwMbmV3cGJfdGhp bmd5MB4XDTIwMDEwMjAxNDAzM1oXDTIxMDEwMTAxNDAzM1owFzEVMBMGA1UEAwwM bmV3cGJfdGhpbmd5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAypqi YTni3s60Uo8vgGcFvjWWkB5CD9Fx9pW/2KcxRJ/u137Y+BG8qWMA4lgII3ZIuvo4 6rLDiXnAnDZqUtrvZ90O/gH6RyQqX3AI4EwPvCnRIIe0okRcxnxYBL/LfBY54xuv 46JRYZP4c9IImqQH9QVo2/egtEzcpbmT/mfhpf6NGQWC3Xps2BqDT2SV/DrX/wPA 8P1atE1AxNp8ENxK/cjFAteEyDZOsDSa757ZHKAdM7L8rZ1Fd2xAA1Dq7IyYpTNE IX72xytWxllcNvSUPLT+oicsSZBadc/p3moc3tR/rNdgrHKybedadru/f9Gwpa+v 0sllZlEcVPSYddAzWwIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQCmk60Nj5FPvemx DSSQjJPyJoIDpTxQ4luSzIq4hPwlUXw7dqrvHyCWgn2YVe9xZsGrT/+n376ecmgu sw4s4qVhR9bzKkTMewjC2wUooTA5v9HYsNWZy3Ah7hHPbDHlMADYobjB5/XolNUP bCM9xALEdM9DxpC4vjUZexlRKmjww9QKE22jIM+bqsK0zqDSq+zHpfHNGGcS3vva OvI6FPc1fAr3pZpVzevMSN2zufIJwjL4FT5/uzwOCaSCwgR1ztD5CSbQLTLlwIsX S7h2WF9078XumeRjKejdjEjyH4abKRq8+5LVLcjKEpg7OvktuRpPoGPCEToaAzuv h+RSQwwY -----END CERTIFICATE----- -----BEGIN PRIVATE KEY----- MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDKmqJhOeLezrRS jy+AZwW+NZaQHkIP0XH2lb/YpzFEn+7Xftj4EbypYwDiWAgjdki6+jjqssOJecCc NmpS2u9n3Q7+AfpHJCpfcAjgTA+8KdEgh7SiRFzGfFgEv8t8FjnjG6/jolFhk/hz 0giapAf1BWjb96C0TNyluZP+Z+Gl/o0ZBYLdemzYGoNPZJX8Otf/A8Dw/Vq0TUDE 2nwQ3Er9yMUC14TINk6wNJrvntkcoB0zsvytnUV3bEADUOrsjJilM0QhfvbHK1bG WVw29JQ8tP6iJyxJkFp1z+neahze1H+s12CscrJt51p2u79/0bClr6/SyWVmURxU 9Jh10DNbAgMBAAECggEBALv7Q+Rf+C7wrQDZF6LUc9CrGfq4CGVy2IGJKgqT/jOF DO9nI1rv4hNr55sbQNneWtcZaYvht2mrzNlj57zepDjDM7DcFuLBHIuWgLXT/NmC 
FyZOo3vXYBlNr8EgT2XfnXAp9UWJCmc2CtUzsIYC4dsmXMeTd8kyc5tUl4r5ybTf 1g+RTck/IGgqdfzpuTsNl79FW2rP9z111Py6dbqgQzhuSAune9dnLFvZst8dyL8j FStETMxBM6jrCF1UcKXzG7trDHiCdzJ8WUhx6opN/8OasQGndwpXto6FZuBy/AVP 4kVQNpUXImYcLEpva0MqGRHg+YN+c84C71CMchnF4aECgYEA7J2go4CkCcZNKCy5 R5XVCqNFYRHjekR+UwH8cnCa7pMKKfP+lTCiBrO2q8zwWwknRMyuycS5g/xbSpg1 L6hi92CV1YQy1/JhlQedekjejNTTuLOPKf78AFNSfc7axDnes2v4Bvcdp9gsbUIO 10cXh0tOSLE7P9y+yC86KQkFAPECgYEA2zO0M2nvbPHv2jjtymY3pflYm0HzhM/T kPtue3GxOgbEPsHffBGssShBTE3yCOX3aAONXJucMrSAPL9iwUfgfGx6ADdkwBsA OjDlkxvTbP/9trE6/lsSPtGpWRdJNHqXN4Hx7gXJizRwG7Ym+oHvIIh53aIjdFoE HLQLpxObuQsCgYAuMQ99G83qQpYpc6GwAeYXL4yJyK453kky9z5LMQRt8rKXQhS/ F0FqQYc1vsplW0IZQkQVC5yT0Z4Yz+ICLcM0O9zEVAyA78ZxC42Io9UedSXn9tXK Awc7IQkHmmxGxm1dZYSEB5X4gFEb+zted3h2ZxMfScohS3zLI70c6a/aYQKBgQCU phRuxUkrTUpFZ1PCbN0R/ezbpLbaewFTEV7T8b6oxgvxLxI6FdZRcSYO89DNvf2w GLCVe6VKMWPBTlxPDEostndpjCcTq3vU+nHE+BrBkTvh14BVGzddSFsaYpMvNm8z ojiJHH2XnCDmefkm6lRacJKL/Tcj4SNmv6YjUEXLDwKBgF8WV9lzez3d/X5dphLy 2S7osRegH99iFanw0v5VK2HqDcYO9A7AD31D9nwX46QVYfgEwa6cHtVCZbpLeJpw qXnYXe/hUU3yn5ipdNJ0Dm/ZhJPDD8TeqhnRRhxbZmsXs8EzfwB2tcUbASvjb3qA vAaPlOSU1wXqhAsG9aVs8gtL -----END PRIVATE KEY----- """, # 3 """-----BEGIN CERTIFICATE----- MIICojCCAYoCAQEwDQYJKoZIhvcNAQELBQAwFzEVMBMGA1UEAwwMbmV3cGJfdGhp bmd5MB4XDTIwMDEwMjAxNDAzNFoXDTIxMDEwMTAxNDAzNFowFzEVMBMGA1UEAwwM bmV3cGJfdGhpbmd5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAzUqQ M08E7F2ZE99bFHvpsR6LmgIJOOoGMXacTcEUhRF63E6+730FjxER2a30synv9GGS 3G9FstUmfhyimufkbTumri8Novw5CWZQLiE1rmMBI5nPcR2wAzy9z2odR6bfAwms yyc3IPYg1BEDBPZl0LCQrQRRU/rVOrbCf7IMq+ATazmBg01gXMzq2M953ieorkQX MsHVR/kyW0Q0yzhYF1OtIqbXxrdiZ+laTLWNqivj/FdegiWPCf8OcqpcpbgEjlDW gBcC/vre+0E+16nfUV8xHL5jseJMJqfT508OtHxAzp+2D7b54NvYNIvbOAP+F9gj aXy5mOvjXclK+hNmDwIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQAjZzTFKG7uoXxm BPHfQvsKHIB/Cx9zMKj6pLwJzCPHQBzKOMoUen09oq+fb77RM7WvdX0pvFgEXaJW q/ImooRMo+paf8GOZAuPwdafb2/OGdHZGZ2Cbo/ICGo1wGDCdMvbxTxrDNq1Yae+ m+2epN2pXAO1rlc7ktRkojM/qi3zXtbLjTs3IoPDXWhYPHdI1ThkneRmvxpzB1rW 2SBqj2snvyI+/3k3RHmldcdOrTlgWQ9hq05jWR8IVtRUFFVn9A+yQC3gnnLIUhwP HJWwTIPuYW25TuxFxYZXIbnAiluZL0UIjd3IAwxaafvB6uhI7v0K789DKj2vRUkY E8ptxZH4 -----END CERTIFICATE----- -----BEGIN PRIVATE KEY----- MIIEwAIBADANBgkqhkiG9w0BAQEFAASCBKowggSmAgEAAoIBAQDNSpAzTwTsXZkT 31sUe+mxHouaAgk46gYxdpxNwRSFEXrcTr7vfQWPERHZrfSzKe/0YZLcb0Wy1SZ+ HKKa5+RtO6auLw2i/DkJZlAuITWuYwEjmc9xHbADPL3Pah1Hpt8DCazLJzcg9iDU EQME9mXQsJCtBFFT+tU6tsJ/sgyr4BNrOYGDTWBczOrYz3neJ6iuRBcywdVH+TJb RDTLOFgXU60iptfGt2Jn6VpMtY2qK+P8V16CJY8J/w5yqlyluASOUNaAFwL++t77 QT7Xqd9RXzEcvmOx4kwmp9PnTw60fEDOn7YPtvng29g0i9s4A/4X2CNpfLmY6+Nd yUr6E2YPAgMBAAECggEBAIiL6uQl0AmDrBj6vHMghGzp+0MBza6MgngOA6L4JTTp ToYQ3pEe4D6rxOq7+QHeiBtNd0ilvn9XpVXGqCVOzrIVNiWvaGubRjjJU9WLA1Ct y4kpekAr1fIhScMXOsh45ub3XXZ27AVBkM5dTlvTpB8uAd0C/TFVqtR10WLsQ99h Zm9Jczgs/6InYTssnAaqdeCLAf1LbmO4zwFsJfJOeSGGT6WBwlpHwMAgPhg8OLEu kVWG7BEJ0hxcODk/es/vce9SN7BSyIzNY+qHcGtsrx/o0eO2Av/Z7ltV4Sz6UN1K 0y0OTiDyT/l62U2OugSN3wQ4xPTwlrWl7ZUHJmvpEaECgYEA+w2JoB2i1OV2JTPl Y0TKSKcZYdwn7Nwh4fxMAJNJ8UbpPqrZEo37nxqlWNJrY/jKX3wHVk4ESSTaxXgF UY7yKT0gRuD9+vE0gCbUmJQJTwbceNJUu4XrJ6SBtf72WgmphL+MtyKdwV8XltVl Yp0hkswGmxl+5+Js6Crh7WznPl8CgYEA0VYtKs2YaSmT1zraY6Fv3AIQZq012vdA 7nVxmQ6jKDdc401OWARmiv0PrZaVNiEJ1YV8KxaPrKTfwhWqxNegmEBgA1FZ66NN SAm8P9OCbt8alEaVkcATveXTeOCvfpZUO3sqZdDOiYLiLCsokHblkcenK85n0yT6 CzhTbvzDllECgYEAu9mfVy2Vv5OK2b+BLsw0SDSwa2cegL8eo0fzXqLXOzCCKqAQ GTAgTSbU/idEr+NjGhtmKg/qaQioogVyhVpenLjeQ+rqYDDHxfRIM3rhlD5gDg/j 0wUbtegEHrgOgcSlEW16zzWZsS2EKxq16BoHGx6K+tcS/FOShg5ASzWnuiUCgYEA sMz+0tLX8aG7CqHbRyBW8FMR9RY/kRMY1Q1+Bw40wMeZfSSSkYYN8T9wWWT/2rqm 
qp7V0zJ34BFUJoDUPPH84fok3Uh9EKZYpAoM4z9JP0jREwBWXMYEJnOQWtwxfFGN DLumgF2Nwtg3G6TL2s+AbtJYH4hxagQl5woIdYmnyzECgYEAsLASpou16A3uXG5J +5ZgF2appS9Yfrqfh6TKywMsGG/JuiH3djdYhbJFIRGeHIIDb4XEXOHrg/SFflas If0IjFRh9WCvQxnoRha3/pKRSc3OEka1MR/ZREK/d/LQEPmsRJVzY6ABKqmPAMDD 5CnG6Hz/rP87BiEKd1+3PGp8GCw= -----END PRIVATE KEY----- """, # 4 """-----BEGIN CERTIFICATE----- MIICojCCAYoCAQEwDQYJKoZIhvcNAQELBQAwFzEVMBMGA1UEAwwMbmV3cGJfdGhp bmd5MB4XDTIwMDEwMjAxNDAzNFoXDTIxMDEwMTAxNDAzNFowFzEVMBMGA1UEAwwM bmV3cGJfdGhpbmd5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA0sap 75YbbkEL85LFava3FrO1jpgVteQ4NGxxy1Nu9w2hPfMMeCPWjB8UfAwFk+LVPyvW LAXd1zWL5rGpQ2ytIVQlTraR5EnALA1sMcQYbFz1ISPTYB031bEN/Ch8JWYwCG5A X2H4D6BC7NgT6YyWDt8vxQnqAisPHQ/OK4ABD15CwkTyPimek2/ufYN2dapg1xhG IUD96gqetJv9bu0r869s688kADIComsYG+8KKfFN67S3rSHMIpZPuGTtoHGnVO89 XBm0vNe0UxQkJEGJzZPn0tdec0LTC4GNtTaz5JuCjx/VsJBqrnTnHHjx0wFz8pff afCimRwA+LCopxPE1QIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQBOkAnpBb3nY+dG mKCjiLqSsuEPqpNiBYR+ue/8aVDnOKLKqAyQuyRZttQ7bPpKHaw7pwyCZH8iHnt6 pMCLCftNSlV2Fa8msRmuf5AiGjUvR1M8VtHWNYE8pedWrJqUgBhF/405B99yd8CT kQJXKF18LObj7YKNsWRoMkVgqlQzWDMEqbfmy9MhuLx2EZPsTB1L0BHNGGDVBd9o cpPLUixcc12u+RPMKq8x3KgwsnUf5vX/pCnoGcCy4JahWdDgcZlf0hUKGT7PUem5 CWW8SMeqSWQX9XpE5Qlm1+W/QXdDXLbbHqDtvBeUy3iFQe3C9RSkp0qdutxkAlFk f5QHXfJ7 -----END CERTIFICATE----- -----BEGIN PRIVATE KEY----- MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDSxqnvlhtuQQvz ksVq9rcWs7WOmBW15Dg0bHHLU273DaE98wx4I9aMHxR8DAWT4tU/K9YsBd3XNYvm salDbK0hVCVOtpHkScAsDWwxxBhsXPUhI9NgHTfVsQ38KHwlZjAIbkBfYfgPoELs 2BPpjJYO3y/FCeoCKw8dD84rgAEPXkLCRPI+KZ6Tb+59g3Z1qmDXGEYhQP3qCp60 m/1u7Svzr2zrzyQAMgKiaxgb7wop8U3rtLetIcwilk+4ZO2gcadU7z1cGbS817RT FCQkQYnNk+fS115zQtMLgY21NrPkm4KPH9WwkGqudOccePHTAXPyl99p8KKZHAD4 sKinE8TVAgMBAAECggEALU5EotoqJUXYEtAenUJQ0pFoWjE4oXNf3Wzd/O1/MZ19 ZjqDGKPjbxUTKyLOZB5i5gQ/MhFEwQiifMD9eB+5CyvyJPw7Wc28f/uWoQ/cjBZj Hm979PHy2X0IW4Y8QTG462b/cUE2t+0j1ZMQnKf6bVHuC7V41mR5CC8oitMl5y5g 34yJmWXlIA0ep/WotLMqvil6DnSM/2V8Ch4SxjnzPpjbe4Kj+woucGNr4UKstZER 8iuHTsR64LjoGktRnnMwZxGZQI7EC428zsliInuWMdXe//w2chLdkirqpSrIQwSZ 3jNWStqBXGYaRg5Z1ilBvHtXxkzDzbAlzRBzqfEwwQKBgQDqYdMRrzHJaXWLdsyU 6jAuNX9tLh7PcicjP93SbPujS6mWcNb+D/au+VhWD+dZQDPRZttXck7wvKY1lw1V MK0TYI7ydf8h3DFx3Mi6ZD4JVSU1MH233C3fv/FHenDoOvMXXRjUZxaRmuzFJvzt 6QlKIfSvwT+1wrOACNfteXfZUQKBgQDmN3Uuk01qvsETPwtWBp5RNcYhS/zGEQ7o Q4K+teU453r1v8BGsQrCqulIZ3clMkDru2UroeKn1pzyVAS2AgajgXzfXh3VeZh1 vHTLP91BBYZTTWggalEN4aAkf9bxX/hA+9Bw/dzZcQW2aNV7WrYuCSvp3SDCMina anQq/PaSRQKBgHjw23HfnegZI89AENaydQQTFNqolrtiYvGcbgC7vakITMzVEwrr /9VP0pYuBKmYKGTgF0RrNnKgVX+HnxibUmOSSpCv9GNrdJQVYfpT6XL1XYqxp91s nrs7FuxUMNiUOoWOw1Yuj4W4lH4y3QaCXgnDtbfPFunaOrdRWOIv8HjRAoGAV3NT mSitbNIfR69YIAqNky3JIJbb42VRc1tJzCYOd+o+pCF96ZyRCNehnDZpZQDM9n8N 9GAfWEBHCCpwS69DVFL422TGEnSJPJglCZwt8OgnWXd7CW05cvt1OMgzHyekhxLg 4Dse7J5pXBxAlAYmVCB5xPGR4xLpISX1EOtcwr0CgYEA5rA2IUfjZYb4mvFHMKyM xWZuV9mnl3kg0ULttPeOl3ppwjgRbWpyNgOXl8nVMYzxwT/A+xCPA18P0EcgNAWc frJqQYg3NMf+f0K1wSaswUSLEVrQOj25OZJNpb21JEiNfEd5DinVVj4BtVc6KSpS kvjbn2WhEUatc3lPL3V0Fkw= -----END PRIVATE KEY----- """, # 5 """-----BEGIN CERTIFICATE----- MIICojCCAYoCAQEwDQYJKoZIhvcNAQELBQAwFzEVMBMGA1UEAwwMbmV3cGJfdGhp bmd5MB4XDTIwMDEwMjAxNTExM1oXDTIxMDEwMTAxNTExM1owFzEVMBMGA1UEAwwM bmV3cGJfdGhpbmd5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA1c5y S9IZHF9MIuwdafzhMkgP37I3RVpHEbpnPwnLFqSWelS5m2eDkwWd5SkfGjrmQ5q0 PEpqLlh3zHGw9yQjnHS3CCS1PwQ1kmwvpIK3HM5y8GM7ry1zkam8ZR4iX6Y7VG9g 9mhiVVFoVhe1gHeiC/3Mp6XeNuEiD0buM+8qZx9B21I+iwzy4wva7Gw0fJeq9G1c lq2rhpD1LlIEodimWOi7lOEkNmUiO1SvpdrGdxUDpTgbdg6r5pCGjOXLd74tAQHP 
P/LuqRNJDXtwvHtLIVQnW6wjjy4oiWZ8DXOdc9SkepwQLIF5Wh8O7MzF5hrd6Cvw SOD3EEsJbyycAob6RwIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQBDNcbKVUyGOAVm k3iVuzkkkymlTAMm/gsIs6loLJrkSqNg160FdVKJoZFjQtqoqLgLrntdCJ377nZ9 1i+yzbZsA4DA7nxj0IEdnd7rRYgGLspGqWeKSTROATeT4faLTXenecm0v2Rpxqc7 dSyeZJXOd2OoUu+Q64hzXCDXC6LNM+xZufxV9qv+8d+CipV6idSQZaUWSVuqFCwD PT0R4eWfkMMaM8QqtNot/hVCEaKT+9rG0mbpRe/b/qBy5SR0u+XgGEEIV+33L59T FXY+DpI1Dpt/bJFoUrfj6XohxdTdqYVCn1F8in98TsRcFHyH1xlkS3Y0RIiznc1C BwAoGZ4B -----END CERTIFICATE----- -----BEGIN PRIVATE KEY----- MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDVznJL0hkcX0wi 7B1p/OEySA/fsjdFWkcRumc/CcsWpJZ6VLmbZ4OTBZ3lKR8aOuZDmrQ8SmouWHfM cbD3JCOcdLcIJLU/BDWSbC+kgrccznLwYzuvLXORqbxlHiJfpjtUb2D2aGJVUWhW F7WAd6IL/cynpd424SIPRu4z7ypnH0HbUj6LDPLjC9rsbDR8l6r0bVyWrauGkPUu UgSh2KZY6LuU4SQ2ZSI7VK+l2sZ3FQOlOBt2DqvmkIaM5ct3vi0BAc8/8u6pE0kN e3C8e0shVCdbrCOPLiiJZnwNc51z1KR6nBAsgXlaHw7szMXmGt3oK/BI4PcQSwlv LJwChvpHAgMBAAECggEBAK0KLeUBgIM++Y7WDCRInzYjrn08bpE5tIU7mO4jDfQg dw1A3wtQZuOpyxW6B0siWlRis/aLv44M2cBkT3ZmEFBDAhOcKfh7fqQn3RNHG847 pDi8B4UKwxskBa7NCcLh9eirUA19hABLJ6dt/t6fdE5CNc2FZ+iAoyE8JfNwYKAd 6Fa3HqUBPNWt8ryj4ftgpMNBdfmLugEM4N20SXJA28hOq2lUcwNKQQ1xQrovl0ig iMbMWytV4gUPKC9Wra66OYIkk/K8teiUNIYA4JwAUVTs1NEWoyfwUTz1onutCkMl 5vY7JAqRoDWoSUX6FI+IHUdyqPAMdOMhC37gjrxoo2ECgYEA7trDMu6xsOwEckDh iz148kejMlnTTuCNetOFBw3njFgxISx0PrDLWmJmnHMxPv9AAjXYb2+UCCm3fj6Q OB8o4ZJm0n504qbFHcb2aI22U5hZ99ERvqx8WBnJ2RarIBmg06y0ktxq8gFR2qxF 0hWAOcDn1DWQ8QI0XBiFFcJTGtcCgYEA5SdlIXRnVZDKi5YufMAORG9i74dXUi0Y 02UoVxJ+q8VFu+TT8wrC5UQehG3gX+79Cz7hthhDqOSCv6zTyE4Evb6vf9OLgnVe E5iLF033zCxLSS9MgiZ+jTO+wK3RsapXDtGcSEk2P82Pj5seNf4Ei1GNCRlm1DbX 71wlikprHhECgYABqmLcExAIJM0vIsav2uDiB5/atQelMCmsZpcx4mXv85l8GrxA x6jTW4ZNpvv77Xm7yjZVKJkGqYvPBI6q5YS6dfPjmeAkyHbtazrCpeJUmOZftQSD qN5BGwTuT5sn4SXe9ABaWdEhGONCPBtMiLvZK0AymaEGHTbSQZWD/lPoBwKBgGhk qg2zmd/BNoSgxkzOsbE7jTbR0VX+dXDYhKgmJM7b8AjJFkWCgYcwoTZzV+RcW6rj 2q+6HhizAV2QvmpiIIbQd+Mj3EpybYk/1R2ox1qcUy/j/FbOcpihGiVtCjqF/2Mg 2rGTqMMoQl6JrBmsvyU44adjixTiZz0EHZYCkQoBAoGBAMRdmoR4mgIIWFPgSNDM ISLJxKvSFPYDLyAepLfo38NzKfPB/XuZrcOoMEWRBnLl6dNN0msuzXnPRcn1gc1t TG7db+hivAyUoRkIW3dB8pRj9dDUqO9OohjKsJxJaQCyH5vPkQFSLbTIgWrHhU+3 oSPiK/YngDV1AOmPDH7i62po -----END PRIVATE KEY----- """, #6 """-----BEGIN CERTIFICATE----- MIICojCCAYoCAQEwDQYJKoZIhvcNAQELBQAwFzEVMBMGA1UEAwwMbmV3cGJfdGhp bmd5MB4XDTIwMDEwMjAxNTExMloXDTIxMDEwMTAxNTExMlowFzEVMBMGA1UEAwwM bmV3cGJfdGhpbmd5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAojGu fQaTVT9DJWJ/zogGfrryEJXYVy9c441O5MrLlRx7nCIWIUs2NEhHDJdqJjYOTdmk K98VhdMpDPZwxjgvvZrh43lStBRIW3zZxv747rSl2VtpSqD/6UNWJe5u4SR7oga4 JfITOKHg/+ASxnOxp/iu6oT6jBL6T7KSPh6Rf2+it2rsjhktRreFDJ2hyroNq1w4 ZVNCcNPgUIyos8u9RQKAWRNchFh0p0FCS9xNrn3e+yHnt+p6mOOF2gMzfXT/M2hq KQNmc5D3yNoH2smWoz7F3XsRjIB1Ie4VWoRRaGEy7RwcwiDfcaemD0rQug6iqH7N oomF6f3R4DyvVVLUkQIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQB/8SX6qyKsOyex v3wubgN3FPyU9PqMfEzrFM6X5sax0VMVbSnekZrrXpdnXYV+3FBu2GLLQc900ojj vKD+409JIriTcwdFGdLrQPTCRWkEOae8TlXpTxuNqJfCPVNxFN0znoat1bSRsX1U K0mfEETQ3ARwlTkrF9CM+jkU3k/pnc9MoCLif8P7OAF38AmIbuTUG6Gpzy8RytJn m5AiA3sds5R0rpGUu8mFeBpT6jIA1QF2g+QNHKOQcfJdCdfqTjKw5y34hjFqbWG9 RxWGeGNZkhC/jADCt+m+R6+hlyboLuIcVp8NJw6CGbr1+k136z/Dj+Fdhm6FzF7B qULeRQJ+ -----END CERTIFICATE----- -----BEGIN PRIVATE KEY----- MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCiMa59BpNVP0Ml Yn/OiAZ+uvIQldhXL1zjjU7kysuVHHucIhYhSzY0SEcMl2omNg5N2aQr3xWF0ykM 9nDGOC+9muHjeVK0FEhbfNnG/vjutKXZW2lKoP/pQ1Yl7m7hJHuiBrgl8hM4oeD/ 4BLGc7Gn+K7qhPqMEvpPspI+HpF/b6K3auyOGS1Gt4UMnaHKug2rXDhlU0Jw0+BQ jKizy71FAoBZE1yEWHSnQUJL3E2ufd77Iee36nqY44XaAzN9dP8zaGopA2ZzkPfI 
2gfayZajPsXdexGMgHUh7hVahFFoYTLtHBzCIN9xp6YPStC6DqKofs2iiYXp/dHg PK9VUtSRAgMBAAECggEANjn0A3rqUUr4UQxwfIV/3mj0O1VN4kBEhxOcd+PRUsYW EapXycPSmII9ttj8tU/HUoHcYIqSMI7bn6jZJXxtga/BrALJAsnxMx031k8yvOQK uvPT7Q6M4NkReVcRHRbMeuxSLuWTRZDhn8qznEPb9rOvD1tsRN6nb3PdbwVbUcZh 2F6JDrTyI/Df6nrYQAWOEe2ay7tzgrNYE4vh+DW7oVmyHRgFYA+DIG5Q+7OVWeW5 bwYYPKlo4/B0L+GfMKfMVZ+5TvFWAK0YD1e/CW1Gv+i/8dWm4O7UNGg5mTnrIcy1 g5wkKbyea02/np2B/XBsSWXDl6rTDHL7ay0rH2hjEQKBgQDMKSm3miQTIcL/F2kG ieapmRtSc7cedP967IwUfjz4+pxPa4LiU47OCGp1bmUTuJAItyQyu/5O3uLpAriD PTU+oVlhqt+lI6+SJ4SIYw01/iWI3EF2STwXVnohWG1EgzuFM/EqoB+mrodNONfG UmP58vI9Is8fdugXgpTz4Yq9pQKBgQDLYJoyMVrYTvUn5oWft8ptsWZn6JZXt5Bd aXh+YhNmtCrSORL3XjcH4yjlcn7X8Op33WQTbPo7QAJ1CumJzAI88BZ/8za638xb nLueviZApCt0bNMEEdxDffxHFc5TyHE+obMKFfApbCnD0ggO6lrZ8jK9prArLOCp mRU9SSRffQKBgAjoBszeqZI4F9SfBdLmMyzU5A89wxBOFFMdfKLsOua1sBn627PZ 51Hvpg1HaptoosfujWK1NsvkB0wY9UmsYuU/jrGnDaibnO4oUSzN/WaMlsCYszZg zYFLIXrQ67tgajlOYcf1Qkw4MujYgPlC4N+njI/EM/rwagGUjcDx5uaNAoGASyqz EuYG63eTSGH89SEaohw0+yaNmnHv23aF4EAjZ4wjX3tUtTSPJk0g6ly84Nbb8d1T hZJ7kbaAsf2Mfy91jEw4JKYhjkP05c8x0OP6g12p6efmvdRUEmXX/fXjQjgNEtb0 sz+UedrOPN+9trWLSo4njsyyw+JcTpKTtQj5dokCgYEAg9Y3msg+GvR5t/rPVlKd keZkrAp0xBJZgqG7CTPXWS1FjwbAPo7x4ZOwtsgjCuE52lar4j+r2Il+CDYeLfxN h/Jfn6S9ThUh+B1PMvKMMnJUahg8cVL8uQuBcbAy8HPRK78WO2BTnje44wFAJwTc 0liuYqVxZIRlFLRl8nGqog8= -----END PRIVATE KEY----- """, #7 """-----BEGIN CERTIFICATE----- MIICojCCAYoCAQEwDQYJKoZIhvcNAQELBQAwFzEVMBMGA1UEAwwMbmV3cGJfdGhp bmd5MB4XDTIwMDEwMjAxNTExMloXDTIxMDEwMTAxNTExMlowFzEVMBMGA1UEAwwM bmV3cGJfdGhpbmd5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAu9oO cFlNukUcLfFrfkEaUiilcHLmn5OokQbj95CGd2ehQCCVwrkunYLBisthRaancFFb /yM998B0IUsKTsoLi5DAN3/SkSm6GiQIGO05E4eBPljwJ61QQMxh8+1TwQ9HTun1 ZE1lhVN1aRmI9VsbyTQLjXh9OFNLSJEKb29mXsgzYwYwNOvo+idzXpy4bMyNoGxY Y+s2FIKehNHHCv4ravDn8rf6DtDOvyN4d0/QyNws9FpAZMXmLwtBJ9exOqKFW43w 97NxgdNiTFyttrTKTi0b+9v3GVdcEZw5b2RMIKi6ZzPof6/0OlThK6C3xzFK3Bp4 PMjTfXw5yyRGVBnZZwIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQA4Ms6LqzMu757z bxISiErRls6fcnq0fpSmiPNHNKM7YwG9KHYwPT6A0UMt30zDwNOXCQBI19caGeeO MLPWa7Gcqm2XZB2jQwvLRPeFSy9fm6RzJFeyhrh/uFEwUetwYmi/cqeIFDRDBQKn bOaXkBk0AaSmI5nRYfuqpMMjaKOFIFcoADw4l9wWhv6DmnrqANzIdsvoSXi5m8RL FcZQDZyHFlHh3P3tLkmQ7ErM2/JDwWWPEEJMlDm/q47FTOQSXZksTI3WRqbbKVv3 iQlJjpgi9yAuxZwoM3M4975iWH4LCZVMCSqmKCBt1h9wv4LxqX/3kfZhRdy1gG+j 41NOSwJ/ -----END CERTIFICATE----- -----BEGIN PRIVATE KEY----- MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQC72g5wWU26RRwt 8Wt+QRpSKKVwcuafk6iRBuP3kIZ3Z6FAIJXCuS6dgsGKy2FFpqdwUVv/Iz33wHQh SwpOyguLkMA3f9KRKboaJAgY7TkTh4E+WPAnrVBAzGHz7VPBD0dO6fVkTWWFU3Vp GYj1WxvJNAuNeH04U0tIkQpvb2ZeyDNjBjA06+j6J3NenLhszI2gbFhj6zYUgp6E 0ccK/itq8Ofyt/oO0M6/I3h3T9DI3Cz0WkBkxeYvC0En17E6ooVbjfD3s3GB02JM XK22tMpOLRv72/cZV1wRnDlvZEwgqLpnM+h/r/Q6VOEroLfHMUrcGng8yNN9fDnL JEZUGdlnAgMBAAECggEALlZdlW0R9U6y4spYf65Dddy84n4VUWu0+wE+HoUyBiYz 6oOfLYdMbmIgp8H/XpT7XINVNBxXXtPEUaoXAtRoAKdWItqO8Gvgki4tKSjrGVwl j2GU69SepT1FNExoiojgSCEB/RnyXu71WVWJKSyuL/V8nAsKqGgze9T7Q/2wvNQt SQqLxZlrWF0P8WqaAiSrHV4GnDrdeF+k1KBo2+pSaDNv6cNwOyVG8EII9tqhF8kj 6nD6846ish6OqmlSisaSGopJZL1DCQzszFMxKd2+iBDY7Kn6hVIhRaNnaZUFhpKM dNh6hBqOycMepAp0sz5pdo+fxpifkoR/cPWgyC3QkQKBgQDixe9VsiZ7u2joxF/9 JcAExKhqE28OUmIwt6/j+uzYShxN6Oo9FUo3ICtAPCCFsjhvb3Qum7FspmxrqtNy fzclibZJPO8ey2PzqaiOfiVfgJmNSvoCOdgM4OqFLtRO6eSTzhJeI4VPrPcq/5la 0FuOi1WZs/Au9llqLqGSDH3UAwKBgQDUD/bSJbOk5SvNjFtFm0ClVJr66mJ5e4uN 4VGv8KGFAJ+ExIxujAukfKdwLjS1wEy2RePcshfT8Y9FVh/Q1KzzrQi3Gwmfq1G6 Dpu2HlJpaZl+9T81x2KS8GP3QNczWMe2nh7Lj+6st+b4F+6FYbVTFnHaae27sXrD XPX15+uxzQKBgGy+pBWBF4kwBo/QU4NuTdU7hNNRPGkuwl1ASH1Xv6m8aDRII8Nk 
6TDkITltW98g5oUxehI7oOpMKCO9SCZYsNY0YpBeQwCOYgDfc6/Y+A0C+x9RO/BD UsJiPLPfD/pDmNPz9sTj3bKma+RXq29sCOujD0pkiiHLCnerotkJWnGHAoGAAkCJ JoIv/jhQ1sX+0iZr8VWMr819bjzZppAWBgBQNtFi4E4WD7Z9CSopvQ9AkA2SwvzL BrT9e8q88sePXvBjRdM4nHk1CPUQ0SEGllCMH4J3ltmT6kZLzbOv3BhcMLdop4/W U+MbbcomMcxPRCtdeZxraR5m3+9qlliOZCYqYqECgYA5eLdxgyHxCS33QGFHRvXI TLAHIrr7wK1xwgkmZlLzYSQ8Oqh1UEbgoMt4ulRczP2g7TCfvANw2Sw0H2Q5a6Fj cnwVcXJ38DLg0GCPMwzE8dK7d8tKtV6kGiKy+KFvoKChPjE6uxhKKmCJaSwtQEPS vsjX3iiIgUQPsSz8RrNFfQ== -----END PRIVATE KEY----- """, #8 """-----BEGIN CERTIFICATE----- MIICojCCAYoCAQEwDQYJKoZIhvcNAQELBQAwFzEVMBMGA1UEAwwMbmV3cGJfdGhp bmd5MB4XDTIwMDEwMjAxNTExMloXDTIxMDEwMTAxNTExMlowFzEVMBMGA1UEAwwM bmV3cGJfdGhpbmd5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA5DNu CKhhl6wCbgoCkFemwJh3ATbAjhInHpvQWIFDfSK1USElCKxqosIxiBQCx3Zs2d/U GeIA7QAM2atNdXaateacEaKMmGE9LEtO0Dg5lmT43WzmGkG9NmCwK3JjAekc5S9d HKNtEQo7o8RKfj81zlDSq2kzliy98cimk24VBBGkS2Cn7Vy/mxMCqWjQazTXbpoS lXw6LiY5wFXQmXOB5GTSHvqyCtBQbOSSbJB77z/fm7bufTDObufTbJIq53WPt00Y f+JNnzkX1X0MaBCUztoZwoMaExWucMe/7xsQ46hDn6KB4b0lZk+gsK45QHxvPE1R 72+ZkkIrGS/ljIKahQIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQDib1653CneSmy2 gYzGeMlrI05Jqo3JuHNMQHzAjIrb4ee57VA4PTQa1ygGol/hVv6eTvZr3p2ospDS 5Kfwj1HLO4jSMX1Bnm1FG0naQogz2CD3xfYjbYOVRhAxpld1MNyRveIOhDRARY7N XNAaNPZ1ALrwbENSYArr18xDzgGWe/dgyRCEpCFIsztiA+7jGvrmAZgceIE8K3h3 fkvNmXBH58ZHAGTiyRriBZqS+DXrBrQOztXSJwFnOZnRt6/efeBupt8j5hxVpBLW vtjpBc23uUcbbHOY2AW2Bf+vIr4/LmJ/MheKV+maa2990vmC93tvWlFfc74mgUkW HJfXDmR6 -----END CERTIFICATE----- -----BEGIN PRIVATE KEY----- MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDkM24IqGGXrAJu CgKQV6bAmHcBNsCOEicem9BYgUN9IrVRISUIrGqiwjGIFALHdmzZ39QZ4gDtAAzZ q011dpq15pwRooyYYT0sS07QODmWZPjdbOYaQb02YLArcmMB6RzlL10co20RCjuj xEp+PzXOUNKraTOWLL3xyKaTbhUEEaRLYKftXL+bEwKpaNBrNNdumhKVfDouJjnA VdCZc4HkZNIe+rIK0FBs5JJskHvvP9+btu59MM5u59NskirndY+3TRh/4k2fORfV fQxoEJTO2hnCgxoTFa5wx7/vGxDjqEOfooHhvSVmT6CwrjlAfG88TVHvb5mSQisZ L+WMgpqFAgMBAAECggEABTdPuo7uvCLIY2+DI319aEWT4sk3mYe8sSxqlLtPqZqT fmk9iXc3cMTzkOK0NY71af19waGy17f6kzchLCAr5SCCTLzkbc87MLn/8S530oI4 VgdZMxxxkL6hCD0zGiYT7QEqJa9unMcZGeMwuLYFKtQaHKTo8vPO26n0dMY9YLxj cNUxsKLcKk8dbfKKt4B4fZgB7nU0BG9YbKYZ3iZ7/3mG+6jA6u+VYc/WHYQjTmpL oLFN7NOe3R7jIx/kJ1OqNWqsFoLpyiiWd1Mr0l3EdD1kCudptMgD8hd++nx2Yk2w K4+CpOVIN/eCxDDaAOJgYjCtOayVwUkDAxRRt9VnAQKBgQD5s1j6RJtBNTlChVxS W3WpcG4q8933AiUY/Chx0YTyopOiTi7AGUaA8AOLFBcO2npa+vzC+lvuOyrgOtVW sD10H2v5jNKlbeBp+Q9rux2LAyp4TvzdXWKhVyZrdtITF0hn6vEYNp7MtyWRFb1O 3Ie5HQBPHtzllFOMynacjOdjpQKBgQDp9TrbfOmwGWmwPKmaFKuy8BKxjJM+ct0X 4Xs1uSy9Z9Y8QlDNbNaooI8DA1NY0jDVHwemiGC4bYsBNKNRcbI0s2nr0hQMft42 P/NpugHv0YXiVz+5bfim4woTiHHbfREqchlIGo3ryClAiDU9fYZwTOtb9jPIhX3G 9v+OsoMlYQKBgQDJUQW90S5zJlwh+69xXvfAQjswOimNCpeqSzK4gTn0/YqV4v7i Nf6X2eqhaPMmMJNRYuYCtSMFMYLiAc0a9UC2rNa6/gSfB7VU+06phtTMzSKimNxa BP6OIduB7Ox2I+Fmlw8GfJMPbeHF1YcpW7e5UV58a9+g4TNzYZC7qwarWQKBgQCA FFaCbmHonCD18F/REFvm+/Lf7Ft3pp5PQouXH6bUkhIArzVZIKpramqgdaOdToSZ SAGCM8rvbFja8hwurBWpMEdeaIW9SX8RJ/Vz/fateYDYJnemZgPoKQcNJnded5t8 Jzab+J2VZODgiTDMVvnQZOu8To6OyjXPRM0nK6cMQQKBgQDyX44PHRRhEXDgJFLU qp2ODL54Qadc/thp2m+JmAvqmCCLwuYlGpRKVkLLuZW9W6RlVqarOC3VD3wX5PRZ IsyCGLi+Jbrv9JIrYUXE80xNeQVNhrrf02OW0KHbqGxRaNOmp1THPw98VUGR2J/q YAp6XUXU7LEBUrowye+Ty2o7Lg== -----END PRIVATE KEY----- """, #9 """-----BEGIN CERTIFICATE----- MIICojCCAYoCAQEwDQYJKoZIhvcNAQELBQAwFzEVMBMGA1UEAwwMbmV3cGJfdGhp bmd5MB4XDTIwMDEwMjAxNTExMVoXDTIxMDEwMTAxNTExMVowFzEVMBMGA1UEAwwM bmV3cGJfdGhpbmd5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA1k2R PWYihftppo3CoxeseFwgg7guxZVkP7aAur5uBzSeAB7sBG1G2bRrwMX71S4xPwot 
zYiEoxUrTStUqEKjL2aozfHsXnHZ7kwwUgZFDZUg+ve2tZDA3HCUr4tLYKlyFqpx 2nCouc45MjQ4wAxRl4rQxIUG2uSTzvP+xXtjoJYMIEEyCpcsRXfqfVkEUe9nrPsF 0Ibzk7Cyt75HDI4uEzBuHux0DYuGy6R02jz/vf/dIZ4WepjSY06xpblTHZgieDRX fU2+YOcvb0eDHyA8Q5p8ropK71MNIP5+kffFd90SVr4EkCA8S+cd6FdKQasRr+jF 9MUhMS4ObvlrYTG+hwIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQCy62MZ3+59/VpX c9Hsmb4/BMWt0irLJit4w4SkuYKGFLCMKZI4LN4pEkXaiE1eqF2DNS1qOvl5luty Zz4oggrqilwuFeH98o9Zeg9SYnouuypORVP/3DPbJF/jiQg5J8kJb1sy+BjRiT8I 5X6/cCBYT+MljFz5tpqWOtWTgA30e1BV8JFj8F4dgUcWsAVT/I4l9zgMLUnhcO6E wLtEE0I6aT1RHJB28ndwJzj4La98Oirw7LAEAWbExWYB90ypLaGY+JVJe3f5fijC fJpQ2mbs4syXDmb5bU2C2pGPTKZPcyx15iQrq1uHInD0facOw+pmllAFxuG96lA1 +o2VzKwP -----END CERTIFICATE----- -----BEGIN PRIVATE KEY----- MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDWTZE9ZiKF+2mm jcKjF6x4XCCDuC7FlWQ/toC6vm4HNJ4AHuwEbUbZtGvAxfvVLjE/Ci3NiISjFStN K1SoQqMvZqjN8execdnuTDBSBkUNlSD697a1kMDccJSvi0tgqXIWqnHacKi5zjky NDjADFGXitDEhQba5JPO8/7Fe2OglgwgQTIKlyxFd+p9WQRR72es+wXQhvOTsLK3 vkcMji4TMG4e7HQNi4bLpHTaPP+9/90hnhZ6mNJjTrGluVMdmCJ4NFd9Tb5g5y9v R4MfIDxDmnyuikrvUw0g/n6R98V33RJWvgSQIDxL5x3oV0pBqxGv6MX0xSExLg5u +WthMb6HAgMBAAECggEAeCyRSNwQeg/NZD/UqP6qkegft52+ZMBssinWsGH/c3z3 KVwtwCHDfGvnjPe5TAeWSCKeIsbukkFZwfGNjLmppvgrqymCAkhYDICfDDBF4uMA 1pu40sJ01Gkxh+tV/sOmnb1BEVzh0Sgq/NM6C8ActR18CugKOw+5L3G2KeoSqUbT 2hcPUsnik10KwqW737GQW4LtEQEr/iRmQkxI3+HBzvPWjFZzjOcpUph+FW5TXtaU T26mt1j+FjbdvvhCuRMY/VZBJ5h1RKU95r57F1AjW/C0RRJ8FxR1CeSy4IlmQBrh 6wAa3Tdm0k/n4ZspC9bF5eVTJEtb0AohiYZrIa8MuQKBgQD8yjCLYa41H304odCx NwPRJcmlIk5YGxPrhHAT9GEgU6n/no7YMVx1L7fNLcMjAyx54jauEU7J19Aki7eV SIdU9TwqmkOAFfM6TOEJZiOi66gABOxeK2yDyfmR6Apaw3caku4O058t4KVwHSCB DanYCMzxCBqS9jUTTyAh0fMg6wKBgQDZBkIukg3FKPor5LzkUXIKnNHYPfHbERHw piWS6GZwqhuWNlOCWxiBR4rEUU/RbFQZw/FCi5OuAk2lBC0LBmC0/Sz4/+xDdCbv uNhMOTRcy9nFVpmpIWCx4N/KmXHEuFxli/JNXux7iki74AVC9VPrAt/kCvwf06Df oDb8ljdR1QKBgQChVOD6c5Lc8IXYeN1Z3IShHH6+11AsxstFyjZFZff+y6Z5L1Z2 /7nESHoDhqs9Uy81cnv3R7CC/Ssnx8uYiLtmK0UE44Mk4d1jXeFZQEiKF+AWcw3v Y8NTsLmItxC0sH75BMDN0Z2LiA3Nqaku8+trpuI1Cjj7hgqFkkAtlXKXlQKBgBMb c/Q5s7CqHOyEZQUNDqdUiz0opwSMijHPzvsSLwK4V1lwSwXtE0k+jT8fkZF0oirq j3E2bLgjR8bBiV2xIA6PQ8hgb+K4dT0h3xlG6A9Le07egwTbBXJjxBBIVjXlrWzb V2fsdZGi6ShxXsU4aD0GscOYG/6JWV6W8oBmkVRJAoGAepIZ+OYmFjb7uxdh4EtP hluEtx5bLOLuo6c0S149omUXUhbsuyzTZS6Ip9ySDMnK3954c4Q4WJ4yQKixQNVq 78aDfy4hP/8TE/Q9CRddUof2P33PJMhVNqzTRYMpqV+zxifvtw3hoDTLKHTQxCR2 M1+O4VvokU5pBqUpGXiMDfs= -----END PRIVATE KEY----- """, #10 """-----BEGIN CERTIFICATE----- MIICojCCAYoCAQEwDQYJKoZIhvcNAQELBQAwFzEVMBMGA1UEAwwMbmV3cGJfdGhp bmd5MB4XDTIwMDEwMjAxNTExMVoXDTIxMDEwMTAxNTExMVowFzEVMBMGA1UEAwwM bmV3cGJfdGhpbmd5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAnbCU M37hG7zrCyyJEI6pZmOomnI+CozbP5KAhWSV5y7R5H6lcAEG2UDV+lCUxHT2ufOa i1H16bXyBt7VoMTHIH50S58NUCUEXcuRWVR16tr8CzcTHQAkfIrmhY2XffPilX7h aw35UkoVmXcqSDNNJD6jmvWexvmbhzVWW8Vt5Pivet2/leVuqPXB54/alSbkC74m x6X5XKQc6eyPsb1xvNBuiSpFzdqbEn7lUwj6jFTkh9tlixgmgx+J0XoQXbawyrAg rcIQcse/Ww+KBA1KSccFze+XBTbIull4boYhbJqkb6DW5bY7/me2nNxE9DRGwq+S kBsKq3YKeCf8LEhfqQIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQAD+tWGFhINYsWT ibKWlCGgBc5uB7611cLCevx1yAL6SaOECVCQXzaaXIaETSbyY03UO2yBy3Pl10FV GYXLrAWTFZsNVJm55XIibTNw1UBPNwdIoCSzAYuOgMF0GHhTTQU0hNYWstOnnE2T 6lSAZQZFkaW4ZKs6sUp42Em9Bu99PehyIgnw14qb9NPg5qKdi2GAvkImZCrGpMdK OF31U7Ob0XQ0lxykcNgG4LlUACd+QxLfNpmLBZUGfikexYa1VqBFm3oAvTt8ybNQ qr7AKXDFnW75aCBaMpQWzrstA7yYZ3D9XCd5ZNf6d08lGM/oerDAIGnZOZPJgs5U FaWPHdS9 -----END CERTIFICATE----- -----BEGIN PRIVATE KEY----- MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCdsJQzfuEbvOsL LIkQjqlmY6iacj4KjNs/koCFZJXnLtHkfqVwAQbZQNX6UJTEdPa585qLUfXptfIG 
3tWgxMcgfnRLnw1QJQRdy5FZVHXq2vwLNxMdACR8iuaFjZd98+KVfuFrDflSShWZ dypIM00kPqOa9Z7G+ZuHNVZbxW3k+K963b+V5W6o9cHnj9qVJuQLvibHpflcpBzp 7I+xvXG80G6JKkXN2psSfuVTCPqMVOSH22WLGCaDH4nRehBdtrDKsCCtwhByx79b D4oEDUpJxwXN75cFNsi6WXhuhiFsmqRvoNbltjv+Z7ac3ET0NEbCr5KQGwqrdgp4 J/wsSF+pAgMBAAECggEAPSu1ofBTRN5ZU4FYPlsJLdX1Hsy4coFHv/aF8rkdSYwp EflrFfLgBEEZgLvnqfoxh9sPFYKa4amaFL42ouIS2PEVDgzKLk/dzMDeRof0IkIG yhb4TCS1ArcjS6WsociNGi8ZJN1L3Xctv9WxSkbUYv4Fm2Qyzr8fbSjssjb5NXwD K11fsj6Pfy/mQrI0TSTlzWC7ARIlCMTWQ8G8zEU6bMFIG6DMjt2J4VgYVXUKetZA VPuS+pwkH2obQe6FLRuiNxH4GitVAASYPea6foER4AggBMRp8q8F6+WssjoyEORb 0sJxmnxhznoTRMCuTsUj6XMgmOTOnA3lQXsIB0DYcQKBgQDO6mMRVVFWzgkE9Q5/ 36n06KvGYF9TCRDL9vRC8kCqcGd1Hy6jRj0D8049KUHaN74pfWg6gsQjPkKzwKnC vxNl72tVvLqm7Fo531BGfKK/46ZvxeWMMraNW4+9LhwMPu2LN5OEdwwCgyaURpxh ktCp+RrGjz08Kn82X1jJPdwxDQKBgQDDGMvZ7ZUDGq5+RJkmHJ58lQtiaMZclmYV R9YwOxJV6ino3EYrGOtUkqiemgAACdMWE/JMJlB1/JINawJwUsZ2XDp/9jNLPgLc gphCmagaO34U/YMaJbJIK2gkCX7p8EcD+x45qWa0bEMPW38QfN/qQdUPjNmpuIiI Zleyl1TqDQKBgQCvIoat0ighsAzETGN0aqzhJdrW8xVcJA06hpFi5MdFPBTldno0 KqxUXqj3badWe94SIhqJg8teBUHSAZ3uv2o82nRgQnk99km8OD8rGi1q+9YRP1C2 5OnNJhW4y4FkABNxxZ2v/k+FBNsvn8CXefvyEm3OaMks1s+MBxIQa7KnNQKBgFwX HUo+GiN/+bPCf6P8yFa4J8qI+HEF0SPkZ9cWWx5QzP2M1FZNie++1nce7DcYbBo0 yh9lyn8W/H328AzDFckS2c5DEY1HtSQPRP3S+AWB5Y7U54h1GMV2L88q6ExWzb60 T10aeE9b9v+NydmniC5UatTPQIMbht8Tp/u18TAVAoGBAJphAfiqWWV2M5aBCSXq WxLZ71AJ0PZBmRa/9iwtccwXQpMcW6wHK3YSQxci+sB97TElRa3/onlVSpohrUtg VCvCwfSHX1LmrfWNSkoJZwCQt+YYuMqW86K0tzLzI1EMjIH9LgQvB6RR26PZQs+E jr1ZvRc+wPTq6sxCF1h9ZAfN -----END PRIVATE KEY----- """, #11 ] # To disable the pre-computed tub certs, uncomment this line. # SYSTEM_TEST_CERTS = [] def flush_but_dont_ignore(res): d = flushEventualQueue() def _done(ignored): return res d.addCallback(_done) return d def _render_config(config): """ Convert a ``dict`` of ``dict`` of ``unicode`` to an ini-format string. """ return u"\n\n".join(list( _render_config_section(k, v) for (k, v) in config.items() )) def _render_config_section(heading, values): """ Convert a ``unicode`` heading and a ``dict`` of ``unicode`` to an ini-format section as ``unicode``. """ return u"[{}]\n{}\n".format( heading, _render_section_values(values) ) def _render_section_values(values): """ Convert a ``dict`` of ``unicode`` to the body of an ini-format section as ``unicode``. """ return u"\n".join(list( u"{} = {}".format(k, v) for (k, v) in sorted(values.items()) )) @async_to_deferred async def spin_until_cleanup_done(value=None, timeout=10): """ At the end of the test, spin until the reactor has no more DelayedCalls and file descriptors (or equivalents) registered. This prevents dirty reactor errors, while also not hard-coding a fixed amount of time, so it can finish faster on faster computers. There is also a timeout: if it takes more than 10 seconds (by default) for the remaining reactor state to clean itself up, the presumption is that it will never get cleaned up and the spinning stops. Make sure to run as last thing in tearDown. """ def num_fds(): if hasattr(reactor, "handles"): # IOCP! return len(reactor.handles) else: # Normal reactor; having internal readers still registered is fine, # that's not our code. return len( set(reactor.getReaders()) - set(reactor._internalReaders) ) + len(reactor.getWriters()) for i in range(timeout * 1000): # There's a single DelayedCall for AsynchronousDeferredRunTest's # timeout... 
if (len(reactor.getDelayedCalls()) < 2 and num_fds() == 0): break await deferLater(reactor, 0.001) return value class SystemTestMixin(pollmixin.PollMixin, testutil.StallMixin): # If set to True, use Foolscap for storage protocol. If set to False, HTTP # will be used when possible. If set to None, this suggests a bug in the # test code. FORCE_FOOLSCAP_FOR_STORAGE : Optional[bool] = None # If True, reduce the timeout on connections: REDUCE_HTTP_CLIENT_TIMEOUT : bool = True def setUp(self): if os.getenv("TAHOE_DEBUG_BLOCKING") == "1": from .blocking import catch_blocking_in_event_loop catch_blocking_in_event_loop(self) self._http_client_pools = [] http_client.StorageClientFactory.start_test_mode(self._got_new_http_connection_pool) self.addCleanup(http_client.StorageClientFactory.stop_test_mode) self.port_assigner = SameProcessStreamEndpointAssigner() self.port_assigner.setUp() self.addCleanup(self.port_assigner.tearDown) self.sparent = service.MultiService() self.sparent.startService() def _got_new_http_connection_pool(self, pool): # Make sure the pool closes cached connections quickly: pool.cachedConnectionTimeout = 0.1 # Register the pool for shutdown later: self._http_client_pools.append(pool) # Disable retries: pool.retryAutomatically = False # Make a much more aggressive timeout for connections, we're connecting # locally after all... and also make sure it's lower than the delay we # add in tearDown, to prevent dirty reactor issues. getConnection = pool.getConnection def getConnectionWithTimeout(*args, **kwargs): d = getConnection(*args, **kwargs) d.addTimeout(1, reactor) return d if self.REDUCE_HTTP_CLIENT_TIMEOUT: pool.getConnection = getConnectionWithTimeout def close_idle_http_connections(self): """Close all HTTP client connections that are just hanging around.""" return defer.gatherResults( [pool.closeCachedConnections() for pool in self._http_client_pools] ) def tearDown(self): log.msg("shutting down SystemTest services") d = self.sparent.stopService() d.addBoth(flush_but_dont_ignore) d.addBoth(lambda x: self.close_idle_http_connections().addCallback(lambda _: x)) d.addBoth(spin_until_cleanup_done) return d def getdir(self, subdir): return os.path.join(self.basedir, subdir) def add_service(self, s): s.setServiceParent(self.sparent) return s def _create_introducer(self): """ :returns: (via Deferred) an Introducer instance """ iv_dir = self.getdir("introducer") if not os.path.isdir(iv_dir): _, web_port_endpoint = self.port_assigner.assign(reactor) main_location_hint, main_port_endpoint = self.port_assigner.assign(reactor) introducer_config = ( u"[node]\n" u"nickname = introducer \N{BLACK SMILING FACE}\n" + u"web.port = {}\n".format(web_port_endpoint) + u"tub.port = {}\n".format(main_port_endpoint) + u"tub.location = {}\n".format(main_location_hint) ).encode("utf-8") fileutil.make_dirs(iv_dir) fileutil.write( os.path.join(iv_dir, 'tahoe.cfg'), introducer_config, ) if SYSTEM_TEST_CERTS: os.mkdir(os.path.join(iv_dir, "private")) f = open(os.path.join(iv_dir, "private", "node.pem"), "w") f.write(SYSTEM_TEST_CERTS[0]) f.close() return create_introducer(basedir=iv_dir) def _get_introducer_web(self): with open(os.path.join(self.getdir("introducer"), "node.url"), "r") as f: return f.read().strip() @inlineCallbacks def set_up_nodes(self, NUMCLIENTS=5): """ Create an introducer and ``NUMCLIENTS`` client nodes pointed at it. All of the nodes are running in this process. 
As a side-effect, set: * ``numclients`` to ``NUMCLIENTS`` * ``introducer`` to the ``_IntroducerNode`` instance * ``introweb_url`` to the introducer's HTTP API endpoint. :param int NUMCLIENTS: The number of client nodes to create. :return: A ``Deferred`` that fires when the nodes have connected to each other. """ self.assertIn( self.FORCE_FOOLSCAP_FOR_STORAGE, (True, False), "You forgot to set FORCE_FOOLSCAP_FOR_STORAGE on {}".format(self.__class__) ) self.numclients = NUMCLIENTS self.introducer = yield self._create_introducer() self.add_service(self.introducer) self.introweb_url = self._get_introducer_web() yield self._set_up_client_nodes(self.FORCE_FOOLSCAP_FOR_STORAGE) native_server = next(iter(self.clients[0].storage_broker.get_known_servers())) if self.FORCE_FOOLSCAP_FOR_STORAGE: expected_storage_server_class = NativeStorageServer else: expected_storage_server_class = HTTPNativeStorageServer self.assertIsInstance(native_server, expected_storage_server_class) @inlineCallbacks def _set_up_client_nodes(self, force_foolscap): q = self.introducer self.introducer_furl = q.introducer_url self.clients = [] basedirs = [] for i in range(self.numclients): basedirs.append((yield self._set_up_client_node(i, force_foolscap))) # start clients[0], wait for it's tub to be ready (at which point it # will have registered the helper furl). c = yield client.create_client(basedirs[0]) c.setServiceParent(self.sparent) self.clients.append(c) with open(os.path.join(basedirs[0],"private","helper.furl"), "r") as f: helper_furl = f.read() self.helper_furl = helper_furl if self.numclients >= 2: with open(os.path.join(basedirs[1], 'tahoe.cfg'), 'a+') as f: f.write( "[client]\n" "helper.furl = {}\n".format(helper_furl) ) # this starts the rest of the clients for i in range(1, self.numclients): c = yield client.create_client(basedirs[i]) c.setServiceParent(self.sparent) self.clients.append(c) log.msg("STARTING") yield self.wait_for_connections() log.msg("CONNECTED") # now find out where the web port was self.webish_url = self.clients[0].getServiceNamed("webish").getURL() if self.numclients >=2: # and the helper-using webport self.helper_webish_url = self.clients[1].getServiceNamed("webish").getURL() def _generate_config(self, which, basedir, force_foolscap=False): config = {} allclients = set(range(self.numclients)) except1 = allclients - {1} feature_matrix = { ("client", "nickname"): except1, # Auto-assigning addresses is extremely failure prone and not # amenable to automated testing in _this_ manner. 
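# Every client therefore gets an explicit, pre-assigned tub.port and tub.location from self.port_assigner further down in this method.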
("node", "tub.port"): allclients, ("node", "tub.location"): allclients, # client 0 runs a webserver and a helper # client 1 runs a webserver but no helper ("node", "web.port"): {0, 1}, ("node", "timeout.keepalive"): {0}, ("node", "timeout.disconnect"): {1}, ("helper", "enabled"): {0}, } def setconf(config, which, section, feature, value): if which in feature_matrix.get((section, feature), {which}): config.setdefault(section, {})[feature] = value setnode = partial(setconf, config, which, "node") sethelper = partial(setconf, config, which, "helper") setnode("nickname", u"client %d \N{BLACK SMILING FACE}" % (which,)) setconf(config, which, "storage", "force_foolscap", str(force_foolscap)) setconf(config, which, "client", "force_foolscap", str(force_foolscap)) tub_location_hint, tub_port_endpoint = self.port_assigner.assign(reactor) setnode("tub.port", tub_port_endpoint) setnode("tub.location", tub_location_hint) _, web_port_endpoint = self.port_assigner.assign(reactor) setnode("web.port", web_port_endpoint) setnode("timeout.keepalive", "600") setnode("timeout.disconnect", "1800") sethelper("enabled", "True") iyaml = ("introducers:\n" " petname2:\n" " furl: %s\n") % self.introducer_furl iyaml_fn = os.path.join(basedir, "private", "introducers.yaml") fileutil.write(iyaml_fn, iyaml) return _render_config(config) def _set_up_client_node(self, which, force_foolscap): basedir = self.getdir("client%d" % (which,)) fileutil.make_dirs(os.path.join(basedir, "private")) if len(SYSTEM_TEST_CERTS) > (which + 1): f = open(os.path.join(basedir, "private", "node.pem"), "w") f.write(SYSTEM_TEST_CERTS[which + 1]) f.close() config = self._generate_config(which, basedir, force_foolscap) fileutil.write(os.path.join(basedir, 'tahoe.cfg'), config) return basedir def bounce_client(self, num): c = self.clients[num] d = c.disownServiceParent() # I think windows requires a moment to let the connection really stop # and the port number made available for re-use. TODO: examine the # behavior, see if this is really the problem, see if we can do # better than blindly waiting for a second. 
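# Restart sequence: stall for a second, re-create the client from its basedir, re-parent it to self.sparent, wait for its connections to re-establish, and (for client 0 only) re-read the web port.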
d.addCallback(self.stall, 1.0) @defer.inlineCallbacks def _stopped(res): new_c = yield client.create_client(self.getdir("client%d" % num)) self.clients[num] = new_c new_c.setServiceParent(self.sparent) d.addCallback(_stopped) d.addCallback(lambda res: self.wait_for_connections()) def _maybe_get_webport(res): if num == 0: # now find out where the web port was self.webish_url = self.clients[0].getServiceNamed("webish").getURL() d.addCallback(_maybe_get_webport) return d @defer.inlineCallbacks def add_extra_node(self, client_num, helper_furl=None, add_to_sparent=False): # usually this node is *not* parented to our self.sparent, so we can # shut it down separately from the rest, to exercise the # connection-lost code basedir = FilePath(self.getdir("client%d" % client_num)) basedir.makedirs() config = ( "[node]\n" "tub.location = {}\n" "tub.port = {}\n" "[client]\n" ).format(*self.port_assigner.assign(reactor)) if helper_furl: config += "helper.furl = %s\n" % helper_furl basedir.child("tahoe.cfg").setContent(config.encode("utf-8")) private = basedir.child("private") private.makedirs() write_introducer( basedir, "default", self.introducer_furl, ) c = yield client.create_client(basedir.path) self.clients.append(c) self.numclients += 1 if add_to_sparent: c.setServiceParent(self.sparent) else: c.startService() yield self.wait_for_connections() defer.returnValue(c) def _check_connections(self): for i, c in enumerate(self.clients): if not c.connected_to_introducer(): log.msg("%s not connected to introducer yet" % (i,)) return False sb = c.get_storage_broker() connected_servers = sb.get_connected_servers() connected_names = sorted(list( connected.get_nickname() for connected in sb.get_known_servers() if connected.is_connected() )) if len(connected_servers) != self.numclients: wanted = sorted(list( client.nickname for client in self.clients )) log.msg( "client %s storage broker connected to %s, missing %s" % ( i, connected_names, set(wanted) - set(connected_names), ) ) return False log.msg("client %s storage broker connected to %s, happy" % ( i, connected_names, )) up = c.getServiceNamed("uploader") if up._helper_furl and not up._helper: log.msg("Helper fURL but no helper") return False return True def wait_for_connections(self, ignored=None): return self.poll(self._check_connections, timeout=200) tahoe_lafs-1.20.0/src/allmydata/test/common_util.py0000644000000000000000000003451013615410400017237 0ustar00""" Ported to Python 3. """ import os import sys import time import signal from functools import ( partial, ) from random import randrange from io import ( TextIOWrapper, BytesIO, ) from twisted.internet import reactor, defer from twisted.python import failure from twisted.trial import unittest from ..util.assertutil import precondition from ..scripts import runner from allmydata.util.encodingutil import unicode_platform, get_filesystem_encoding, argv_type, unicode_to_argv def bchr(s): return bytes([s]) def skip_if_cannot_represent_filename(u): precondition(isinstance(u, str)) enc = get_filesystem_encoding() if not unicode_platform(): try: u.encode(enc) except UnicodeEncodeError: raise unittest.SkipTest("A non-ASCII filename could not be encoded on this platform.") def _getvalue(io): """ Read out the complete contents of a file-like object. 
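The stream is rewound to the beginning first, so the full contents are returned even after the CLI machinery has written to it.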
""" io.seek(0) return io.read() def maybe_unicode_to_argv(o): """Convert object to argv form if necessary.""" if isinstance(o, str): return unicode_to_argv(o) return o def run_cli_native(verb, *args, **kwargs): """ Run a Tahoe-LAFS CLI command specified as bytes (on Python 2) or Unicode (on Python 3); basically, it accepts a native string. Most code should prefer ``run_cli_unicode`` which deals with all the necessary encoding considerations. :param runner.Options options: The options instance to use to parse the given arguments. :param str verb: The command to run. For example, ``"create-node"``. :param [str] args: The arguments to pass to the command. For example, ``("--hostname=localhost",)``. :param [str] nodeargs: Extra arguments to pass to the Tahoe executable before ``verb``. :param bytes|unicode stdin: Text or bytes to pass to the command via stdin. :param NoneType|str encoding: The name of an encoding which stdout and stderr will be configured to use. ``None`` means matching default behavior for the given Python version. :param bool return_bytes: If False, stdout/stderr is native string, matching native behavior. If True, stdout/stderr are returned as bytes. """ options = kwargs.pop("options", runner.Options()) nodeargs = kwargs.pop("nodeargs", []) encoding = kwargs.pop("encoding", None) or getattr(sys.stdout, "encoding") or "utf-8" return_bytes = kwargs.pop("return_bytes", False) verb = maybe_unicode_to_argv(verb) args = [maybe_unicode_to_argv(a) for a in args] nodeargs = [maybe_unicode_to_argv(a) for a in nodeargs] precondition( all(isinstance(arg, argv_type) for arg in [verb] + nodeargs + list(args)), "arguments to run_cli must be {argv_type} -- convert using unicode_to_argv".format(argv_type=argv_type), verb=verb, args=args, nodeargs=nodeargs, ) argv = ["tahoe"] + nodeargs + [verb] + list(args) stdin = kwargs.get("stdin", "") if True: # The new behavior, the Python 3 behavior, is to accept unicode and # encode it using a specific encoding. For older versions of Python 3, # the encoding is determined from LANG (bad) but for newer Python 3, # the encoding is either LANG if it supports full Unicode, otherwise # utf-8 (good). Tests can pass in different encodings to exercise # different behaviors. if isinstance(stdin, str): stdin = stdin.encode(encoding) stdin = TextIOWrapper(BytesIO(stdin), encoding) stdout = TextIOWrapper(BytesIO(), encoding) stderr = TextIOWrapper(BytesIO(), encoding) options.stdin = stdin d = defer.succeed(argv) d.addCallback( partial( runner.parse_or_exit, options, ), stdout=stdout, stderr=stderr, ) d.addCallback( runner.dispatch, reactor, stdin=stdin, stdout=stdout, stderr=stderr, ) def _done(rc, stdout=stdout, stderr=stderr): if return_bytes: stdout = stdout.buffer stderr = stderr.buffer return 0, _getvalue(stdout), _getvalue(stderr) def _err(f, stdout=stdout, stderr=stderr): f.trap(SystemExit) if return_bytes: stdout = stdout.buffer stderr = stderr.buffer return f.value.code, _getvalue(stdout), _getvalue(stderr) d.addCallbacks(_done, _err) return d def run_cli_unicode(verb, argv, nodeargs=None, stdin=None, encoding=None): """ Run a Tahoe-LAFS CLI command. :param unicode verb: The command to run. For example, ``u"create-node"``. :param [unicode] argv: The arguments to pass to the command. For example, ``[u"--hostname=localhost"]``. :param [unicode] nodeargs: Extra arguments to pass to the Tahoe executable before ``verb``. :param unicode stdin: Text to pass to the command via stdin. 
:param NoneType|str encoding: The name of an encoding to use for all bytes/unicode conversions necessary *and* the encoding to cause stdio to declare with its ``encoding`` attribute. ``None`` means ASCII will be used and no declaration will be made at all. """ if nodeargs is None: nodeargs = [] precondition( all(isinstance(arg, str) for arg in [verb] + nodeargs + argv), "arguments to run_cli_unicode must be unicode", verb=verb, nodeargs=nodeargs, argv=argv, ) codec = encoding or "ascii" encode = lambda t: t d = run_cli_native( encode(verb), nodeargs=list(encode(arg) for arg in nodeargs), stdin=encode(stdin), encoding=encoding, *list(encode(arg) for arg in argv) ) def maybe_decode(result): code, stdout, stderr = result if isinstance(stdout, bytes): stdout = stdout.decode(codec) if isinstance(stderr, bytes): stderr = stderr.decode(codec) return code, stdout, stderr d.addCallback(maybe_decode) return d run_cli = run_cli_native def parse_cli(*argv): # This parses the CLI options (synchronously), and returns the Options # argument, or throws usage.UsageError if something went wrong. return runner.parse_options(argv) class DevNullDictionary(dict): def __setitem__(self, key, value): return def insecurerandstr(n): return b''.join(map(bchr, list(map(randrange, [0]*n, [256]*n)))) def flip_bit(good, which): """Flip the low-order bit of good[which].""" if which == -1: pieces = good[:which], good[-1:], b"" else: pieces = good[:which], good[which:which+1], good[which+1:] return pieces[0] + bchr(ord(pieces[1]) ^ 0x01) + pieces[2] def flip_one_bit(s, offset=0, size=None): """ flip one random bit of the string s, in a byte greater than or equal to offset and less than offset+size. """ precondition(isinstance(s, bytes)) if size is None: size=len(s)-offset i = randrange(offset, offset+size) result = s[:i] + bchr(ord(s[i:i+1])^(0x01<<randrange(0, 8))) + s[i+1:] return result def disconnected(self): """Disconnect the canary, to be called by test code. Can only happen once. """ if self.disconnectors is not None: for (f, args, kwargs) in list(self.disconnectors.values()): f(*args, **kwargs) self.disconnectors = None class ShouldFailMixin(object): def shouldFail(self, expected_failure, which, substring, callable, *args, **kwargs): """Assert that a function call raises some exception. This is a Deferred-friendly version of TestCase.assertRaises() . Suppose you want to verify the following function: def broken(a, b, c): if a < 0: raise TypeError('a must not be negative') return defer.succeed(b+c) You can use: d = self.shouldFail(TypeError, 'test name', 'a must not be negative', broken, -4, 5, c=12) in your test method. The 'test name' string will be included in the error message, if any, because Deferred chains frequently make it difficult to tell which assertion was tripped. The substring= argument, if not None, must appear in the 'repr' of the message wrapped by this Failure, or the test will fail. """ assert substring is None or isinstance(substring, (bytes, str)) d = defer.maybeDeferred(callable, *args, **kwargs) def done(res): if isinstance(res, failure.Failure): res.trap(expected_failure) if substring: self.failUnless(substring in str(res), "%s: substring '%s' not in '%s'" % (which, substring, str(res))) # return the Failure for further analysis, but in a form that # doesn't make the Deferred chain think that we failed.
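# (wrapping it in a one-element list counts as a successful result for the Deferred chain, while callers can still unpack and inspect the Failure)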
return [res] else: self.fail("%s was supposed to raise %s, not get '%s'" % (which, expected_failure, res)) d.addBoth(done) return d class TestMixin(SignalMixin): def setUp(self): return super(TestMixin, self).setUp() def tearDown(self): self.clean_pending(required_to_quiesce=True) return super(TestMixin, self).tearDown() def clean_pending(self, dummy=None, required_to_quiesce=True): """ This handy method cleans all pending tasks from the reactor. When writing a unit test, consider the following question: Is the code that you are testing required to release control once it has done its job, so that it is impossible for it to later come around (with a delayed reactor task) and do anything further? If so, then trial will usefully test that for you -- if the code under test leaves any pending tasks on the reactor then trial will fail it. On the other hand, some code is *not* required to release control -- some code is allowed to continuously maintain control by rescheduling reactor tasks in order to do ongoing work. Trial will incorrectly require that code to clean up all its tasks from the reactor. Most people think that such code should be amended to have an optional "shutdown" operation that releases all control, but on the contrary it is good design for some code to *not* have a shutdown operation, but instead to have a "crash-only" design in which it recovers from crash on startup. If the code under test is of the "long-running" kind, which is *not* required to shutdown cleanly in order to pass tests, then you can simply call testutil.clean_pending() at the end of the unit test, and trial will be satisfied. """ pending = reactor.getDelayedCalls() active = bool(pending) for p in pending: if p.active(): p.cancel() else: print("WEIRDNESS! pending timed call not active!") if required_to_quiesce and active: self.fail("Reactor was still active when it was required to be quiescent.") class TimezoneMixin(object): def setTimezone(self, timezone): def tzset_if_possible(): # Windows doesn't have time.tzset(). if hasattr(time, 'tzset'): time.tzset() unset = object() originalTimezone = os.environ.get('TZ', unset) def restoreTimezone(): if originalTimezone is unset: del os.environ['TZ'] else: os.environ['TZ'] = originalTimezone tzset_if_possible() os.environ['TZ'] = timezone self.addCleanup(restoreTimezone) tzset_if_possible() def have_working_tzset(self): return hasattr(time, 'tzset') __all__ = [ "TestMixin", "ShouldFailMixin", "StallMixin", "run_cli", "parse_cli", "DevNullDictionary", "insecurerandstr", "flip_bit", "flip_one_bit", "SignalMixin", "skip_if_cannot_represent_filename", "ReallyEqualMixin" ] tahoe_lafs-1.20.0/src/allmydata/test/common_web.py0000644000000000000000000000542613615410400017043 0ustar00""" Ported to Python 3. 
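This module provides small helpers for web-API tests: ``do_http`` issues an HTTP request and returns the response body, and ``render`` drives a Twisted ``Resource`` directly without a listening port.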
""" from six import ensure_str __all__ = [ "do_http", "render", ] from twisted.internet.defer import ( inlineCallbacks, returnValue, ) from twisted.web.error import ( Error, ) from twisted.python.reflect import ( fullyQualifiedName, ) from twisted.internet.defer import ( succeed, ) from twisted.web.test.requesthelper import ( DummyChannel, ) from twisted.web.error import ( UnsupportedMethod, ) from twisted.web.http import ( NOT_ALLOWED, ) from twisted.web.server import ( NOT_DONE_YET, ) import treq from ..webish import ( TahoeLAFSRequest, ) class VerboseError(Error): """Include the HTTP body response too.""" def __str__(self): return Error.__str__(self) + " " + ensure_str(self.response) @inlineCallbacks def do_http(method, url, **kwargs): """ Run HTTP query, return Deferred of body as bytes. """ response = yield treq.request(method, url, persistent=False, **kwargs) body = yield treq.content(response) # TODO: replace this with response.fail_for_status when # https://github.com/twisted/treq/pull/159 has landed if 400 <= response.code < 600: raise VerboseError( response.code, response="For request {!r} to {!r}, got: {!r}".format( method, url, body)) returnValue(body) def render(resource, query_args): """ Render (in the manner of the Twisted Web Site) a Twisted ``Resource`` against a request with the given query arguments . :param resource: The page or resource to render. :param query_args: The query arguments to put into the request being rendered. A mapping from ``bytes`` to ``list`` of ``bytes``. :return Deferred: A Deferred that fires with the rendered response body as ``bytes``. """ channel = DummyChannel() request = TahoeLAFSRequest(channel) request.method = b"GET" request.args = query_args request.prepath = [b""] request.postpath = [] try: result = resource.render(request) except UnsupportedMethod: request.setResponseCode(NOT_ALLOWED) result = b"" if isinstance(result, bytes): request.write(result) done = succeed(None) elif result == NOT_DONE_YET: if request.finished: done = succeed(None) else: done = request.notifyFinish() else: raise ValueError( "{!r} returned {!r}, required bytes or NOT_DONE_YET.".format( fullyQualifiedName(resource.render), result, ), ) def get_body(ignored): complete_response = channel.transport.written.getvalue() header, body = complete_response.split(b"\r\n\r\n", 1) return body done.addCallback(get_body) return done tahoe_lafs-1.20.0/src/allmydata/test/eliotutil.py0000644000000000000000000001145213615410400016724 0ustar00""" Tools aimed at the interaction between tests and Eliot. Ported to Python 3. """ from six import ensure_text __all__ = [ "RUN_TEST", "EliotLoggedRunTest", ] from typing import Callable from functools import ( partial, wraps, ) import attr from zope.interface import ( implementer, ) from eliot import ( ActionType, Field, ILogger, ) from eliot.testing import ( MemoryLogger, swap_logger, check_for_errors, ) from twisted.python.monkey import ( MonkeyPatcher, ) from ..util.jsonbytes import ( AnyBytesJSONEncoder ) _NAME = Field.for_types( u"name", [str], u"The name of the test.", ) RUN_TEST = ActionType( u"run-test", [_NAME], [], u"A test is run.", ) @attr.s class EliotLoggedRunTest(object): """ A *RunTest* implementation which surrounds test invocation with an Eliot-based action. This *RunTest* composes with another for convenience. :ivar case: The test case to run. :ivar handlers: Pass-through for the wrapped *RunTest*. :ivar last_resort: Pass-through for the wrapped *RunTest*. :ivar _run_tests_with_factory: A factory for the other *RunTest*. 
""" _run_tests_with_factory = attr.ib() case = attr.ib() handlers = attr.ib(default=None) last_resort = attr.ib(default=None) @classmethod def make_factory(cls, delegated_run_test_factory): return partial(cls, delegated_run_test_factory) @property def eliot_logger(self): return self.case.eliot_logger @eliot_logger.setter def eliot_logger(self, value): self.case.eliot_logger = value def addCleanup(self, *a, **kw): return self.case.addCleanup(*a, **kw) def id(self): return self.case.id() def run(self, result): """ Run the test case in the context of a distinct Eliot action. The action will finish after the test is done. It will note the name of the test being run. All messages emitted by the test will be validated. They will still be delivered to the global logger. """ # The idea here is to decorate the test method itself so that all of # the extra logic happens at the point where test/application logic is # expected to be. This `run` method is more like test infrastructure # and things do not go well when we add too much extra behavior here. # For example, exceptions raised here often just kill the whole # runner. patcher = MonkeyPatcher() # So, grab the test method. name = self.case._testMethodName original = getattr(self.case, name) decorated = with_logging(ensure_text(self.case.id()), original) patcher.addPatch(self.case, name, decorated) try: # Patch it in patcher.patch() # Then use the rest of the machinery to run it. return self._run_tests_with_factory( self.case, self.handlers, self.last_resort, ).run(result) finally: # Clean up the patching for idempotency or something. patcher.restore() def with_logging( test_id: str, test_method: Callable, ): """ Decorate a test method with additional log-related behaviors. 1. The test method will run in a distinct Eliot action. 2. Typed log messages will be validated. 3. Logged tracebacks will be added as errors. :param test_id: The full identifier of the test being decorated. :param test_method: The method itself. """ @wraps(test_method) def run_with_logging(*args, **kwargs): validating_logger = MemoryLogger(encoder=AnyBytesJSONEncoder) original = swap_logger(None) try: swap_logger(_TwoLoggers(original, validating_logger)) with RUN_TEST(name=test_id): try: return test_method(*args, **kwargs) finally: check_for_errors(validating_logger) finally: swap_logger(original) return run_with_logging @implementer(ILogger) class _TwoLoggers(object): """ Log to two loggers. A single logger can have multiple destinations so this isn't typically a useful thing to do. However, MemoryLogger has inline validation instead of destinations. That means this *is* useful to simultaneously write to the normal places and validate all written log messages. """ def __init__(self, a, b): """ :param ILogger a: One logger :param ILogger b: Another logger """ self._a = a # type: ILogger self._b = b # type: ILogger def write(self, dictionary, serializer=None): self._a.write(dictionary, serializer) self._b.write(dictionary, serializer) tahoe_lafs-1.20.0/src/allmydata/test/matchers.py0000644000000000000000000000717713615410400016531 0ustar00""" Testtools-style matchers useful to the Tahoe-LAFS test suite. Ported to Python 3. 
""" import attr from hyperlink import DecodedURL from testtools.matchers import ( Mismatch, AfterPreprocessing, MatchesStructure, MatchesDict, MatchesListwise, Always, Equals, ) from foolscap.furl import ( decode_furl, ) from allmydata.util import ( base32, ) from allmydata.node import ( read_config, ) from allmydata.crypto import ( ed25519, error, ) @attr.s class MatchesNodePublicKey(object): """ Match an object representing the node's private key. To verify, the private key is loaded from the node's private config directory at the time the match is checked. """ basedir = attr.ib() def match(self, other): """ Match a private key which is the same as the private key in the node at ``self.basedir``. :param other: A signing key (aka "private key") from ``allmydata.crypto.ed25519``. This is the key to check against the node's key. :return Mismatch: If the keys don't match. """ config = read_config(self.basedir, u"tub.port") privkey_bytes = config.get_private_config("node.privkey").encode("utf-8") private_key = ed25519.signing_keypair_from_string(privkey_bytes)[0] signature = ed25519.sign_data(private_key, b"") other_public_key = ed25519.verifying_key_from_signing_key(other) try: ed25519.verify_signature(other_public_key, signature, b"") except error.BadSignature: return Mismatch("The signature did not verify.") def matches_storage_announcement(basedir, anonymous=True, options=None): """ Match a storage announcement. :param bytes basedir: The path to the node base directory which is expected to emit the announcement. This is used to determine the key which is meant to sign the announcement. :param bool anonymous: If True, matches a storage announcement containing an anonymous access fURL. Otherwise, fails to match such an announcement. :param list[matcher]|NoneType options: If a list, matches a storage announcement containing a list of storage plugin options matching the elements of the list. If None, fails to match an announcement with storage plugin options. :return: A matcher with the requested behavior. """ announcement = { u"permutation-seed-base32": matches_base32(), } if anonymous: announcement[u"anonymous-storage-FURL"] = matches_furl() announcement[u"anonymous-storage-NURLs"] = matches_nurls() if options: announcement[u"storage-options"] = MatchesListwise(options) return MatchesStructure( # Has each of these keys with associated values that match service_name=Equals(u"storage"), ann=MatchesDict(announcement), signing_key=MatchesNodePublicKey(basedir), ) def matches_furl(): """ Match any Foolscap fURL byte string. """ return AfterPreprocessing(decode_furl, Always()) def matches_nurls(): """ Matches a sequence of NURLs. """ return AfterPreprocessing( lambda nurls: [DecodedURL.from_text(u) for u in nurls], Always() ) def matches_base32(): """ Match any base32 encoded byte string. """ return AfterPreprocessing(base32.a2b, Always()) class MatchesSameElements(object): """ Match if the two-tuple value given contains two elements that are equal to each other. """ def match(self, value): left, right = value return Equals(left).match(right) tahoe_lafs-1.20.0/src/allmydata/test/no_network.py0000644000000000000000000006000613615410400017076 0ustar00""" This contains a test harness that creates a full Tahoe grid in a single process (actually in a single MultiService) which does not use the network. It does not use an Introducer, and there are no foolscap Tubs. Each storage server puts real shares on disk, but is accessed through loopback RemoteReferences instead of over serialized SSL. 
It is not as complete as the common.SystemTestMixin framework (which does use the network), but should be considerably faster: on my laptop, it takes 50-80ms to start up, whereas SystemTestMixin takes close to 2s. This should be useful for tests which want to examine and/or manipulate the uploaded shares, checker/verifier/repairer tests, etc. The clients have no Tubs, so it is not useful for tests that involve a Helper. """ from __future__ import annotations from six import ensure_text from typing import Callable import os from base64 import b32encode from functools import ( partial, ) from zope.interface import implementer from twisted.application import service from twisted.internet import defer from twisted.python.failure import Failure from twisted.web.error import Error from foolscap.api import Referenceable, fireEventually, RemoteException from foolscap.ipb import ( IRemoteReference, ) import treq from allmydata.util.assertutil import _assert from allmydata import uri as tahoe_uri from allmydata.client import _Client from allmydata.storage.server import ( StorageServer, storage_index_to_dir, FoolscapStorageServer, ) from allmydata.util import fileutil, idlib, hashutil from allmydata.util.hashutil import permute_server_hash from allmydata.util.fileutil import abspath_expanduser_unicode from allmydata.interfaces import IStorageBroker, IServer from allmydata.storage_client import ( _StorageServer, ) from .common import ( SameProcessStreamEndpointAssigner, ) class IntentionalError(Exception): pass class Marker(object): pass fireNow = partial(defer.succeed, None) @implementer(IRemoteReference) # type: ignore # warner/foolscap#79 class LocalWrapper(object): """ A ``LocalWrapper`` presents the remote reference interface to a local object which implements a ``RemoteInterface``. """ def __init__(self, original, fireEventually=fireEventually): """ :param Callable[[], Deferred[None]] fireEventually: Get a Deferred that will fire at some point. This is used to control when ``callRemote`` calls the remote method. The default value allows the reactor to iterate before the call happens. Use ``fireNow`` to call the remote method synchronously. """ self.original = original self.broken = False self.hung_until = None self.post_call_notifier = None self.disconnectors = {} self.counter_by_methname = {} self._fireEventually = fireEventually def _clear_counters(self): self.counter_by_methname = {} def callRemoteOnly(self, methname, *args, **kwargs): d = self.callRemote(methname, *args, **kwargs) del d # explicitly ignored return None def callRemote(self, methname, *args, **kwargs): # this is ideally a Membrane, but that's too hard. We do a shallow # wrapping of inbound arguments, and per-methodname wrapping of # selected return values. 
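        # "Shallow" here means: only arguments that are themselves
        # Referenceable get wrapped in a LocalWrapper; a Referenceable
        # nested inside a list or dict argument is passed through
        # unwrapped.  Return values are wrapped only for the specific
        # method names handled in _return_membrane below.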
def wrap(a): if isinstance(a, Referenceable): return self._wrap(a) else: return a args = tuple([wrap(a) for a in args]) kwargs = dict([(k,wrap(kwargs[k])) for k in kwargs]) def _really_call(): def incr(d, k): d[k] = d.setdefault(k, 0) + 1 incr(self.counter_by_methname, methname) meth = getattr(self.original, "remote_" + methname) return meth(*args, **kwargs) def _call(): if self.broken: if self.broken is not True: # a counter, not boolean self.broken -= 1 raise IntentionalError("I was asked to break") if self.hung_until: d2 = defer.Deferred() self.hung_until.addCallback(lambda ign: _really_call()) self.hung_until.addCallback(lambda res: d2.callback(res)) def _err(res): d2.errback(res) return res self.hung_until.addErrback(_err) return d2 return _really_call() d = self._fireEventually() d.addCallback(lambda res: _call()) def _wrap_exception(f): return Failure(RemoteException(f)) d.addErrback(_wrap_exception) def _return_membrane(res): # rather than complete the difficult task of building a # fully-general Membrane (which would locate all Referenceable # objects that cross the simulated wire and replace them with # wrappers), we special-case certain methods that we happen to # know will return Referenceables. if methname == "allocate_buckets": (alreadygot, allocated) = res for shnum in allocated: allocated[shnum] = self._wrap(allocated[shnum]) if methname == "get_buckets": for shnum in res: res[shnum] = self._wrap(res[shnum]) return res d.addCallback(_return_membrane) if self.post_call_notifier: d.addCallback(self.post_call_notifier, self, methname) return d def notifyOnDisconnect(self, f, *args, **kwargs): m = Marker() self.disconnectors[m] = (f, args, kwargs) return m def dontNotifyOnDisconnect(self, marker): del self.disconnectors[marker] def _wrap(self, value): return LocalWrapper(value, self._fireEventually) def wrap_storage_server(original): # Much of the upload/download code uses rref.version (which normally # comes from rrefutil.add_version_to_remote_reference). To avoid using a # network, we want a LocalWrapper here. Try to satisfy all these # constraints at the same time. wrapper = LocalWrapper(original) wrapper.version = original.remote_get_version() return wrapper @implementer(IServer) class NoNetworkServer(object): def __init__(self, serverid, rref): self.serverid = serverid self.rref = rref def __repr__(self): return "" % self.get_name() # Special method used by copy.copy() and copy.deepcopy(). When those are # used in allmydata.immutable.filenode to copy CheckResults during # repair, we want it to treat the IServer instances as singletons. def __copy__(self): return self def __deepcopy__(self, memodict): return self def upload_permitted(self): return True def get_serverid(self): return self.serverid def get_permutation_seed(self): return self.serverid def get_lease_seed(self): return self.serverid def get_foolscap_write_enabler_seed(self): return self.serverid def get_name(self): # Other implementations return bytes. 
return idlib.shortnodeid_b2a(self.serverid).encode("utf-8") def get_longname(self): return idlib.nodeid_b2a(self.serverid) def get_nickname(self): return "nickname" def get_rref(self): return self.rref def get_storage_server(self): if self.rref is None: return None return _StorageServer(lambda: self.rref) def get_version(self): return self.rref.version def start_connecting(self, trigger_cb): raise NotImplementedError @implementer(IStorageBroker) class NoNetworkStorageBroker(object): # type: ignore # missing many methods def get_servers_for_psi(self, peer_selection_index, for_upload=True): def _permuted(server): seed = server.get_permutation_seed() return permute_server_hash(peer_selection_index, seed) return sorted(self.get_connected_servers(), key=_permuted) def get_connected_servers(self): return self.client._servers def get_nickname_for_serverid(self, serverid): return None def when_connected_enough(self, threshold): return defer.Deferred() def get_all_serverids(self): return [] # FIXME? def get_known_servers(self): return [] # FIXME? def create_no_network_client(basedir): """ :return: a Deferred yielding an instance of _Client subclass which does no actual networking but has the same API. """ basedir = abspath_expanduser_unicode(str(basedir)) fileutil.make_dirs(os.path.join(basedir, "private"), 0o700) from allmydata.client import read_config config = read_config(basedir, u'client.port') storage_broker = NoNetworkStorageBroker() client = _NoNetworkClient( config, main_tub=None, i2p_provider=None, tor_provider=None, introducer_clients=[], storage_farm_broker=storage_broker ) # this is a (pre-existing) reference-cycle and also a bad idea, see: # https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2949 storage_broker.client = client return defer.succeed(client) class _NoNetworkClient(_Client): # type: ignore # tahoe-lafs/ticket/3573 """ Overrides all _Client networking functionality to do nothing. 
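    Instances are normally obtained through ``create_no_network_client``
    above rather than constructed directly; a rough sketch (the callback
    body and ``parent`` are hypothetical)::

        d = create_no_network_client(basedir)
        d.addCallback(lambda client: client.setServiceParent(parent))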
""" def init_connections(self): pass def create_main_tub(self): pass def init_introducer_client(self): pass def create_log_tub(self): pass def setup_logging(self): pass def startService(self): service.MultiService.startService(self) def stopService(self): return service.MultiService.stopService(self) def init_helper(self): pass def init_key_gen(self): pass def init_storage(self): pass def init_client_storage_broker(self): self.storage_broker = NoNetworkStorageBroker() self.storage_broker.client = self def init_stub_client(self): pass #._servers will be set by the NoNetworkGrid which creates us class SimpleStats(object): def __init__(self): self.counters = {} self.stats_producers = [] def count(self, name, delta=1): val = self.counters.setdefault(name, 0) self.counters[name] = val + delta def register_producer(self, stats_producer): self.stats_producers.append(stats_producer) def get_stats(self): stats = {} for sp in self.stats_producers: stats.update(sp.get_stats()) ret = { 'counters': self.counters, 'stats': stats } return ret class NoNetworkGrid(service.MultiService): def __init__(self, basedir, num_clients, num_servers, client_config_hooks, port_assigner): service.MultiService.__init__(self) # We really need to get rid of this pattern here (and # everywhere) in Tahoe where "async work" is started in # __init__ For now, we at least keep the errors so they can # cause tests to fail less-improperly (see _check_clients) self._setup_errors = [] self.port_assigner = port_assigner self.basedir = basedir fileutil.make_dirs(basedir) self.servers_by_number = {} # maps to StorageServer instance self.wrappers_by_id = {} # maps to wrapped StorageServer instance self.proxies_by_id = {} # maps to IServer on which .rref is a wrapped # StorageServer self.clients = [] self.client_config_hooks = client_config_hooks for i in range(num_servers): ss = self.make_server(i) self.add_server(i, ss) self.rebuild_serverlist() for i in range(num_clients): d = self.make_client(i) d.addCallback(lambda c: self.clients.append(c)) def _bad(f): self._setup_errors.append(f) d.addErrback(_bad) def _check_clients(self): """ The anti-pattern of doing async work in __init__ means we need to check if that work completed successfully. 
This method either returns nothing or raises an exception in case __init__ failed to complete properly """ if self._setup_errors: self._setup_errors[0].raiseException() @defer.inlineCallbacks def make_client(self, i, write_config=True): clientid = hashutil.tagged_hash(b"clientid", b"%d" % i)[:20] clientdir = os.path.join(self.basedir, "clients", idlib.shortnodeid_b2a(clientid)) fileutil.make_dirs(clientdir) tahoe_cfg_path = os.path.join(clientdir, "tahoe.cfg") if write_config: from twisted.internet import reactor _, port_endpoint = self.port_assigner.assign(reactor) with open(tahoe_cfg_path, "w") as f: f.write("[node]\n") f.write("nickname = client-%d\n" % i) f.write("web.port = {}\n".format(port_endpoint)) f.write("[storage]\n") f.write("enabled = false\n") else: _assert(os.path.exists(tahoe_cfg_path), tahoe_cfg_path=tahoe_cfg_path) c = None if i in self.client_config_hooks: # this hook can either modify tahoe.cfg, or return an # entirely new Client instance c = self.client_config_hooks[i](clientdir) if not c: c = yield create_no_network_client(clientdir) c.nodeid = clientid c.short_nodeid = b32encode(clientid).lower()[:8] c._servers = self.all_servers # can be updated later c.setServiceParent(self) defer.returnValue(c) def make_server(self, i, readonly=False): serverid = hashutil.tagged_hash(b"serverid", b"%d" % i)[:20] serverdir = os.path.join(self.basedir, "servers", idlib.shortnodeid_b2a(serverid), "storage") fileutil.make_dirs(serverdir) ss = StorageServer(serverdir, serverid, stats_provider=SimpleStats(), readonly_storage=readonly) ss._no_network_server_number = i return ss def add_server(self, i, ss): # to deal with the fact that all StorageServers are named 'storage', # we interpose a middleman middleman = service.MultiService() middleman.setServiceParent(self) ss.setServiceParent(middleman) serverid = ss.my_nodeid self.servers_by_number[i] = ss wrapper = wrap_storage_server(FoolscapStorageServer(ss)) self.wrappers_by_id[serverid] = wrapper self.proxies_by_id[serverid] = NoNetworkServer(serverid, wrapper) self.rebuild_serverlist() def get_all_serverids(self): return list(self.proxies_by_id.keys()) def rebuild_serverlist(self): self._check_clients() self.all_servers = frozenset(list(self.proxies_by_id.values())) for c in self.clients: c._servers = self.all_servers def remove_server(self, serverid): # it's enough to remove the server from c._servers (we don't actually # have to detach and stopService it) for i,ss in list(self.servers_by_number.items()): if ss.my_nodeid == serverid: del self.servers_by_number[i] break del self.wrappers_by_id[serverid] del self.proxies_by_id[serverid] self.rebuild_serverlist() return ss def break_server(self, serverid, count=True): # mark the given server as broken, so it will throw exceptions when # asked to hold a share or serve a share. If count= is a number, # throw that many exceptions before starting to work again. self.wrappers_by_id[serverid].broken = count def hang_server(self, serverid): # hang the given server ss = self.wrappers_by_id[serverid] assert ss.hung_until is None ss.hung_until = defer.Deferred() def unhang_server(self, serverid): # unhang the given server ss = self.wrappers_by_id[serverid] assert ss.hung_until is not None ss.hung_until.callback(None) ss.hung_until = None def nuke_from_orbit(self): """ Empty all share directories in this grid. 
It's the only way to be sure ;-) """ for server in list(self.servers_by_number.values()): for prefixdir in os.listdir(server.sharedir): if prefixdir != 'incoming': fileutil.rm_dir(os.path.join(server.sharedir, prefixdir)) class GridTestMixin(object): def setUp(self): self.s = service.MultiService() self.s.startService() return super(GridTestMixin, self).setUp() def tearDown(self): return defer.gatherResults([ self.s.stopService(), defer.maybeDeferred(super(GridTestMixin, self).tearDown), ]) def set_up_grid(self, num_clients=1, num_servers=10, client_config_hooks=None, oneshare=False): """ Create a Tahoe-LAFS storage grid. :param num_clients: See ``NoNetworkGrid`` :param num_servers: See `NoNetworkGrid`` :param client_config_hooks: See ``NoNetworkGrid`` :param bool oneshare: If ``True`` then the first client node is configured with ``n == k == happy == 1``. :return: ``None`` """ if client_config_hooks is None: client_config_hooks = {} # self.basedir must be set port_assigner = SameProcessStreamEndpointAssigner() port_assigner.setUp() self.addCleanup(port_assigner.tearDown) self.g = NoNetworkGrid(self.basedir, num_clients=num_clients, num_servers=num_servers, client_config_hooks=client_config_hooks, port_assigner=port_assigner, ) self.g.setServiceParent(self.s) if oneshare: c = self.get_client(0) c.encoding_params["k"] = 1 c.encoding_params["happy"] = 1 c.encoding_params["n"] = 1 self._record_webports_and_baseurls() def _record_webports_and_baseurls(self): self.g._check_clients() self.client_webports = [c.getServiceNamed("webish").getPortnum() for c in self.g.clients] self.client_baseurls = [c.getServiceNamed("webish").getURL() for c in self.g.clients] def get_client_config(self, i=0): self.g._check_clients() return self.g.clients[i].config def get_clientdir(self, i=0): # ideally, use something get_client_config() only, we # shouldn't need to manipulate raw paths.. return self.get_client_config(i).get_config_path() def get_client(self, i=0): self.g._check_clients() return self.g.clients[i] def restart_client(self, i=0): self.g._check_clients() client = self.g.clients[i] d = defer.succeed(None) d.addCallback(lambda ign: self.g.removeService(client)) @defer.inlineCallbacks def _make_client(ign): c = yield self.g.make_client(i, write_config=False) self.g.clients[i] = c self._record_webports_and_baseurls() d.addCallback(_make_client) return d def get_serverdir(self, i): return self.g.servers_by_number[i].storedir def iterate_servers(self): for i in sorted(self.g.servers_by_number.keys()): ss = self.g.servers_by_number[i] yield (i, ss, ss.storedir) def find_uri_shares(self, uri): si = tahoe_uri.from_string(uri).get_storage_index() prefixdir = storage_index_to_dir(si) shares = [] for i,ss in list(self.g.servers_by_number.items()): serverid = ss.my_nodeid basedir = os.path.join(ss.sharedir, prefixdir) if not os.path.exists(basedir): continue for f in os.listdir(basedir): try: shnum = int(f) shares.append((shnum, serverid, os.path.join(basedir, f))) except ValueError: pass return sorted(shares) def copy_shares(self, uri: bytes) -> dict[bytes, bytes]: """ Read all of the share files for the given capability from the storage area of the storage servers created by ``set_up_grid``. :param bytes uri: A Tahoe-LAFS data capability. :return: A ``dict`` mapping share file names to share file contents. 
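        A common pattern (hypothetical sketch) is to snapshot the shares
        before a destructive step and put them back afterwards with
        ``restore_all_shares``::

            shares = self.copy_shares(self.uri)
            # ... delete or corrupt share files on disk ...
            self.restore_all_shares(shares)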
""" shares = {} for (shnum, serverid, sharefile) in self.find_uri_shares(uri): with open(sharefile, "rb") as f: shares[sharefile] = f.read() return shares def restore_all_shares(self, shares): for sharefile, data in list(shares.items()): with open(sharefile, "wb") as f: f.write(data) def delete_share(self, sharenum_and_serverid_and_sharefile): (shnum, serverid, sharefile) = sharenum_and_serverid_and_sharefile os.unlink(sharefile) def delete_shares_numbered(self, uri, shnums): for (i_shnum, i_serverid, i_sharefile) in self.find_uri_shares(uri): if i_shnum in shnums: os.unlink(i_sharefile) def delete_all_shares(self, serverdir): sharedir = os.path.join(serverdir, "shares") for prefixdir in os.listdir(sharedir): if prefixdir != 'incoming': fileutil.rm_dir(os.path.join(sharedir, prefixdir)) def corrupt_share(self, sharenum_and_serverid_and_sharefile, corruptor_function): (shnum, serverid, sharefile) = sharenum_and_serverid_and_sharefile with open(sharefile, "rb") as f: sharedata = f.read() corruptdata = corruptor_function(sharedata) with open(sharefile, "wb") as f: f.write(corruptdata) def corrupt_shares_numbered(self, uri, shnums, corruptor, debug=False): for (i_shnum, i_serverid, i_sharefile) in self.find_uri_shares(uri): if i_shnum in shnums: with open(i_sharefile, "rb") as f: sharedata = f.read() corruptdata = corruptor(sharedata, debug=debug) with open(i_sharefile, "wb") as f: f.write(corruptdata) def corrupt_all_shares(self, uri: bytes, corruptor: Callable[[bytes, bool], bytes], debug: bool=False): """ Apply ``corruptor`` to the contents of all share files associated with a given capability and replace the share file contents with its result. """ for (i_shnum, i_serverid, i_sharefile) in self.find_uri_shares(uri): with open(i_sharefile, "rb") as f: sharedata = f.read() corruptdata = corruptor(sharedata, debug) with open(i_sharefile, "wb") as f: f.write(corruptdata) @defer.inlineCallbacks def GET(self, urlpath, followRedirect=False, return_response=False, method="GET", clientnum=0, **kwargs): # if return_response=True, this fires with (data, statuscode, # respheaders) instead of just data. url = self.client_baseurls[clientnum] + ensure_text(urlpath) response = yield treq.request(method, url, persistent=False, allow_redirects=followRedirect, **kwargs) data = yield response.content() if return_response: # we emulate the old HTTPClientGetFactory-based response, which # wanted a tuple of (bytestring of data, bytestring of response # code like "200" or "404", and a # twisted.web.http_headers.Headers instance). Fortunately treq's # response.headers has one. defer.returnValue( (data, str(response.code), response.headers) ) if 400 <= response.code < 600: raise Error(response.code, response=data) defer.returnValue(data) def PUT(self, urlpath, **kwargs): return self.GET(urlpath, method="PUT", **kwargs) tahoe_lafs-1.20.0/src/allmydata/test/storage_plugin.py0000644000000000000000000000622113615410400017732 0ustar00""" A storage server plugin the test suite can use to validate the functionality. Ported to Python 3. 
""" from six import ensure_str import attr from zope.interface import ( implementer, ) from twisted.internet.defer import ( succeed, ) from twisted.web.resource import ( Resource, ) from twisted.web.static import ( Data, ) from foolscap.api import ( RemoteInterface, ) from allmydata.interfaces import ( IFoolscapStoragePlugin, IStorageServer, ) from allmydata.client import ( AnnounceableStorageServer, ) from allmydata.util.jsonbytes import ( dumps, ) class RIDummy(RemoteInterface): __remote_name__ = "RIDummy.tahoe.allmydata.com" def just_some_method(): """ Just some method so there is something callable on this object. We won't pretend to actually offer any storage capabilities. """ # type ignored due to missing stubs for Twisted # https://twistedmatrix.com/trac/ticket/9717 @implementer(IFoolscapStoragePlugin) # type: ignore @attr.s class DummyStorage(object): name = attr.ib() @property def _client_section_name(self): return u"storageclient.plugins.{}".format(self.name) def get_storage_server(self, configuration, get_anonymous_storage_server): if u"invalid" in configuration: raise Exception("The plugin is unhappy.") announcement = {u"value": configuration.get(u"some", u"default-value")} storage_server = DummyStorageServer(get_anonymous_storage_server) return succeed( AnnounceableStorageServer( announcement, storage_server, ), ) def get_storage_client(self, configuration, announcement, get_rref): return DummyStorageClient( get_rref, dict(configuration.items(self._client_section_name, [])), announcement, ) def get_client_resource(self, configuration): """ :return: A static data resource that produces the given configuration when rendered, as an aid to testing. """ items = configuration.items(self._client_section_name, []) resource = Data( dumps(dict(items)).encode("utf-8"), ensure_str("text/json"), ) # Give it some dynamic stuff too. resource.putChild(b"counter", GetCounter()) return resource class GetCounter(Resource, object): """ ``GetCounter`` is a resource that returns a count of the number of times it has rendered a response to a GET request. :ivar int value: The number of ``GET`` requests rendered so far. """ value = 0 def render_GET(self, request): self.value += 1 return dumps({"value": self.value}).encode("utf-8") @implementer(RIDummy) @attr.s(frozen=True) class DummyStorageServer(object): # type: ignore # warner/foolscap#78 get_anonymous_storage_server = attr.ib() def remote_just_some_method(self): pass @implementer(IStorageServer) @attr.s class DummyStorageClient(object): # type: ignore # incomplete implementation get_rref = attr.ib() configuration = attr.ib() announcement = attr.ib() tahoe_lafs-1.20.0/src/allmydata/test/strategies.py0000644000000000000000000000514213615410400017063 0ustar00""" Hypothesis strategies use for testing Tahoe-LAFS. Ported to Python 3. """ from hypothesis.strategies import ( one_of, builds, binary, integers, ) from ..uri import ( WriteableSSKFileURI, WriteableMDMFFileURI, DirectoryURI, MDMFDirectoryURI, ) from allmydata.util.base32 import ( b2a, ) def write_capabilities(): """ Build ``IURI`` providers representing all kinds of write capabilities. """ return one_of([ ssk_capabilities(), mdmf_capabilities(), dir2_capabilities(), dir2_mdmf_capabilities(), ]) def ssk_capabilities(): """ Build ``WriteableSSKFileURI`` instances. """ return builds( WriteableSSKFileURI, ssk_writekeys(), ssk_fingerprints(), ) def _writekeys(size=16): """ Build ``bytes`` representing write keys. 
""" return binary(min_size=size, max_size=size) def ssk_writekeys(): """ Build ``bytes`` representing SSK write keys. """ return _writekeys() def _fingerprints(size=32): """ Build ``bytes`` representing fingerprints. """ return binary(min_size=size, max_size=size) def ssk_fingerprints(): """ Build ``bytes`` representing SSK fingerprints. """ return _fingerprints() def mdmf_capabilities(): """ Build ``WriteableMDMFFileURI`` instances. """ return builds( WriteableMDMFFileURI, mdmf_writekeys(), mdmf_fingerprints(), ) def mdmf_writekeys(): """ Build ``bytes`` representing MDMF write keys. """ return _writekeys() def mdmf_fingerprints(): """ Build ``bytes`` representing MDMF fingerprints. """ return _fingerprints() def dir2_capabilities(): """ Build ``DirectoryURI`` instances. """ return builds( DirectoryURI, ssk_capabilities(), ) def dir2_mdmf_capabilities(): """ Build ``MDMFDirectoryURI`` instances. """ return builds( MDMFDirectoryURI, mdmf_capabilities(), ) def offsets(min_value=0, max_value=2 ** 16): """ Build ``int`` values that could be used as valid offsets into a sequence (such as share data in a share file). """ return integers(min_value, max_value) def lengths(min_value=1, max_value=2 ** 16): """ Build ``int`` values that could be used as valid lengths of data (such as share data in a share file). """ return integers(min_value, max_value) def base32text(): """ Build text()s that are valid base32 """ return builds( lambda b: str(b2a(b), "ascii"), binary(), ) tahoe_lafs-1.20.0/src/allmydata/test/test_abbreviate.py0000644000000000000000000001267113615410400020061 0ustar00""" Tests for allmydata.util.abbreviate. Ported to Python 3. """ from datetime import timedelta from twisted.trial import unittest from allmydata.util import abbreviate class Abbreviate(unittest.TestCase): def test_abbrev_time_1s(self): diff = timedelta(seconds=1) s = abbreviate.abbreviate_time(diff) self.assertEqual('1 second ago', s) def test_abbrev_time_25s(self): diff = timedelta(seconds=25) s = abbreviate.abbreviate_time(diff) self.assertEqual('25 seconds ago', s) def test_abbrev_time_future_5_minutes(self): diff = timedelta(minutes=-5) s = abbreviate.abbreviate_time(diff) self.assertEqual('5 minutes in the future', s) def test_abbrev_time_hours(self): diff = timedelta(hours=4) s = abbreviate.abbreviate_time(diff) self.assertEqual('4 hours ago', s) def test_abbrev_time_day(self): diff = timedelta(hours=49) # must be more than 2 days s = abbreviate.abbreviate_time(diff) self.assertEqual('2 days ago', s) def test_abbrev_time_month(self): diff = timedelta(days=91) s = abbreviate.abbreviate_time(diff) self.assertEqual('3 months ago', s) def test_abbrev_time_year(self): diff = timedelta(weeks=(5 * 52) + 1) s = abbreviate.abbreviate_time(diff) self.assertEqual('5 years ago', s) def test_time(self): a = abbreviate.abbreviate_time self.failUnlessEqual(a(None), "unknown") self.failUnlessEqual(a(0), "0 seconds") self.failUnlessEqual(a(1), "1 second") self.failUnlessEqual(a(2), "2 seconds") self.failUnlessEqual(a(119), "119 seconds") MIN = 60 self.failUnlessEqual(a(2*MIN), "2 minutes") self.failUnlessEqual(a(60*MIN), "60 minutes") self.failUnlessEqual(a(179*MIN), "179 minutes") HOUR = 60*MIN self.failUnlessEqual(a(180*MIN), "3 hours") self.failUnlessEqual(a(4*HOUR), "4 hours") DAY = 24*HOUR MONTH = 30*DAY self.failUnlessEqual(a(2*DAY), "2 days") self.failUnlessEqual(a(2*MONTH), "2 months") YEAR = 365*DAY self.failUnlessEqual(a(5*YEAR), "5 years") def test_space(self): tests_si = [(None, "unknown"), (0, "0 B"), (1, "1 B"), 
(999, "999 B"), (1000, "1000 B"), (1023, "1023 B"), (1024, "1.02 kB"), (20*1000, "20.00 kB"), (1024*1024, "1.05 MB"), (1000*1000, "1.00 MB"), (1000*1000*1000, "1.00 GB"), (1000*1000*1000*1000, "1.00 TB"), (1000*1000*1000*1000*1000, "1.00 PB"), (1000*1000*1000*1000*1000*1000, "1.00 EB"), (1234567890123456789, "1.23 EB"), ] for (x, expected) in tests_si: got = abbreviate.abbreviate_space(x, SI=True) self.failUnlessEqual(got, expected) tests_base1024 = [(None, "unknown"), (0, "0 B"), (1, "1 B"), (999, "999 B"), (1000, "1000 B"), (1023, "1023 B"), (1024, "1.00 kiB"), (20*1024, "20.00 kiB"), (1000*1000, "976.56 kiB"), (1024*1024, "1.00 MiB"), (1024*1024*1024, "1.00 GiB"), (1024*1024*1024*1024, "1.00 TiB"), (1000*1000*1000*1000*1000, "909.49 TiB"), (1024*1024*1024*1024*1024, "1.00 PiB"), (1024*1024*1024*1024*1024*1024, "1.00 EiB"), (1234567890123456789, "1.07 EiB"), ] for (x, expected) in tests_base1024: got = abbreviate.abbreviate_space(x, SI=False) self.failUnlessEqual(got, expected) self.failUnlessEqual(abbreviate.abbreviate_space_both(1234567), "(1.23 MB, 1.18 MiB)") def test_parse_space(self): p = abbreviate.parse_abbreviated_size self.failUnlessEqual(p(""), None) self.failUnlessEqual(p(None), None) self.failUnlessEqual(p("123"), 123) self.failUnlessEqual(p("123B"), 123) self.failUnlessEqual(p("2K"), 2000) self.failUnlessEqual(p("2kb"), 2000) self.failUnlessEqual(p("2KiB"), 2048) self.failUnlessEqual(p("10MB"), 10*1000*1000) self.failUnlessEqual(p("10MiB"), 10*1024*1024) self.failUnlessEqual(p("5G"), 5*1000*1000*1000) self.failUnlessEqual(p("4GiB"), 4*1024*1024*1024) self.failUnlessEqual(p("3TB"), 3*1000*1000*1000*1000) self.failUnlessEqual(p("3TiB"), 3*1024*1024*1024*1024) self.failUnlessEqual(p("6PB"), 6*1000*1000*1000*1000*1000) self.failUnlessEqual(p("6PiB"), 6*1024*1024*1024*1024*1024) self.failUnlessEqual(p("9EB"), 9*1000*1000*1000*1000*1000*1000) self.failUnlessEqual(p("9EiB"), 9*1024*1024*1024*1024*1024*1024) e = self.failUnlessRaises(ValueError, p, "12 cubits") self.failUnlessIn("12 cubits", str(e)) e = self.failUnlessRaises(ValueError, p, "1 BB") self.failUnlessIn("1 BB", str(e)) e = self.failUnlessRaises(ValueError, p, "fhtagn") self.failUnlessIn("fhtagn", str(e)) tahoe_lafs-1.20.0/src/allmydata/test/test_auth.py0000644000000000000000000002746713615410400016727 0ustar00""" Ported to Python 3. 
""" from typing import Literal from hypothesis import ( given, ) from hypothesis.strategies import ( text, characters, lists, ) from twisted.trial import unittest from twisted.python import filepath from twisted.cred import error, credentials from twisted.conch import error as conch_error from twisted.conch.ssh import keys from allmydata.frontends import auth from allmydata.util.fileutil import abspath_expanduser_unicode DUMMY_KEY = keys.Key.fromString("""\ -----BEGIN RSA PRIVATE KEY----- MIICXQIBAAKBgQDEP3DYiukOu+NrUlBZeLL9JoHkK5nSvINYfeOQWYVW9J5NG485 pZFVUQKzvvht34Ihj4ucrrvj7vOp+FFvzxI+zHKBpDxyJwV96dvWDAZMjxTxL7iV 8HcO7hqgtQ/Xk1Kjde5lH3EOEDs3IhFHA+sox9y6i4A5NUr2AJZSHiOEVwIDAQAB AoGASrrNwefDr7SkeS2zIx7vKa8ML1LbFIBsk7n8ee9c8yvbTAl+lLkTiqV6ne/O sig2aYk75MI1Eirf5o2ElUsI6u36i6AeKL2u/W7tLBVijmBB8dTiWZ5gMOARWt8w daF2An2826YdcU+iNZ7Yi0q4xtlxHQn3JcNNWxicphLvt0ECQQDtajJ/bK+Nqd9j /WGvqYcMzkkorQq/0+MQYhcIwDlpf2Xoi45tP4HeoBubeJmU5+jXpXmdP5epWpBv k3ZCwV7pAkEA05xBP2HTdwRFTJov5I/w7uKOrn7mj7DCvSjQFCufyPOoCJJMeBSq tfCQlHFtwlkyNfiSbhtgZ0Pp6ovL+1RBPwJBAOlFRBKxrpgpxcXQK5BWqMwrT/S4 eWxb+6mYR3ugq4h91Zq0rJ+pG6irdhS/XV/SsZRZEXIxDoom4u3OXQ9gQikCQErM ywuaiuNhMRXY0uEaOHJYx1LLLLjSJKQ0zwiyOvMPnfAZtsojlAxoEtNGHSQ731HQ ogIlzzfxe7ga3mni6IUCQQCwNK9zwARovcQ8nByqotGQzohpl+1b568+iw8GXP2u dBSD8940XU3YW+oeq8e+p3yQ2GinHfeJ3BYQyNQLuMAJ -----END RSA PRIVATE KEY----- """) DUMMY_KEY_DSA = keys.Key.fromString("""\ -----BEGIN OPENSSH PRIVATE KEY----- b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAABsQAAAAdzc2gtZH NzAAAAgQDKMh/ELaiP21LYRBuPbUy7dUhv/XZwV7aS1LzxSP+KaJvtDOei8X76XEAfkqX+ aGh9eup+BLkezrV6LlpO9uPzhY8ChlKpkvw5PZKv/2agSrVxZyG7yEzHNtSBQXE6qNMwIk N/ycXLGCqyAhQSzRhLz9ETNaslRDLo7YyVWkiuAQAAABUA5nTatFKux5EqZS4EarMWFRBU i1UAAACAFpkkK+JsPixSTPyn0DNMoGKA0Klqy8h61Ds6pws+4+aJQptUBshpwNw1ypo7MO +goDZy3wwdWtURTPGMgesNdEfxp8L2/kqE4vpMK0myoczCqOiWMeNB/x1AStbSkBI8WmHW 2htgsC01xbaix/FrA3edK8WEyv+oIxlbV1FkrPkAAACANb0EpCc8uoR4/32rO2JLsbcLBw H5wc2khe7AKkIa9kUknRIRvoCZUtXF5XuXXdRmnpVEm2KcsLdtZjip43asQcqgt0Kz3nuF kAf7bI98G1waFUimcCSPsal4kCmW2HC11sg/BWOt5qczX/0/3xVxpo6juUeBq9ncnFTvPX 5fOlEAAAHoJkFqHiZBah4AAAAHc3NoLWRzcwAAAIEAyjIfxC2oj9tS2EQbj21Mu3VIb/12 cFe2ktS88Uj/imib7QznovF++lxAH5Kl/mhofXrqfgS5Hs61ei5aTvbj84WPAoZSqZL8OT 2Sr/9moEq1cWchu8hMxzbUgUFxOqjTMCJDf8nFyxgqsgIUEs0YS8/REzWrJUQy6O2MlVpI rgEAAAAVAOZ02rRSrseRKmUuBGqzFhUQVItVAAAAgBaZJCvibD4sUkz8p9AzTKBigNCpas vIetQ7OqcLPuPmiUKbVAbIacDcNcqaOzDvoKA2ct8MHVrVEUzxjIHrDXRH8afC9v5KhOL6 TCtJsqHMwqjoljHjQf8dQErW0pASPFph1tobYLAtNcW2osfxawN3nSvFhMr/qCMZW1dRZK z5AAAAgDW9BKQnPLqEeP99qztiS7G3CwcB+cHNpIXuwCpCGvZFJJ0SEb6AmVLVxeV7l13U Zp6VRJtinLC3bWY4qeN2rEHKoLdCs957hZAH+2yPfBtcGhVIpnAkj7GpeJAplthwtdbIPw VjreanM1/9P98VcaaOo7lHgavZ3JxU7z1+XzpRAAAAFQC7360pZLbv7PFt4BPFJ8zAHxAe QwAAAA5leGFya3VuQGJhcnlvbgECAwQ= -----END OPENSSH PRIVATE KEY----- """) ACCOUNTS = u"""\ # dennis {key} URI:DIR2:aaaaaaaaaaaaaaaaaaaaaaaaaa:1111111111111111111111111111111111111111111111111111 carol {key} URI:DIR2:cccccccccccccccccccccccccc:3333333333333333333333333333333333333333333333333333 """.format(key=str(DUMMY_KEY.public().toString("openssh"), "ascii")).encode("ascii") # Python str.splitlines considers NEXT LINE, LINE SEPARATOR, and PARAGRAPH # separator to be line separators, too. However, file.readlines() does not... LINE_SEPARATORS = ( '\x0a', # line feed '\x0b', # vertical tab '\x0c', # form feed '\x0d', # carriage return ) SURROGATES: Literal["Cs"] = "Cs" class AccountFileParserTests(unittest.TestCase): """ Tests for ``load_account_file`` and its helper functions. 
""" @given(lists( text(alphabet=characters( blacklist_categories=( # Surrogates are an encoding trick to help out UTF-16. # They're not necessary to represent any non-surrogate code # point in unicode. They're also not legal individually but # only in pairs. SURROGATES, ), # Exclude all our line separators too. blacklist_characters=("\n", "\r"), )), )) def test_ignore_comments(self, lines): """ ``auth.content_lines`` filters out lines beginning with `#` and empty lines. """ expected = set() # It's not clear that real files and StringIO behave sufficiently # similarly to use the latter instead of the former here. In # particular, they seem to have distinct and incompatible # line-splitting rules. bufpath = self.mktemp() with open(bufpath, "wt", encoding="utf-8") as buf: for line in lines: stripped = line.strip() is_content = stripped and not stripped.startswith("#") if is_content: expected.add(stripped) buf.write(line + "\n") with auth.open_account_file(bufpath) as buf: actual = set(auth.content_lines(buf)) self.assertEqual(expected, actual) def test_parse_accounts(self): """ ``auth.parse_accounts`` accepts an iterator of account lines and returns an iterator of structured account data. """ alice_key = DUMMY_KEY.public().toString("openssh").decode("utf-8") alice_cap = "URI:DIR2:aaaa:1111" bob_key = DUMMY_KEY_DSA.public().toString("openssh").decode("utf-8") bob_cap = "URI:DIR2:aaaa:2222" self.assertEqual( list(auth.parse_accounts([ "alice {} {}".format(alice_key, alice_cap), "bob {} {}".format(bob_key, bob_cap), ])), [ ("alice", DUMMY_KEY.public(), alice_cap), ("bob", DUMMY_KEY_DSA.public(), bob_cap), ], ) def test_parse_accounts_rejects_passwords(self): """ The iterator returned by ``auth.parse_accounts`` raises ``ValueError`` when processing reaches a line that has what looks like a password instead of an ssh key. """ with self.assertRaises(ValueError): list(auth.parse_accounts(["alice apassword URI:DIR2:aaaa:1111"])) def test_create_account_maps(self): """ ``auth.create_account_maps`` accepts an iterator of structured account data and returns two mappings: one from account name to rootcap, the other from account name to public keys. """ alice_cap = "URI:DIR2:aaaa:1111" alice_key = DUMMY_KEY.public() bob_cap = "URI:DIR2:aaaa:2222" bob_key = DUMMY_KEY_DSA.public() accounts = [ ("alice", alice_key, alice_cap), ("bob", bob_key, bob_cap), ] self.assertEqual( auth.create_account_maps(accounts), ({ b"alice": alice_cap.encode("utf-8"), b"bob": bob_cap.encode("utf-8"), }, { b"alice": [alice_key], b"bob": [bob_key], }), ) def test_load_account_file(self): """ ``auth.load_account_file`` accepts an iterator of serialized account lines and returns two mappings: one from account name to rootcap, the other from account name to public keys. """ alice_key = DUMMY_KEY.public().toString("openssh").decode("utf-8") alice_cap = "URI:DIR2:aaaa:1111" bob_key = DUMMY_KEY_DSA.public().toString("openssh").decode("utf-8") bob_cap = "URI:DIR2:aaaa:2222" accounts = [ "alice {} {}".format(alice_key, alice_cap), "bob {} {}".format(bob_key, bob_cap), "# carol {} {}".format(alice_key, alice_cap), ] self.assertEqual( auth.load_account_file(accounts), ({ b"alice": alice_cap.encode("utf-8"), b"bob": bob_cap.encode("utf-8"), }, { b"alice": [DUMMY_KEY.public()], b"bob": [DUMMY_KEY_DSA.public()], }), ) class AccountFileCheckerKeyTests(unittest.TestCase): """ Tests for key handling done by allmydata.frontends.auth.AccountFileChecker. 
""" def setUp(self): self.account_file = filepath.FilePath(self.mktemp()) self.account_file.setContent(ACCOUNTS) abspath = abspath_expanduser_unicode(str(self.account_file.path)) self.checker = auth.AccountFileChecker(None, abspath) def test_unknown_user(self): """ AccountFileChecker.requestAvatarId returns a Deferred that fires with UnauthorizedLogin if called with an SSHPrivateKey object with a username not present in the account file. """ key_credentials = credentials.SSHPrivateKey( b"dennis", b"md5", None, None, None) avatarId = self.checker.requestAvatarId(key_credentials) return self.assertFailure(avatarId, error.UnauthorizedLogin) def test_unrecognized_key(self): """ AccountFileChecker.requestAvatarId returns a Deferred that fires with UnauthorizedLogin if called with an SSHPrivateKey object with a public key other than the one indicated in the account file for the indicated user. """ wrong_key_blob = b"""\ ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAYQDJGMWlPXh2M3pYzTiamjcBIMqctt4VvLVW2QZgEFc86XhGjPXq5QAiRTKv9yVZJR9HW70CfBI7GHun8+v4Wb6aicWBoxgI3OB5NN+OUywdme2HSaif5yenFdQr0ME71Xs= """ key_credentials = credentials.SSHPrivateKey( b"carol", b"md5", wrong_key_blob, None, None) avatarId = self.checker.requestAvatarId(key_credentials) return self.assertFailure(avatarId, error.UnauthorizedLogin) def test_missing_signature(self): """ AccountFileChecker.requestAvatarId returns a Deferred that fires with ValidPublicKey if called with an SSHPrivateKey object with an authorized key for the indicated user but with no signature. """ right_key_blob = DUMMY_KEY.public().toString("openssh") key_credentials = credentials.SSHPrivateKey( b"carol", b"md5", right_key_blob, None, None) avatarId = self.checker.requestAvatarId(key_credentials) return self.assertFailure(avatarId, conch_error.ValidPublicKey) def test_wrong_signature(self): """ AccountFileChecker.requestAvatarId returns a Deferred that fires with UnauthorizedLogin if called with an SSHPrivateKey object with a public key matching that on the user's line in the account file but with the wrong signature. """ right_key_blob = DUMMY_KEY.public().toString("openssh") key_credentials = credentials.SSHPrivateKey( b"carol", b"md5", right_key_blob, b"signed data", b"wrong sig") avatarId = self.checker.requestAvatarId(key_credentials) return self.assertFailure(avatarId, error.UnauthorizedLogin) def test_authenticated(self): """ If called with an SSHPrivateKey object with a username and public key found in the account file and a signature that proves possession of the corresponding private key, AccountFileChecker.requestAvatarId returns a Deferred that fires with an FTPAvatarID giving the username and root capability for that user. """ username = b"carol" signed_data = b"signed data" signature = DUMMY_KEY.sign(signed_data) right_key_blob = DUMMY_KEY.public().toString("openssh") key_credentials = credentials.SSHPrivateKey( username, b"md5", right_key_blob, signed_data, signature) avatarId = self.checker.requestAvatarId(key_credentials) def authenticated(avatarId): self.assertEqual( (username, b"URI:DIR2:cccccccccccccccccccccccccc:3333333333333333333333333333333333333333333333333333"), (avatarId.username, avatarId.rootcap)) avatarId.addCallback(authenticated) return avatarId tahoe_lafs-1.20.0/src/allmydata/test/test_base32.py0000644000000000000000000000227313615410400017031 0ustar00""" Tests for allmydata.util.base32. Ported to Python 3. 
""" import base64 from twisted.trial import unittest from hypothesis import ( strategies as st, given, ) from allmydata.util import base32 class Base32(unittest.TestCase): @given(input_bytes=st.binary(max_size=100)) def test_a2b_b2a_match_Pythons(self, input_bytes): encoded = base32.b2a(input_bytes) x = base64.b32encode(input_bytes).rstrip(b"=").lower() self.failUnlessEqual(encoded, x) self.assertIsInstance(encoded, bytes) self.assertTrue(base32.could_be_base32_encoded(encoded)) decoded = base32.a2b(encoded) self.assertEqual(decoded, input_bytes) self.assertIsInstance(decoded, bytes) def test_b2a(self): self.failUnlessEqual(base32.b2a(b"\x12\x34"), b"ci2a") def test_b2a_or_none(self): self.failUnlessEqual(base32.b2a_or_none(None), None) self.failUnlessEqual(base32.b2a_or_none(b"\x12\x34"), b"ci2a") def test_a2b(self): self.failUnlessEqual(base32.a2b(b"ci2a"), b"\x12\x34") self.failUnlessRaises(AssertionError, base32.a2b, b"b0gus") self.assertFalse(base32.could_be_base32_encoded(b"b0gus")) tahoe_lafs-1.20.0/src/allmydata/test/test_base62.py0000644000000000000000000000664713615410400017045 0ustar00""" Tests for allmydata.util.base62. Ported to Python 3. """ from past.builtins import chr as byteschr import random, unittest from hypothesis import ( strategies as st, given, ) from allmydata.util import base62, mathutil def insecurerandstr(n): return bytes(list(map(random.randrange, [0]*n, [256]*n))) class Base62(unittest.TestCase): def _test_num_octets_that_encode_to_this_many_chars(self, chars, octets): assert base62.num_octets_that_encode_to_this_many_chars(chars) == octets, "%s != %s <- %s" % (octets, base62.num_octets_that_encode_to_this_many_chars(chars), chars) def _test_roundtrip(self, bs): encoded = base62.b2a(bs) decoded = base62.a2b(encoded) self.assertEqual(decoded, bs) self.assertIsInstance(encoded, bytes) self.assertIsInstance(bs, bytes) self.assertIsInstance(decoded, bytes) # Encoded string only uses values from the base62 allowed characters: self.assertFalse(set(encoded) - set(base62.chars)) @given(input_bytes=st.binary(max_size=100)) def test_roundtrip(self, input_bytes): self._test_roundtrip(input_bytes) def test_known_values(self): """Known values to ensure the algorithm hasn't changed.""" def check_expected(plaintext, encoded): result1 = base62.b2a(plaintext) self.assertEqual(encoded, result1) result2 = base62.a2b(encoded) self.assertEqual(plaintext, result2) check_expected(b"hello", b'7tQLFHz') check_expected(b"", b'0') check_expected(b"zzz", b'0Xg7e') check_expected(b"\x36\xffWAT", b'49pq4mq') check_expected(b"1234 22323", b'1A0afZe9mxSZpz') check_expected(b"______", b'0TmAuCHJX') def test_num_octets_that_encode_to_this_many_chars(self): return self._test_num_octets_that_encode_to_this_many_chars(2, 1) return self._test_num_octets_that_encode_to_this_many_chars(3, 2) return self._test_num_octets_that_encode_to_this_many_chars(5, 3) return self._test_num_octets_that_encode_to_this_many_chars(6, 4) def test_ende_0x00(self): return self._test_roundtrip(b'\x00') def test_ende_0x01(self): return self._test_roundtrip(b'\x01') def test_ende_0x0100(self): return self._test_roundtrip(b'\x01\x00') def test_ende_0x000000(self): return self._test_roundtrip(b'\x00\x00\x00') def test_ende_0x010000(self): return self._test_roundtrip(b'\x01\x00\x00') def test_ende_randstr(self): return self._test_roundtrip(insecurerandstr(2**4)) def test_ende_longrandstr(self): return self._test_roundtrip(insecurerandstr(random.randrange(0, 2**10))) def test_odd_sizes(self): for j in range(2**6): lib = 
random.randrange(1, 2**8) numos = mathutil.div_ceil(lib, 8) bs = insecurerandstr(numos) # zero-out unused least-sig bits if lib%8: b = ord(bs[-1:]) b = b >> (8 - (lib%8)) b = b << (8 - (lib%8)) bs = bs[:-1] + byteschr(b) asl = base62.b2a_l(bs, lib) assert len(asl) == base62.num_chars_that_this_many_octets_encode_to(numos) # the size of the base-62 encoding must be just right bs2l = base62.a2b_l(asl, lib) assert len(bs2l) == numos # the size of the result must be just right assert bs == bs2l tahoe_lafs-1.20.0/src/allmydata/test/test_checker.py0000644000000000000000000007761113615410400017366 0ustar00""" Ported to Python 3. """ import json import os.path, shutil from bs4 import BeautifulSoup from twisted.trial import unittest from twisted.internet import defer from zope.interface import implementer from twisted.web.resource import ( Resource, ) from twisted.web.template import ( renderElement, ) from allmydata import check_results, uri from allmydata import uri as tahoe_uri from allmydata.interfaces import ( IServer, ICheckResults, ICheckAndRepairResults, ) from allmydata.util import base32 from allmydata.web import check_results as web_check_results from allmydata.storage_client import StorageFarmBroker, NativeStorageServer from allmydata.storage.server import storage_index_to_dir from allmydata.monitor import Monitor from allmydata.test.no_network import GridTestMixin from allmydata.immutable.upload import Data from allmydata.mutable.publish import MutableData from .common import ( EMPTY_CLIENT_CONFIG, ) from .common_web import ( render, ) from .web.common import ( assert_soup_has_favicon, assert_soup_has_tag_with_content, ) class FakeClient(object): def get_storage_broker(self): return self.storage_broker @implementer(IServer) class FakeServer(object): # type: ignore # incomplete implementation def get_name(self): return "fake name" def get_longname(self): return "fake longname" def get_nickname(self): return "fake nickname" @implementer(ICheckResults) class FakeCheckResults(object): # type: ignore # incomplete implementation def __init__(self, si=None, healthy=False, recoverable=False, summary="fake summary"): self._storage_index = si self._is_healthy = healthy self._is_recoverable = recoverable self._summary = summary def get_storage_index(self): return self._storage_index def get_storage_index_string(self): return base32.b2a_or_none(self._storage_index) def is_healthy(self): return self._is_healthy def is_recoverable(self): return self._is_recoverable def get_summary(self): return self._summary def get_corrupt_shares(self): # returns a list of (IServer, storage_index, sharenum) return [(FakeServer(), b"", 0)] @implementer(ICheckAndRepairResults) class FakeCheckAndRepairResults(object): # type: ignore # incomplete implementation def __init__(self, si=None, repair_attempted=False, repair_success=False): self._storage_index = si self._repair_attempted = repair_attempted self._repair_success = repair_success def get_storage_index(self): return self._storage_index def get_pre_repair_results(self): return FakeCheckResults() def get_post_repair_results(self): return FakeCheckResults() def get_repair_attempted(self): return self._repair_attempted def get_repair_successful(self): return self._repair_success class ElementResource(Resource, object): def __init__(self, element): Resource.__init__(self) self.element = element def render(self, request): return renderElement(request, self.element) class WebResultsRendering(unittest.TestCase): @staticmethod def remove_tags(html): return 
BeautifulSoup(html, 'html5lib').get_text(separator=" ") def create_fake_client(self): sb = StorageFarmBroker(True, None, EMPTY_CLIENT_CONFIG) # s.get_name() (the "short description") will be "v0-00000000". # s.get_longname() will include the -long suffix. servers = [(b"v0-00000000-long", b"\x00"*20, "peer-0"), (b"v0-ffffffff-long", b"\xff"*20, "peer-f"), (b"v0-11111111-long", b"\x11"*20, "peer-11")] for (key_s, binary_tubid, nickname) in servers: server_id = key_s tubid_b32 = base32.b2a(binary_tubid) furl = "pb://%s@nowhere/fake" % str(tubid_b32, "utf-8") ann = { "version": 0, "service-name": "storage", "anonymous-storage-FURL": furl, "permutation-seed-base32": "", "nickname": str(nickname), "app-versions": {}, # need #466 and v2 introducer "my-version": "ver", "oldest-supported": "oldest", } s = NativeStorageServer(server_id, ann, None, None, None) sb.test_add_server(server_id, s) c = FakeClient() c.storage_broker = sb return c def render_json(self, resource): return self.successResultOf(render(resource, {b"output": [b"json"]})) def render_element(self, element, args=None): if args is None: args = {} return self.successResultOf(render(ElementResource(element), args)) def test_literal(self): lcr = web_check_results.LiteralCheckResultsRendererElement() html = self.render_element(lcr) self.failUnlessIn(b"Literal files are always healthy", html) html = self.render_element(lcr, args={b"return_to": [b"FOOURL"]}) self.failUnlessIn(b"Literal files are always healthy", html) self.failUnlessIn(b'Return to file.', html) c = self.create_fake_client() lcr = web_check_results.LiteralCheckResultsRenderer(c) js = self.render_json(lcr) j = json.loads(js) self.failUnlessEqual(j["storage-index"], "") self.failUnlessEqual(j["results"]["healthy"], True) def test_check(self): c = self.create_fake_client() sb = c.storage_broker serverid_1 = b"\x00"*20 serverid_f = b"\xff"*20 server_1 = sb.get_stub_server(serverid_1) server_f = sb.get_stub_server(serverid_f) u = uri.CHKFileURI(b"\x00"*16, b"\x00"*32, 3, 10, 1234) data = { "count_happiness": 8, "count_shares_needed": 3, "count_shares_expected": 9, "count_shares_good": 10, "count_good_share_hosts": 11, "count_recoverable_versions": 1, "count_unrecoverable_versions": 0, "servers_responding": [], "sharemap": {"shareid1": [server_1, server_f]}, "count_wrong_shares": 0, "list_corrupt_shares": [], "count_corrupt_shares": 0, "list_incompatible_shares": [], "count_incompatible_shares": 0, "report": [], "share_problems": [], "servermap": None, } cr = check_results.CheckResults(u, u.get_storage_index(), healthy=True, recoverable=True, summary="groovy", **data) w = web_check_results.CheckResultsRendererElement(c, cr) html = self.render_element(w) s = self.remove_tags(html) self.failUnlessIn("File Check Results for SI=2k6avp", s) # abbreviated self.failUnlessIn("Healthy : groovy", s) self.failUnlessIn("Share Counts: need 3-of-9, have 10", s) self.failUnlessIn("Happiness Level: 8", s) self.failUnlessIn("Hosts with good shares: 11", s) self.failUnlessIn("Corrupt shares: none", s) self.failUnlessIn("Wrong Shares: 0", s) self.failUnlessIn("Recoverable Versions: 1", s) self.failUnlessIn("Unrecoverable Versions: 0", s) self.failUnlessIn("Good Shares (sorted in share order): Share ID Nickname Node ID shareid1 peer-0 00000000 peer-f ffffffff", s) cr = check_results.CheckResults(u, u.get_storage_index(), healthy=False, recoverable=True, summary="ungroovy", **data) w = web_check_results.CheckResultsRendererElement(c, cr) html = self.render_element(w) s = self.remove_tags(html) 
self.failUnlessIn("File Check Results for SI=2k6avp", s) # abbreviated self.failUnlessIn("Not Healthy! : ungroovy", s) data["count_corrupt_shares"] = 1 data["list_corrupt_shares"] = [(server_1, u.get_storage_index(), 2)] cr = check_results.CheckResults(u, u.get_storage_index(), healthy=False, recoverable=False, summary="rather dead", **data) w = web_check_results.CheckResultsRendererElement(c, cr) html = self.render_element(w) s = self.remove_tags(html) self.failUnlessIn("File Check Results for SI=2k6avp", s) # abbreviated self.failUnlessIn("Not Recoverable! : rather dead", s) self.failUnlessIn("Corrupt shares: Share ID Nickname Node ID sh#2 peer-0 00000000", s) html = self.render_element(w) s = self.remove_tags(html) self.failUnlessIn("File Check Results for SI=2k6avp", s) # abbreviated self.failUnlessIn("Not Recoverable! : rather dead", s) html = self.render_element(w, args={b"return_to": [b"FOOURL"]}) self.failUnlessIn(b'Return to file/directory.', html) w = web_check_results.CheckResultsRenderer(c, cr) d = self.render_json(w) def _check_json(jdata): j = json.loads(jdata) self.failUnlessEqual(j["summary"], "rather dead") self.failUnlessEqual(j["storage-index"], "2k6avpjga3dho3zsjo6nnkt7n4") expected = {'count-happiness': 8, 'count-shares-expected': 9, 'healthy': False, 'count-unrecoverable-versions': 0, 'count-shares-needed': 3, 'sharemap': {"shareid1": ["v0-00000000-long", "v0-ffffffff-long"]}, 'count-recoverable-versions': 1, 'list-corrupt-shares': [["v0-00000000-long", "2k6avpjga3dho3zsjo6nnkt7n4", 2]], 'count-good-share-hosts': 11, 'count-wrong-shares': 0, 'count-shares-good': 10, 'count-corrupt-shares': 1, 'servers-responding': [], 'recoverable': False, } self.failUnlessEqual(j["results"], expected) _check_json(d) w = web_check_results.CheckResultsRendererElement(c, cr) d = self.render_element(w) def _check(html): s = self.remove_tags(html) self.failUnlessIn("File Check Results for SI=2k6avp", s) self.failUnlessIn("Not Recoverable! 
: rather dead", s) _check(html) def test_check_and_repair(self): c = self.create_fake_client() sb = c.storage_broker serverid_1 = b"\x00"*20 serverid_f = b"\xff"*20 u = uri.CHKFileURI(b"\x00"*16, b"\x00"*32, 3, 10, 1234) data = { "count_happiness": 5, "count_shares_needed": 3, "count_shares_expected": 10, "count_shares_good": 6, "count_good_share_hosts": 7, "count_recoverable_versions": 1, "count_unrecoverable_versions": 0, "servers_responding": [], "sharemap": {"shareid1": [sb.get_stub_server(serverid_1), sb.get_stub_server(serverid_f)]}, "count_wrong_shares": 0, "list_corrupt_shares": [], "count_corrupt_shares": 0, "list_incompatible_shares": [], "count_incompatible_shares": 0, "report": [], "share_problems": [], "servermap": None, } pre_cr = check_results.CheckResults(u, u.get_storage_index(), healthy=False, recoverable=True, summary="illing", **data) data = { "count_happiness": 9, "count_shares_needed": 3, "count_shares_expected": 10, "count_shares_good": 10, "count_good_share_hosts": 11, "count_recoverable_versions": 1, "count_unrecoverable_versions": 0, "servers_responding": [], "sharemap": {"shareid1": [sb.get_stub_server(serverid_1), sb.get_stub_server(serverid_f)]}, "count_wrong_shares": 0, "count_corrupt_shares": 0, "list_corrupt_shares": [], "list_incompatible_shares": [], "count_incompatible_shares": 0, "report": [], "share_problems": [], "servermap": None, } post_cr = check_results.CheckResults(u, u.get_storage_index(), healthy=True, recoverable=True, summary="groovy", **data) crr = check_results.CheckAndRepairResults(u.get_storage_index()) crr.pre_repair_results = pre_cr crr.post_repair_results = post_cr crr.repair_attempted = False w = web_check_results.CheckAndRepairResultsRendererElement(c, crr) html = self.render_element(w) s = self.remove_tags(html) self.failUnlessIn("File Check-And-Repair Results for SI=2k6avp", s) self.failUnlessIn("Healthy : groovy", s) self.failUnlessIn("No repair necessary", s) self.failUnlessIn("Post-Repair Checker Results:", s) self.failUnlessIn("Share Counts: need 3-of-10, have 10", s) crr.repair_attempted = True crr.repair_successful = True html = self.render_element(w) s = self.remove_tags(html) self.failUnlessIn("File Check-And-Repair Results for SI=2k6avp", s) self.failUnlessIn("Healthy : groovy", s) self.failUnlessIn("Repair successful", s) self.failUnlessIn("Post-Repair Checker Results:", s) crr.repair_attempted = True crr.repair_successful = False post_cr = check_results.CheckResults(u, u.get_storage_index(), healthy=False, recoverable=True, summary="better", **data) crr.post_repair_results = post_cr html = self.render_element(w) s = self.remove_tags(html) self.failUnlessIn("File Check-And-Repair Results for SI=2k6avp", s) self.failUnlessIn("Not Healthy! : better", s) self.failUnlessIn("Repair unsuccessful", s) self.failUnlessIn("Post-Repair Checker Results:", s) crr.repair_attempted = True crr.repair_successful = False post_cr = check_results.CheckResults(u, u.get_storage_index(), healthy=False, recoverable=False, summary="worse", **data) crr.post_repair_results = post_cr html = self.render_element(w) s = self.remove_tags(html) self.failUnlessIn("File Check-And-Repair Results for SI=2k6avp", s) self.failUnlessIn("Not Recoverable! 
: worse", s) self.failUnlessIn("Repair unsuccessful", s) self.failUnlessIn("Post-Repair Checker Results:", s) w = web_check_results.CheckAndRepairResultsRenderer(c, crr) j = json.loads(self.render_json(w)) self.failUnlessEqual(j["repair-attempted"], True) self.failUnlessEqual(j["storage-index"], "2k6avpjga3dho3zsjo6nnkt7n4") self.failUnlessEqual(j["pre-repair-results"]["summary"], "illing") self.failUnlessEqual(j["post-repair-results"]["summary"], "worse") w = web_check_results.CheckAndRepairResultsRenderer(c, None) j = json.loads(self.render_json(w)) self.failUnlessEqual(j["repair-attempted"], False) self.failUnlessEqual(j["storage-index"], "") def test_deep_check_renderer(self): status = check_results.DeepCheckResults(b"fake-root-si") status.add_check( FakeCheckResults(b"", False, False), (u"fake", u"unhealthy", u"unrecoverable") ) status.add_check( FakeCheckResults(b"", True, True), (u"fake", u"healthy", u"recoverable") ) status.add_check( FakeCheckResults(b"", True, False), (u"fake", u"healthy", u"unrecoverable") ) status.add_check( FakeCheckResults(b"", False, True), (u"fake", u"unhealthy", u"recoverable") ) monitor = Monitor() monitor.set_status(status) elem = web_check_results.DeepCheckResultsRendererElement(monitor) doc = self.render_element(elem) soup = BeautifulSoup(doc, 'html5lib') assert_soup_has_favicon(self, soup) assert_soup_has_tag_with_content( self, soup, u"title", u"Tahoe-LAFS - Deep Check Results" ) assert_soup_has_tag_with_content( self, soup, u"h1", "Deep-Check Results for root SI=" ) assert_soup_has_tag_with_content( self, soup, u"li", u"Objects Checked: 4" ) assert_soup_has_tag_with_content( self, soup, u"li", u"Objects Healthy: 2" ) assert_soup_has_tag_with_content( self, soup, u"li", u"Objects Unhealthy: 2" ) assert_soup_has_tag_with_content( self, soup, u"li", u"Objects Unrecoverable: 2" ) assert_soup_has_tag_with_content( self, soup, u"li", u"Corrupt Shares: 4" ) assert_soup_has_tag_with_content( self, soup, u"h2", u"Files/Directories That Had Problems:" ) assert_soup_has_tag_with_content( self, soup, u"li", u"fake/unhealthy/recoverable: fake summary" ) assert_soup_has_tag_with_content( self, soup, u"li", u"fake/unhealthy/unrecoverable: fake summary" ) assert_soup_has_tag_with_content( self, soup, u"h2", u"Servers on which corrupt shares were found" ) assert_soup_has_tag_with_content( self, soup, u"h2", u"Corrupt Shares" ) assert_soup_has_tag_with_content( self, soup, u"h2", u"All Results" ) def test_deep_check_and_repair_renderer(self): status = check_results.DeepCheckAndRepairResults(b"") status.add_check_and_repair( FakeCheckAndRepairResults(b"attempted/success", True, True), (u"attempted", u"success") ) status.add_check_and_repair( FakeCheckAndRepairResults(b"attempted/failure", True, False), (u"attempted", u"failure") ) status.add_check_and_repair( FakeCheckAndRepairResults(b"unattempted/failure", False, False), (u"unattempted", u"failure") ) monitor = Monitor() monitor.set_status(status) elem = web_check_results.DeepCheckAndRepairResultsRendererElement(monitor) doc = self.render_element(elem) soup = BeautifulSoup(doc, 'html5lib') assert_soup_has_favicon(self, soup) assert_soup_has_tag_with_content( self, soup, u"title", u"Tahoe-LAFS - Deep Check Results" ) assert_soup_has_tag_with_content( self, soup, u"h1", u"Deep-Check-And-Repair Results for root SI=" ) assert_soup_has_tag_with_content( self, soup, u"li", u"Objects Checked: 3" ) assert_soup_has_tag_with_content( self, soup, u"li", u"Objects Healthy (before repair): 0" ) assert_soup_has_tag_with_content( 
self, soup, u"li", u"Objects Unhealthy (before repair): 3" ) assert_soup_has_tag_with_content( self, soup, u"li", u"Corrupt Shares (before repair): 3" ) assert_soup_has_tag_with_content( self, soup, u"li", u"Repairs Attempted: 2" ) assert_soup_has_tag_with_content( self, soup, u"li", u"Repairs Successful: 1" ) assert_soup_has_tag_with_content( self, soup, u"li", "Repairs Unsuccessful: 1" ) assert_soup_has_tag_with_content( self, soup, u"li", u"Objects Healthy (after repair): 0" ) assert_soup_has_tag_with_content( self, soup, u"li", u"Objects Unhealthy (after repair): 3" ) assert_soup_has_tag_with_content( self, soup, u"li", u"Corrupt Shares (after repair): 3" ) assert_soup_has_tag_with_content( self, soup, u"h2", u"Files/Directories That Had Problems:" ) assert_soup_has_tag_with_content( self, soup, u"h2", u"Files/Directories That Still Have Problems:" ) assert_soup_has_tag_with_content( self, soup, u"h2", u"Servers on which corrupt shares were found" ) assert_soup_has_tag_with_content( self, soup, u"h2", u"Remaining Corrupt Shares" ) class BalancingAct(GridTestMixin, unittest.TestCase): # test for #1115 regarding the 'count-good-share-hosts' metric def add_server(self, server_number, readonly=False): assert self.g, "I tried to find a grid at self.g, but failed" ss = self.g.make_server(server_number, readonly) #log.msg("just created a server, number: %s => %s" % (server_number, ss,)) self.g.add_server(server_number, ss) def add_server_with_share(self, server_number, uri, share_number=None, readonly=False): self.add_server(server_number, readonly) if share_number is not None: self.copy_share_to_server(uri, share_number, server_number) def copy_share_to_server(self, uri, share_number, server_number): ss = self.g.servers_by_number[server_number] # Copy share i from the directory associated with the first # storage server to the directory associated with this one. assert self.g, "I tried to find a grid at self.g, but failed" assert self.shares, "I tried to find shares at self.shares, but failed" old_share_location = self.shares[share_number][2] new_share_location = os.path.join(ss.storedir, "shares") si = tahoe_uri.from_string(self.uri).get_storage_index() new_share_location = os.path.join(new_share_location, storage_index_to_dir(si)) if not os.path.exists(new_share_location): os.makedirs(new_share_location) new_share_location = os.path.join(new_share_location, str(share_number)) if old_share_location != new_share_location: shutil.copy(old_share_location, new_share_location) shares = self.find_uri_shares(uri) # Make sure that the storage server has the share. 
self.failUnless((share_number, ss.my_nodeid, new_share_location) in shares) def _pretty_shares_chart(self, uri): # Servers are labeled A-Z, shares are labeled 0-9 letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' assert len(self.g.servers_by_number) < len(letters), \ "This little printing function is only meant for < 26 servers" shares_chart = {} names = dict(zip([ss.my_nodeid for _,ss in self.g.servers_by_number.items()], letters)) for shnum, serverid, _ in self.find_uri_shares(uri): shares_chart.setdefault(shnum, []).append(names[serverid]) return shares_chart def test_good_share_hosts(self): self.basedir = "checker/BalancingAct/1115" self.set_up_grid(num_servers=1) c0 = self.g.clients[0] c0.encoding_params['happy'] = 1 c0.encoding_params['n'] = 4 c0.encoding_params['k'] = 3 DATA = b"data" * 100 d = c0.upload(Data(DATA, convergence=b"")) def _stash_immutable(ur): self.imm = c0.create_node_from_uri(ur.get_uri()) self.uri = self.imm.get_uri() d.addCallback(_stash_immutable) d.addCallback(lambda ign: self.find_uri_shares(self.uri)) def _store_shares(shares): self.shares = shares d.addCallback(_store_shares) def add_three(_, i): # Add a new server with just share 3 self.add_server_with_share(i, self.uri, 3) #print(self._pretty_shares_chart(self.uri)) for i in range(1,5): d.addCallback(add_three, i) def _check_and_repair(_): return self.imm.check_and_repair(Monitor()) def _check_counts(crr, shares_good, good_share_hosts): prr = crr.get_post_repair_results() self.failUnlessEqual(prr.get_share_counter_good(), shares_good) self.failUnlessEqual(prr.get_host_counter_good_shares(), good_share_hosts) """ Initial sharemap: 0:[A] 1:[A] 2:[A] 3:[A,B,C,D,E] 4 good shares, but 5 good hosts After deleting all instances of share #3 and repairing: 0:[A], 1:[A,B], 2:[C,A], 3:[E] # actually: {0: ['E', 'A'], 1: ['C', 'A'], 2: ['A', 'B'], 3: ['D']} Still 4 good shares but now 4 good hosts """ d.addCallback(_check_and_repair) d.addCallback(_check_counts, 4, 5) d.addCallback(lambda _: self.delete_shares_numbered(self.uri, [3])) d.addCallback(_check_and_repair) # it can happen that our uploader will choose, e.g., to upload # to servers B, C, D, E .. which will mean that all 5 serves # now contain our shares (and thus "respond"). 
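# Editorial sketch, not part of the original test and not Tahoe-LAFS's checker
# implementation (the helper name is made up): how the two numbers passed to
# _check_counts relate to a sharemap like the chart above.
def _sketch_count_metrics(sharemap):
    # sharemap maps a share number to the servers holding a copy of it,
    # e.g. {0: ['A'], 1: ['A'], 2: ['A'], 3: ['A', 'B', 'C', 'D', 'E']}
    shares_good = len([shnum for shnum, hosts in sharemap.items() if hosts])
    good_share_hosts = len({host for hosts in sharemap.values() for host in hosts})
    return shares_good, good_share_hosts
# For the initial sharemap charted above this yields (4, 5): four distinct
# shares are recoverable, held across five distinct hosts, matching the first
# _check_counts(crr, 4, 5) assertion.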
def _check_happy(crr): prr = crr.get_post_repair_results() self.assertTrue(prr.get_host_counter_good_shares() >= 4) return crr d.addCallback(_check_happy) d.addCallback(lambda _: all([self.g.break_server(sid) for sid in self.g.get_all_serverids()])) d.addCallback(_check_and_repair) d.addCallback(_check_counts, 0, 0) return d class AddLease(GridTestMixin, unittest.TestCase): # test for #875, in which failures in the add-lease call cause # false-negatives in the checker def test_875(self): self.basedir = "checker/AddLease/875" self.set_up_grid(num_servers=1) c0 = self.g.clients[0] c0.encoding_params['happy'] = 1 self.uris = {} DATA = b"data" * 100 d = c0.upload(Data(DATA, convergence=b"")) def _stash_immutable(ur): self.imm = c0.create_node_from_uri(ur.get_uri()) d.addCallback(_stash_immutable) d.addCallback(lambda ign: c0.create_mutable_file(MutableData(b"contents"))) def _stash_mutable(node): self.mut = node d.addCallback(_stash_mutable) def _check_cr(cr, which): self.failUnless(cr.is_healthy(), which) # these two should work normally d.addCallback(lambda ign: self.imm.check(Monitor(), add_lease=True)) d.addCallback(_check_cr, "immutable-normal") d.addCallback(lambda ign: self.mut.check(Monitor(), add_lease=True)) d.addCallback(_check_cr, "mutable-normal") really_did_break = [] # now break the server's add_lease call def _break_add_lease(ign): def broken_add_lease(*args, **kwargs): really_did_break.append(1) raise KeyError("intentional failure, should be ignored") assert self.g.servers_by_number[0].add_lease self.g.servers_by_number[0].add_lease = broken_add_lease d.addCallback(_break_add_lease) # and confirm that the files still look healthy d.addCallback(lambda ign: self.mut.check(Monitor(), add_lease=True)) d.addCallback(_check_cr, "mutable-broken") d.addCallback(lambda ign: self.imm.check(Monitor(), add_lease=True)) d.addCallback(_check_cr, "immutable-broken") d.addCallback(lambda ign: self.failUnless(really_did_break)) return d class CounterHolder(object): def __init__(self): self._num_active_block_fetches = 0 self._max_active_block_fetches = 0 from allmydata.immutable.checker import ValidatedReadBucketProxy class MockVRBP(ValidatedReadBucketProxy): def __init__(self, sharenum, bucket, share_hash_tree, num_blocks, block_size, share_size, counterholder): ValidatedReadBucketProxy.__init__(self, sharenum, bucket, share_hash_tree, num_blocks, block_size, share_size) self.counterholder = counterholder def get_block(self, blocknum): self.counterholder._num_active_block_fetches += 1 if self.counterholder._num_active_block_fetches > self.counterholder._max_active_block_fetches: self.counterholder._max_active_block_fetches = self.counterholder._num_active_block_fetches d = ValidatedReadBucketProxy.get_block(self, blocknum) def _mark_no_longer_active(res): self.counterholder._num_active_block_fetches -= 1 return res d.addBoth(_mark_no_longer_active) return d class TooParallel(GridTestMixin, unittest.TestCase): # bug #1395: immutable verifier was aggressively parallized, checking all # blocks of all shares at the same time, blowing our memory budget and # crashing with MemoryErrors on >1GB files. def test_immutable(self): import allmydata.immutable.checker origVRBP = allmydata.immutable.checker.ValidatedReadBucketProxy self.basedir = "checker/TooParallel/immutable" # If any code asks to instantiate a ValidatedReadBucketProxy, # we give them a MockVRBP which is configured to use our # CounterHolder. 
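# The CounterHolder/MockVRBP pair above is a simple high-water-mark probe:
# each get_block() call bumps the count of in-flight fetches on entry, records
# the largest value ever seen in _max_active_block_fetches, and decrements the
# count again when the Deferred fires.  The check below then asserts that the
# verifier never had more than one outstanding block fetch per share (4 shares
# => a high-water mark of 4), which is the behaviour ticket #1395 asks for.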
counterholder = CounterHolder() def make_mock_VRBP(*args, **kwargs): return MockVRBP(counterholder=counterholder, *args, **kwargs) allmydata.immutable.checker.ValidatedReadBucketProxy = make_mock_VRBP d = defer.succeed(None) def _start(ign): self.set_up_grid(num_servers=4) self.c0 = self.g.clients[0] self.c0.encoding_params = { "k": 1, "happy": 4, "n": 4, "max_segment_size": 5, } self.uris = {} DATA = b"data" * 100 # 400/5 = 80 blocks return self.c0.upload(Data(DATA, convergence=b"")) d.addCallback(_start) def _do_check(ur): n = self.c0.create_node_from_uri(ur.get_uri()) return n.check(Monitor(), verify=True) d.addCallback(_do_check) def _check(cr): # the verifier works on all 4 shares in parallel, but only # fetches one block from each share at a time, so we expect to # see 4 parallel fetches self.failUnlessEqual(counterholder._max_active_block_fetches, 4) d.addCallback(_check) def _clean_up(res): allmydata.immutable.checker.ValidatedReadBucketProxy = origVRBP return res d.addBoth(_clean_up) return d tahoe_lafs-1.20.0/src/allmydata/test/test_client.py0000644000000000000000000015433413615410400017236 0ustar00from __future__ import annotations import os from unittest import skipIf from functools import ( partial, ) import twisted from yaml import ( safe_dump, ) from fixtures import ( Fixture, TempDir, ) from hypothesis import ( given, ) from hypothesis.strategies import ( sampled_from, booleans, ) from eliot.testing import ( assertHasAction, ) from twisted.trial import unittest from twisted.application import service from twisted.internet import defer from twisted.python.filepath import ( FilePath, ) from twisted.python.runtime import platform from testtools.matchers import ( Equals, AfterPreprocessing, MatchesListwise, MatchesDict, ContainsDict, Always, Is, raises, ) from testtools.twistedsupport import ( succeeded, failed, ) import allmydata import allmydata.util.log from allmydata.nodemaker import ( NodeMaker, ) from allmydata.node import OldConfigError, UnescapedHashError, create_node_dir from allmydata import client from allmydata.storage_client import ( StorageClientConfig, StorageFarmBroker, ) from allmydata.util import ( base32, fileutil, encodingutil, configutil, jsonbytes as json, ) from allmydata.util.eliotutil import capture_logging from allmydata.util.fileutil import abspath_expanduser_unicode from allmydata.interfaces import IFilesystemNode, IFileNode, \ IImmutableFileNode, IMutableFileNode, IDirectoryNode from allmydata.scripts.common import ( write_introducer, ) from foolscap.api import flushEventualQueue import allmydata.test.common_util as testutil from .common import ( superuser, EMPTY_CLIENT_CONFIG, SyncTestCase, AsyncBrokenTestCase, UseTestPlugins, MemoryIntroducerClient, get_published_announcements, UseNode, ) from .matchers import ( MatchesSameElements, matches_storage_announcement, matches_furl, ) from .strategies import ( write_capabilities, ) SOME_FURL = "pb://abcde@nowhere/fake" BASECONFIG = "[client]\n" class Basic(testutil.ReallyEqualMixin, unittest.TestCase): def test_loadable(self): basedir = "test_client.Basic.test_loadable" os.mkdir(basedir) fileutil.write(os.path.join(basedir, "tahoe.cfg"), \ BASECONFIG) return client.create_client(basedir) @defer.inlineCallbacks def test_unreadable_introducers(self): """ The Deferred from create_client fails when private/introducers.yaml is unreadable (but exists) """ basedir = "test_client.Basic.test_unreadable_introduers" os.mkdir(basedir, 0o700) os.mkdir(os.path.join(basedir, 'private'), 0o700) intro_fname = 
os.path.join(basedir, 'private', 'introducers.yaml') with open(intro_fname, 'w') as f: f.write("---\n") os.chmod(intro_fname, 0o000) self.addCleanup(lambda: os.chmod(intro_fname, 0o700)) with self.assertRaises(EnvironmentError): yield client.create_client(basedir) @defer.inlineCallbacks def test_comment(self): """ A comment character (#) in a furl results in an UnescapedHashError Failure. """ should_fail = [r"test#test", r"#testtest", r"test\\#test", r"test\#test", r"test\\\#test"] basedir = "test_client.Basic.test_comment" os.mkdir(basedir) def write_config(s): config = ("[client]\n" "helper.furl = %s\n" % s) fileutil.write(os.path.join(basedir, "tahoe.cfg"), config) for s in should_fail: write_config(s) with self.assertRaises(UnescapedHashError) as ctx: yield client.create_client(basedir) self.assertIn("[client]helper.furl", str(ctx.exception)) # if somebody knows a clever way to do this (cause # EnvironmentError when reading a file that really exists), on # windows, please fix this @skipIf(platform.isWindows(), "We don't know how to set permissions on Windows.") @skipIf(superuser, "cannot test as superuser with all permissions") def test_unreadable_config(self): basedir = "test_client.Basic.test_unreadable_config" os.mkdir(basedir) fn = os.path.join(basedir, "tahoe.cfg") fileutil.write(fn, BASECONFIG) old_mode = os.stat(fn).st_mode os.chmod(fn, 0) try: e = self.assertRaises( EnvironmentError, client.read_config, basedir, "client.port", ) self.assertIn("Permission denied", str(e)) finally: # don't leave undeleteable junk lying around os.chmod(fn, old_mode) def test_error_on_old_config_files(self): basedir = "test_client.Basic.test_error_on_old_config_files" os.mkdir(basedir) fileutil.write(os.path.join(basedir, "tahoe.cfg"), BASECONFIG + "[storage]\n" + "enabled = false\n" + "reserved_space = bogus\n") fileutil.write(os.path.join(basedir, "introducer.furl"), "") fileutil.write(os.path.join(basedir, "no_storage"), "") fileutil.write(os.path.join(basedir, "readonly_storage"), "") fileutil.write(os.path.join(basedir, "debug_discard_storage"), "") logged_messages = [] self.patch(twisted.python.log, 'msg', logged_messages.append) e = self.failUnlessRaises( OldConfigError, client.read_config, basedir, "client.port", ) abs_basedir = fileutil.abspath_expanduser_unicode(str(basedir)) self.failUnlessIn(os.path.join(abs_basedir, "introducer.furl"), e.args[0]) self.failUnlessIn(os.path.join(abs_basedir, "no_storage"), e.args[0]) self.failUnlessIn(os.path.join(abs_basedir, "readonly_storage"), e.args[0]) self.failUnlessIn(os.path.join(abs_basedir, "debug_discard_storage"), e.args[0]) for oldfile in ['introducer.furl', 'no_storage', 'readonly_storage', 'debug_discard_storage']: logged = [ m for m in logged_messages if ("Found pre-Tahoe-LAFS-v1.3 configuration file" in str(m) and oldfile in str(m)) ] self.failUnless(logged, (oldfile, logged_messages)) for oldfile in [ 'nickname', 'webport', 'keepalive_timeout', 'log_gatherer.furl', 'disconnect_timeout', 'advertised_ip_addresses', 'helper.furl', 'key_generator.furl', 'stats_gatherer.furl', 'sizelimit', 'run_helper']: logged = [ m for m in logged_messages if ("Found pre-Tahoe-LAFS-v1.3 configuration file" in str(m) and oldfile in str(m)) ] self.failIf(logged, (oldfile, logged_messages)) @defer.inlineCallbacks def test_secrets(self): """ A new client has renewal + cancel secrets """ basedir = "test_client.Basic.test_secrets" os.mkdir(basedir) fileutil.write(os.path.join(basedir, "tahoe.cfg"), \ BASECONFIG) c = yield client.create_client(basedir) 
secret_fname = os.path.join(basedir, "private", "secret") self.failUnless(os.path.exists(secret_fname), secret_fname) renew_secret = c.get_renewal_secret() self.failUnless(base32.b2a(renew_secret)) cancel_secret = c.get_cancel_secret() self.failUnless(base32.b2a(cancel_secret)) @defer.inlineCallbacks def test_nodekey_yes_storage(self): """ We have a nodeid if we're providing storage """ basedir = "test_client.Basic.test_nodekey_yes_storage" os.mkdir(basedir) fileutil.write(os.path.join(basedir, "tahoe.cfg"), BASECONFIG) c = yield client.create_client(basedir) self.failUnless(c.get_long_nodeid().startswith(b"v0-")) @defer.inlineCallbacks def test_nodekey_no_storage(self): """ We have a nodeid if we're not providing storage """ basedir = "test_client.Basic.test_nodekey_no_storage" os.mkdir(basedir) fileutil.write(os.path.join(basedir, "tahoe.cfg"), BASECONFIG + "[storage]\n" + "enabled = false\n") c = yield client.create_client(basedir) self.failUnless(c.get_long_nodeid().startswith(b"v0-")) def test_storage_anonymous_enabled_by_default(self): """ Anonymous storage access is enabled if storage is enabled and *anonymous* is not set. """ config = client.config_from_string( "test_storage_default_anonymous_enabled", "tub.port", BASECONFIG + ( "[storage]\n" "enabled = true\n" ) ) self.assertTrue(client.anonymous_storage_enabled(config)) def test_storage_anonymous_enabled_explicitly(self): """ Anonymous storage access is enabled if storage is enabled and *anonymous* is set to true. """ config = client.config_from_string( self.id(), "tub.port", BASECONFIG + ( "[storage]\n" "enabled = true\n" "anonymous = true\n" ) ) self.assertTrue(client.anonymous_storage_enabled(config)) def test_storage_anonymous_disabled_explicitly(self): """ Anonymous storage access is disabled if storage is enabled and *anonymous* is set to false. """ config = client.config_from_string( self.id(), "tub.port", BASECONFIG + ( "[storage]\n" "enabled = true\n" "anonymous = false\n" ) ) self.assertFalse(client.anonymous_storage_enabled(config)) def test_storage_anonymous_disabled_by_storage(self): """ Anonymous storage access is disabled if storage is disabled and *anonymous* is set to true. 
""" config = client.config_from_string( self.id(), "tub.port", BASECONFIG + ( "[storage]\n" "enabled = false\n" "anonymous = true\n" ) ) self.assertFalse(client.anonymous_storage_enabled(config)) @defer.inlineCallbacks def test_reserved_1(self): """ reserved_space option is propagated """ basedir = "client.Basic.test_reserved_1" os.mkdir(basedir) fileutil.write(os.path.join(basedir, "tahoe.cfg"), \ BASECONFIG + \ "[storage]\n" + \ "enabled = true\n" + \ "reserved_space = 1000\n") c = yield client.create_client(basedir) self.failUnlessEqual(c.getServiceNamed("storage").reserved_space, 1000) @defer.inlineCallbacks def test_reserved_2(self): """ reserved_space option understands 'K' to mean kilobytes """ basedir = "client.Basic.test_reserved_2" os.mkdir(basedir) fileutil.write(os.path.join(basedir, "tahoe.cfg"), \ BASECONFIG + \ "[storage]\n" + \ "enabled = true\n" + \ "reserved_space = 10K\n") c = yield client.create_client(basedir) self.failUnlessEqual(c.getServiceNamed("storage").reserved_space, 10*1000) @defer.inlineCallbacks def test_reserved_3(self): """ reserved_space option understands 'mB' to mean megabytes """ basedir = "client.Basic.test_reserved_3" os.mkdir(basedir) fileutil.write(os.path.join(basedir, "tahoe.cfg"), \ BASECONFIG + \ "[storage]\n" + \ "enabled = true\n" + \ "reserved_space = 5mB\n") c = yield client.create_client(basedir) self.failUnlessEqual(c.getServiceNamed("storage").reserved_space, 5*1000*1000) @defer.inlineCallbacks def test_reserved_4(self): """ reserved_space option understands 'Gb' to mean gigabytes """ basedir = "client.Basic.test_reserved_4" os.mkdir(basedir) fileutil.write(os.path.join(basedir, "tahoe.cfg"), \ BASECONFIG + \ "[storage]\n" + \ "enabled = true\n" + \ "reserved_space = 78Gb\n") c = yield client.create_client(basedir) self.failUnlessEqual(c.getServiceNamed("storage").reserved_space, 78*1000*1000*1000) @defer.inlineCallbacks def test_reserved_bad(self): """ reserved_space option produces errors on non-numbers """ basedir = "client.Basic.test_reserved_bad" os.mkdir(basedir) fileutil.write(os.path.join(basedir, "tahoe.cfg"), \ BASECONFIG + \ "[storage]\n" + \ "enabled = true\n" + \ "reserved_space = bogus\n") with self.assertRaises(ValueError): yield client.create_client(basedir) @defer.inlineCallbacks def test_web_apiauthtoken(self): """ Client loads the proper API auth token from disk """ basedir = u"client.Basic.test_web_apiauthtoken" create_node_dir(basedir, "testing") c = yield client.create_client(basedir) # this must come after we create the client, as it will create # a new, random authtoken itself with open(os.path.join(basedir, "private", "api_auth_token"), "w") as f: f.write("deadbeef") token = c.get_auth_token() self.assertEqual(b"deadbeef", token) @defer.inlineCallbacks def test_web_staticdir(self): """ a relative web.static dir is expanded properly """ basedir = u"client.Basic.test_web_staticdir" os.mkdir(basedir) fileutil.write(os.path.join(basedir, "tahoe.cfg"), BASECONFIG + "[node]\n" + "web.port = tcp:0:interface=127.0.0.1\n" + "web.static = relative\n") c = yield client.create_client(basedir) w = c.getServiceNamed("webish") abs_basedir = fileutil.abspath_expanduser_unicode(basedir) expected = fileutil.abspath_expanduser_unicode(u"relative", abs_basedir) self.failUnlessReallyEqual(w.staticdir, expected) # TODO: also test config options for SFTP. See Git history for deleted FTP # tests that could be used as basis for these tests. 
@defer.inlineCallbacks def _storage_dir_test(self, basedir, storage_path, expected_path): """ generic helper for following storage_dir tests """ assert isinstance(basedir, str) assert isinstance(storage_path, (str, type(None))) assert isinstance(expected_path, str) os.mkdir(basedir) cfg_path = os.path.join(basedir, "tahoe.cfg") fileutil.write( cfg_path, BASECONFIG + "[storage]\n" "enabled = true\n", ) if storage_path is not None: fileutil.write( cfg_path, "storage_dir = %s\n" % (storage_path,), mode="ab", ) c = yield client.create_client(basedir) self.assertEqual( c.getServiceNamed("storage").storedir, expected_path, ) def test_default_storage_dir(self): """ If no value is given for ``storage_dir`` in the ``storage`` section of ``tahoe.cfg`` then the ``storage`` directory beneath the node directory is used. """ basedir = u"client.Basic.test_default_storage_dir" config_path = None expected_path = os.path.join( abspath_expanduser_unicode(basedir), u"storage", ) return self._storage_dir_test( basedir, config_path, expected_path, ) def test_relative_storage_dir(self): """ A storage node can be directed to use a particular directory for share file storage by setting ``storage_dir`` in the ``storage`` section of ``tahoe.cfg``. If the path is relative, it is interpreted relative to the node's basedir. """ basedir = u"client.Basic.test_relative_storage_dir" config_path = u"myowndir" expected_path = os.path.join( abspath_expanduser_unicode(basedir), u"myowndir", ) return self._storage_dir_test( basedir, config_path, expected_path, ) def test_absolute_storage_dir(self): """ If the ``storage_dir`` item in the ``storage`` section of the configuration gives an absolute path then exactly that path is used. """ basedir = u"client.Basic.test_absolute_storage_dir" # create_client is going to try to make the storage directory so we # don't want a literal absolute path like /myowndir which we won't # have write permission to. So construct an absolute path that we # should be able to write to. base = u"\N{SNOWMAN}" if encodingutil.filesystem_encoding != "utf-8": base = u"melted_snowman" expected_path = abspath_expanduser_unicode( u"client.Basic.test_absolute_storage_dir_myowndir/" + base ) config_path = expected_path return self._storage_dir_test( basedir, config_path, expected_path, ) def _permute(self, sb, key): return [ s.get_longname() for s in sb.get_servers_for_psi(key) ] def test_permute(self): """ Permutations need to be stable across Tahoe releases, which is why we hardcode a specific expected order. This is because the order of these results determines which servers a client will choose to place shares on and which servers it will consult (and in what order) when trying to retrieve those shares. If the order ever changes, all already-placed shares become (at best) harder to find or (at worst) impossible to find. 
""" sb = StorageFarmBroker(True, None, EMPTY_CLIENT_CONFIG) ks = [b"%d" % i for i in range(5)] for k in ks: ann = {"anonymous-storage-FURL": SOME_FURL, "permutation-seed-base32": base32.b2a(k) } sb.test_add_rref(k, "rref", ann) one = self._permute(sb, b"one") two = self._permute(sb, b"two") self.failUnlessReallyEqual(one, [b'3',b'1',b'0',b'4',b'2']) self.failUnlessReallyEqual(two, [b'0',b'4',b'2',b'1',b'3']) self.assertEqual(sorted(one), ks) self.assertEqual(sorted(two), ks) self.assertNotEqual(one, two) sb.servers.clear() self.failUnlessReallyEqual(self._permute(sb, b"one"), []) def test_permute_with_preferred(self): """ Permutations need to be stable across Tahoe releases, which is why we hardcode a specific expected order. In this case, two values are preferred and should come first. """ sb = StorageFarmBroker( True, None, EMPTY_CLIENT_CONFIG, StorageClientConfig(preferred_peers=[b'1',b'4']), ) ks = [b"%d" % i for i in range(5)] for k in [b"%d" % i for i in range(5)]: ann = {"anonymous-storage-FURL": SOME_FURL, "permutation-seed-base32": base32.b2a(k) } sb.test_add_rref(k, "rref", ann) one = self._permute(sb, b"one") two = self._permute(sb, b"two") self.failUnlessReallyEqual(b"".join(one), b'14302') self.failUnlessReallyEqual(b"".join(two), b'41023') self.assertEqual(sorted(one), ks) self.assertEqual(sorted(one[:2]), [b"1", b"4"]) self.assertEqual(sorted(two), ks) self.assertEqual(sorted(two[:2]), [b"1", b"4"]) self.assertNotEqual(one, two) sb.servers.clear() self.failUnlessReallyEqual(self._permute(sb, b"one"), []) @defer.inlineCallbacks def test_versions(self): """ A client knows the versions of software it has """ basedir = "test_client.Basic.test_versions" os.mkdir(basedir) fileutil.write(os.path.join(basedir, "tahoe.cfg"), \ BASECONFIG + \ "[storage]\n" + \ "enabled = true\n") c = yield client.create_client(basedir) ss = c.getServiceNamed("storage") verdict = ss.get_version() self.failUnlessReallyEqual(verdict[b"application-version"], allmydata.__full_version__.encode("ascii")) self.failIfEqual(str(allmydata.__version__), "unknown") self.failUnless("." in str(allmydata.__full_version__), "non-numeric version in '%s'" % allmydata.__version__) # also test stats stats = c.get_stats() self.failUnless("node.uptime" in stats) self.failUnless(isinstance(stats["node.uptime"], float)) @defer.inlineCallbacks def test_helper_furl(self): """ various helper.furl arguments are parsed correctly """ basedir = "test_client.Basic.test_helper_furl" os.mkdir(basedir) @defer.inlineCallbacks def _check(config, expected_furl): fileutil.write(os.path.join(basedir, "tahoe.cfg"), BASECONFIG + config) c = yield client.create_client(basedir) uploader = c.getServiceNamed("uploader") furl, connected = uploader.get_helper_info() self.failUnlessEqual(furl, expected_furl) yield _check("", None) yield _check("helper.furl =\n", None) yield _check("helper.furl = \n", None) yield _check("helper.furl = None", None) yield _check("helper.furl = pb://blah\n", "pb://blah") def flush_but_dont_ignore(res): d = flushEventualQueue() def _done(ignored): return res d.addCallback(_done) return d class AnonymousStorage(SyncTestCase): """ Tests for behaviors of the client object with respect to the anonymous storage service. """ @defer.inlineCallbacks def test_anonymous_storage_enabled(self): """ If anonymous storage access is enabled then the client announces it. 
""" basedir = FilePath(self.id()) basedir.child("private").makedirs() write_introducer(basedir, "someintroducer", SOME_FURL) config = client.config_from_string( basedir.path, "tub.port", BASECONFIG + ( "[storage]\n" "enabled = true\n" "anonymous = true\n" ) ) node = yield client.create_client_from_config( config, _introducer_factory=MemoryIntroducerClient, ) self.assertThat( get_published_announcements(node), MatchesListwise([ matches_storage_announcement( basedir.path, anonymous=True, ), ]), ) @defer.inlineCallbacks def test_anonymous_storage_disabled(self): """ If anonymous storage access is disabled then the client does not announce it nor does it write a fURL for it to beneath the node directory. """ basedir = FilePath(self.id()) basedir.child("private").makedirs() write_introducer(basedir, "someintroducer", SOME_FURL) config = client.config_from_string( basedir.path, "tub.port", BASECONFIG + ( "[storage]\n" "enabled = true\n" "anonymous = false\n" ) ) node = yield client.create_client_from_config( config, _introducer_factory=MemoryIntroducerClient, ) self.expectThat( get_published_announcements(node), MatchesListwise([ matches_storage_announcement( basedir.path, anonymous=False, ), ]), ) self.expectThat( config.get_private_config("storage.furl", default=None), Is(None), ) @defer.inlineCallbacks def test_anonymous_storage_enabled_then_disabled(self): """ If a node is run with anonymous storage enabled and then later anonymous storage is disabled in the configuration for that node, it is not possible to reach the anonymous storage server via the originally published fURL. """ basedir = FilePath(self.id()) basedir.child("private").makedirs() enabled_config = client.config_from_string( basedir.path, "tub.port", BASECONFIG + ( "[storage]\n" "enabled = true\n" "anonymous = true\n" ) ) node = yield client.create_client_from_config( enabled_config, _introducer_factory=MemoryIntroducerClient, ) anonymous_storage_furl = enabled_config.get_private_config("storage.furl") def check_furl(): return node.tub.getReferenceForURL(anonymous_storage_furl) # Perform a sanity check that our test code makes sense: is this a # legit way to verify whether a fURL will refer to an object? self.assertThat( check_furl(), # If it doesn't raise a KeyError we're in business. Always(), ) disabled_config = client.config_from_string( basedir.path, "tub.port", BASECONFIG + ( "[storage]\n" "enabled = true\n" "anonymous = false\n" ) ) node = yield client.create_client_from_config( disabled_config, _introducer_factory=MemoryIntroducerClient, ) self.assertThat( check_furl, raises(KeyError), ) class IntroducerClients(unittest.TestCase): def test_invalid_introducer_furl(self): """ An introducer.furl of 'None' in the deprecated [client]introducer.furl field is invalid and causes `create_introducer_clients` to fail. """ cfg = ( "[client]\n" "introducer.furl = None\n" ) config = client.config_from_string("basedir", "client.port", cfg) with self.assertRaises(ValueError) as ctx: client.create_introducer_clients(config, main_tub=None) self.assertIn( "invalid 'introducer.furl = None'", str(ctx.exception) ) def get_known_server_details(a_client): """ Get some details about known storage servers from a client. :param _Client a_client: The client to inspect. :return: A ``list`` of two-tuples. Each element of the list corresponds to a "known server". The first element of each tuple is a server id. The second is the server's announcement. 
""" return list( (s.get_serverid(), s.get_announcement()) for s in a_client.storage_broker.get_known_servers() ) class StaticServers(Fixture): """ Create a ``servers.yaml`` file. """ def __init__(self, basedir, server_details): super(StaticServers, self).__init__() self._basedir = basedir self._server_details = server_details def _setUp(self): private = self._basedir.child(u"private") private.makedirs() servers = private.child(u"servers.yaml") servers.setContent(safe_dump({ u"storage": { serverid: { u"ann": announcement, } for (serverid, announcement) in self._server_details }, }).encode("utf-8")) class StorageClients(SyncTestCase): """ Tests for storage-related behavior of ``_Client``. """ def setUp(self): super(StorageClients, self).setUp() # Some other tests create Nodes and Node mutates tempfile.tempdir and # that screws us up because we're *not* making a Node. "Fix" it. See # https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3052 for the real fix, # though. import tempfile tempfile.tempdir = None tempdir = TempDir() self.useFixture(tempdir) self.basedir = FilePath(tempdir.path) @capture_logging( lambda case, logger: assertHasAction( case, logger, actionType=u"storage-client:broker:set-static-servers", succeeded=True, ), encoder_=json.AnyBytesJSONEncoder ) def test_static_servers(self, logger): """ Storage servers defined in ``private/servers.yaml`` are loaded into the storage broker. """ serverid = u"v0-aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" announcement = { u"nickname": u"some-storage-server", u"anonymous-storage-FURL": u"pb://xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx@tcp:storage.example:100/swissnum", } self.useFixture( StaticServers( self.basedir, [(serverid, announcement)], ), ) self.assertThat( client.create_client(self.basedir.asTextMode().path), succeeded( AfterPreprocessing( get_known_server_details, Equals([(serverid.encode("utf-8"), announcement)]), ), ), ) @capture_logging( lambda case, logger: assertHasAction( case, logger, actionType=u"storage-client:broker:make-storage-server", succeeded=False, ), encoder_=json.AnyBytesJSONEncoder ) def test_invalid_static_server(self, logger): """ An invalid announcement for a static server does not prevent other static servers from being loaded. """ # Some good details serverid = u"v1-aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" announcement = { u"nickname": u"some-storage-server", u"anonymous-storage-FURL": u"pb://xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx@tcp:storage.example:100/swissnum", } self.useFixture( StaticServers( self.basedir, [(serverid.encode("ascii"), announcement), # Along with a "bad" server announcement. Order in this list # doesn't matter, yaml serializer and Python dicts are going # to shuffle everything around kind of randomly. (u"v0-bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", {u"nickname": u"another-storage-server", u"anonymous-storage-FURL": None, }), ], ), ) self.assertThat( client.create_client(self.basedir.asTextMode().path), succeeded( AfterPreprocessing( get_known_server_details, # It should have the good server details. Equals([(serverid.encode("utf-8"), announcement)]), ), ), ) class Run(unittest.TestCase, testutil.StallMixin): def setUp(self): self.sparent = service.MultiService() self.sparent.startService() def tearDown(self): d = self.sparent.stopService() d.addBoth(flush_but_dont_ignore) return d @defer.inlineCallbacks def test_loadable(self): """ A configuration consisting only of an introducer can be turned into a client node. 
""" basedir = FilePath("test_client.Run.test_loadable") private = basedir.child("private") private.makedirs() dummy = "pb://wl74cyahejagspqgy4x5ukrvfnevlknt@127.0.0.1:58889/bogus" write_introducer(basedir, "someintroducer", dummy) basedir.child("tahoe.cfg").setContent(BASECONFIG.encode("ascii")) basedir.child(client._Client.EXIT_TRIGGER_FILE).touch() yield client.create_client(basedir.path) @defer.inlineCallbacks def test_reloadable(self): from twisted.internet import reactor dummy = "pb://wl74cyahejagspqgy4x5ukrvfnevlknt@127.0.0.1:58889/bogus" fixture = UseNode(None, None, FilePath(self.mktemp()), dummy, reactor=reactor) fixture.setUp() self.addCleanup(fixture.cleanUp) c1 = yield fixture.create_node() c1.setServiceParent(self.sparent) # delay to let the service start up completely. I'm not entirely sure # this is necessary. yield self.stall(delay=2.0) yield c1.disownServiceParent() # the cygwin buildslave seems to need more time to let the old # service completely shut down. When delay=0.1, I saw this test fail, # probably due to the logport trying to reclaim the old socket # number. This suggests that either we're dropping a Deferred # somewhere in the shutdown sequence, or that cygwin is just cranky. yield self.stall(delay=2.0) # TODO: pause for slightly over one second, to let # Client._check_exit_trigger poll the file once. That will exercise # another few lines. Then add another test in which we don't # update the file at all, and watch to see the node shutdown. # (To do this, use a modified node which overrides Node.shutdown(), # also change _check_exit_trigger to use it instead of a raw # reactor.stop, also instrument the shutdown event in an # attribute that we can check.) c2 = yield fixture.create_node() c2.setServiceParent(self.sparent) yield c2.disownServiceParent() class NodeMakerTests(testutil.ReallyEqualMixin, AsyncBrokenTestCase): def _make_node_maker(self, mode, writecap, deep_immutable): """ Create a callable which can create an ``IFilesystemNode`` provider for the given cap. :param unicode mode: The read/write combination to pass to ``NodeMaker.create_from_cap``. If it contains ``u"r"`` then a readcap will be passed in. If it contains ``u"w"`` then a writecap will be passed in. :param IURI writecap: The capability for which to create a node. :param bool deep_immutable: Whether to request a "deep immutable" node which forces the result to be an immutable ``IFilesystemNode`` (I think -exarkun). """ if writecap.is_mutable(): # It's just not a valid combination to have a mutable alongside # deep_immutable = True. It's easier to fix deep_immutable than # writecap to clear up this conflict. deep_immutable = False if "r" in mode: readcap = writecap.get_readonly().to_string() else: readcap = None if "w" in mode: writecap = writecap.to_string() else: writecap = None nm = NodeMaker( storage_broker=None, secret_holder=None, history=None, uploader=None, terminator=None, default_encoding_parameters={u"k": 1, u"n": 1}, mutable_file_default=None, key_generator=None, blacklist=None, ) return partial( nm.create_from_cap, writecap, readcap, deep_immutable, ) @given( mode=sampled_from(["w", "r", "rw"]), writecap=write_capabilities(), deep_immutable=booleans(), ) def test_cached_result(self, mode, writecap, deep_immutable): """ ``NodeMaker.create_from_cap`` returns the same object when called with the same arguments. 
""" make_node = self._make_node_maker(mode, writecap, deep_immutable) original = make_node() additional = make_node() self.assertThat( original, Is(additional), ) @given( mode=sampled_from(["w", "r", "rw"]), writecap=write_capabilities(), deep_immutable=booleans(), ) def test_cache_expired(self, mode, writecap, deep_immutable): """ After the node object returned by an earlier call to ``NodeMaker.create_from_cap`` has been garbage collected, a new call to ``NodeMaker.create_from_cap`` returns a node object, maybe even a new one although we can't really prove it. """ make_node = self._make_node_maker(mode, writecap, deep_immutable) make_node() additional = make_node() self.assertThat( additional, AfterPreprocessing( lambda node: node.get_readonly_uri(), Equals(writecap.get_readonly().to_string()), ), ) @defer.inlineCallbacks def test_maker(self): basedir = "client/NodeMaker/maker" fileutil.make_dirs(basedir) fileutil.write(os.path.join(basedir, "tahoe.cfg"), BASECONFIG) c = yield client.create_client(basedir) n = c.create_node_from_uri(b"URI:CHK:6nmrpsubgbe57udnexlkiwzmlu:bjt7j6hshrlmadjyr7otq3dc24end5meo5xcr5xe5r663po6itmq:3:10:7277") self.failUnless(IFilesystemNode.providedBy(n)) self.failUnless(IFileNode.providedBy(n)) self.failUnless(IImmutableFileNode.providedBy(n)) self.failIf(IMutableFileNode.providedBy(n)) self.failIf(IDirectoryNode.providedBy(n)) self.failUnless(n.is_readonly()) self.failIf(n.is_mutable()) # Testing #1679. There was a bug that would occur when downloader was # downloading the same readcap more than once concurrently, so the # filenode object was cached, and there was a failure from one of the # servers in one of the download attempts. No subsequent download # attempt would attempt to use that server again, which would lead to # the file being undownloadable until the gateway was restarted. The # current fix for this (hopefully to be superceded by a better fix # eventually) is to prevent re-use of filenodes, so the NodeMaker is # hereby required *not* to cache and re-use filenodes for CHKs. 
other_n = c.create_node_from_uri(b"URI:CHK:6nmrpsubgbe57udnexlkiwzmlu:bjt7j6hshrlmadjyr7otq3dc24end5meo5xcr5xe5r663po6itmq:3:10:7277") self.failIf(n is other_n, (n, other_n)) n = c.create_node_from_uri(b"URI:LIT:n5xgk") self.failUnless(IFilesystemNode.providedBy(n)) self.failUnless(IFileNode.providedBy(n)) self.failUnless(IImmutableFileNode.providedBy(n)) self.failIf(IMutableFileNode.providedBy(n)) self.failIf(IDirectoryNode.providedBy(n)) self.failUnless(n.is_readonly()) self.failIf(n.is_mutable()) n = c.create_node_from_uri(b"URI:SSK:n6x24zd3seu725yluj75q5boaa:mm6yoqjhl6ueh7iereldqxue4nene4wl7rqfjfybqrehdqmqskvq") self.failUnless(IFilesystemNode.providedBy(n)) self.failUnless(IFileNode.providedBy(n)) self.failIf(IImmutableFileNode.providedBy(n)) self.failUnless(IMutableFileNode.providedBy(n)) self.failIf(IDirectoryNode.providedBy(n)) self.failIf(n.is_readonly()) self.failUnless(n.is_mutable()) n = c.create_node_from_uri(b"URI:SSK-RO:b7sr5qsifnicca7cbk3rhrhbvq:mm6yoqjhl6ueh7iereldqxue4nene4wl7rqfjfybqrehdqmqskvq") self.failUnless(IFilesystemNode.providedBy(n)) self.failUnless(IFileNode.providedBy(n)) self.failIf(IImmutableFileNode.providedBy(n)) self.failUnless(IMutableFileNode.providedBy(n)) self.failIf(IDirectoryNode.providedBy(n)) self.failUnless(n.is_readonly()) self.failUnless(n.is_mutable()) n = c.create_node_from_uri(b"URI:DIR2:n6x24zd3seu725yluj75q5boaa:mm6yoqjhl6ueh7iereldqxue4nene4wl7rqfjfybqrehdqmqskvq") self.failUnless(IFilesystemNode.providedBy(n)) self.failIf(IFileNode.providedBy(n)) self.failIf(IImmutableFileNode.providedBy(n)) self.failIf(IMutableFileNode.providedBy(n)) self.failUnless(IDirectoryNode.providedBy(n)) self.failIf(n.is_readonly()) self.failUnless(n.is_mutable()) n = c.create_node_from_uri(b"URI:DIR2-RO:b7sr5qsifnicca7cbk3rhrhbvq:mm6yoqjhl6ueh7iereldqxue4nene4wl7rqfjfybqrehdqmqskvq") self.failUnless(IFilesystemNode.providedBy(n)) self.failIf(IFileNode.providedBy(n)) self.failIf(IImmutableFileNode.providedBy(n)) self.failIf(IMutableFileNode.providedBy(n)) self.failUnless(IDirectoryNode.providedBy(n)) self.failUnless(n.is_readonly()) self.failUnless(n.is_mutable()) unknown_rw = b"lafs://from_the_future" unknown_ro = b"lafs://readonly_from_the_future" n = c.create_node_from_uri(unknown_rw, unknown_ro) self.failUnless(IFilesystemNode.providedBy(n)) self.failIf(IFileNode.providedBy(n)) self.failIf(IImmutableFileNode.providedBy(n)) self.failIf(IMutableFileNode.providedBy(n)) self.failIf(IDirectoryNode.providedBy(n)) self.failUnless(n.is_unknown()) self.failUnlessReallyEqual(n.get_uri(), unknown_rw) self.failUnlessReallyEqual(n.get_write_uri(), unknown_rw) self.failUnlessReallyEqual(n.get_readonly_uri(), b"ro." + unknown_ro) # Note: it isn't that we *intend* to deploy non-ASCII caps in # the future, it is that we want to make sure older Tahoe-LAFS # versions wouldn't choke on them if we were to do so. See # #1051 and wiki:NewCapDesign for details. unknown_rw = u"lafs://from_the_future_rw_\u263A".encode('utf-8') unknown_ro = u"lafs://readonly_from_the_future_ro_\u263A".encode('utf-8') n = c.create_node_from_uri(unknown_rw, unknown_ro) self.failUnless(IFilesystemNode.providedBy(n)) self.failIf(IFileNode.providedBy(n)) self.failIf(IImmutableFileNode.providedBy(n)) self.failIf(IMutableFileNode.providedBy(n)) self.failIf(IDirectoryNode.providedBy(n)) self.failUnless(n.is_unknown()) self.failUnlessReallyEqual(n.get_uri(), unknown_rw) self.failUnlessReallyEqual(n.get_write_uri(), unknown_rw) self.failUnlessReallyEqual(n.get_readonly_uri(), b"ro." 
+ unknown_ro) def matches_dummy_announcement(name, value): """ Matches the portion of an announcement for the ``DummyStorage`` storage server plugin. :param unicode name: The name of the dummy plugin. :param unicode value: The arbitrary value in the dummy plugin announcement. :return: a testtools-style matcher """ return MatchesDict({ # Everyone gets a name and a fURL added to their announcement. u"name": Equals(name), u"storage-server-FURL": matches_furl(), # The plugin can contribute things, too. u"value": Equals(value), }) class StorageAnnouncementTests(SyncTestCase): """ Tests for the storage announcement published by the client. """ def setUp(self): super(StorageAnnouncementTests, self).setUp() self.basedir = FilePath(self.useFixture(TempDir()).path) create_node_dir(self.basedir.path, u"") # Write an introducer configuration or we can't observer # announcements. write_introducer(self.basedir, "someintroducer", SOME_FURL) def get_config(self, storage_enabled, more_storage="", more_sections=""): return """ [client] # Empty [node] tub.location = tcp:192.0.2.0:1234 [storage] enabled = {storage_enabled} {more_storage} {more_sections} """.format( storage_enabled=storage_enabled, more_storage=more_storage, more_sections=more_sections, ) def test_no_announcement(self): """ No storage announcement is published if storage is not enabled. """ config = client.config_from_string( self.basedir.path, "tub.port", self.get_config(storage_enabled=False), ) self.assertThat( client.create_client_from_config( config, _introducer_factory=MemoryIntroducerClient, ), succeeded(AfterPreprocessing( get_published_announcements, Equals([]), )), ) def test_anonymous_storage_announcement(self): """ A storage announcement with the anonymous storage fURL is published when storage is enabled. """ config = client.config_from_string( self.basedir.path, "tub.port", self.get_config(storage_enabled=True), ) client_deferred = client.create_client_from_config( config, _introducer_factory=MemoryIntroducerClient, ) self.assertThat( client_deferred, # The Deferred succeeds succeeded(AfterPreprocessing( # The announcements published by the client should ... get_published_announcements, # Match the following list (of one element) ... MatchesListwise([ # The only element in the list ... matches_storage_announcement(self.basedir.path), ]), )), ) def test_single_storage_plugin_announcement(self): """ The announcement from a single enabled storage plugin is published when storage is enabled. """ self.useFixture(UseTestPlugins()) value = u"thing" config = client.config_from_string( self.basedir.path, "tub.port", self.get_config( storage_enabled=True, more_storage="plugins=tahoe-lafs-dummy-v1", more_sections=( "[storageserver.plugins.tahoe-lafs-dummy-v1]\n" "some = {}\n".format(value) ), ), ) self.assertThat( client.create_client_from_config( config, _introducer_factory=MemoryIntroducerClient, ), succeeded(AfterPreprocessing( get_published_announcements, MatchesListwise([ matches_storage_announcement( self.basedir.path, options=[ matches_dummy_announcement( u"tahoe-lafs-dummy-v1", value, ), ], ), ]), )), ) def test_multiple_storage_plugin_announcements(self): """ The announcements from several enabled storage plugins are published when storage is enabled. 
""" self.useFixture(UseTestPlugins()) config = client.config_from_string( self.basedir.path, "tub.port", self.get_config( storage_enabled=True, more_storage="plugins=tahoe-lafs-dummy-v1,tahoe-lafs-dummy-v2", more_sections=( "[storageserver.plugins.tahoe-lafs-dummy-v1]\n" "some = thing-1\n" "[storageserver.plugins.tahoe-lafs-dummy-v2]\n" "some = thing-2\n" ), ), ) self.assertThat( client.create_client_from_config( config, _introducer_factory=MemoryIntroducerClient, ), succeeded(AfterPreprocessing( get_published_announcements, MatchesListwise([ matches_storage_announcement( self.basedir.path, options=[ matches_dummy_announcement( u"tahoe-lafs-dummy-v1", u"thing-1", ), matches_dummy_announcement( u"tahoe-lafs-dummy-v2", u"thing-2", ), ], ), ]), )), ) def test_stable_storage_server_furl(self): """ The value for the ``storage-server-FURL`` item in the announcement for a particular storage server plugin is stable across different node instantiations. """ self.useFixture(UseTestPlugins()) config = client.config_from_string( self.basedir.path, "tub.port", self.get_config( storage_enabled=True, more_storage="plugins=tahoe-lafs-dummy-v1", more_sections=( "[storageserver.plugins.tahoe-lafs-dummy-v1]\n" "some = thing\n" ), ), ) node_a = client.create_client_from_config( config, _introducer_factory=MemoryIntroducerClient, ) node_b = client.create_client_from_config( config, _introducer_factory=MemoryIntroducerClient, ) self.assertThat( defer.gatherResults([node_a, node_b]), succeeded(AfterPreprocessing( partial(map, get_published_announcements), MatchesSameElements(), )), ) def test_storage_plugin_without_configuration(self): """ A storage plugin with no configuration is loaded and announced. """ self.useFixture(UseTestPlugins()) config = client.config_from_string( self.basedir.path, "tub.port", self.get_config( storage_enabled=True, more_storage="plugins=tahoe-lafs-dummy-v1", ), ) self.assertThat( client.create_client_from_config( config, _introducer_factory=MemoryIntroducerClient, ), succeeded(AfterPreprocessing( get_published_announcements, MatchesListwise([ matches_storage_announcement( self.basedir.path, options=[ matches_dummy_announcement( u"tahoe-lafs-dummy-v1", u"default-value", ), ], ), ]), )), ) def test_broken_storage_plugin(self): """ A storage plugin that raises an exception from ``get_storage_server`` causes ``client.create_client_from_config`` to return ``Deferred`` that fails. """ self.useFixture(UseTestPlugins()) config = client.config_from_string( self.basedir.path, "tub.port", self.get_config( storage_enabled=True, more_storage="plugins=tahoe-lafs-dummy-v1", more_sections=( "[storageserver.plugins.tahoe-lafs-dummy-v1]\n" # This will make it explode on instantiation. "invalid = configuration\n" ) ), ) self.assertThat( client.create_client_from_config( config, _introducer_factory=MemoryIntroducerClient, ), failed(Always()), ) def test_storage_plugin_not_found(self): """ ``client.create_client_from_config`` raises ``UnknownConfigError`` when called with a configuration which enables a storage plugin that is not available on the system. 
""" config = client.config_from_string( self.basedir.path, "tub.port", self.get_config( storage_enabled=True, more_storage="plugins=tahoe-lafs-dummy-vX", ), ) self.assertThat( client.create_client_from_config( config, _introducer_factory=MemoryIntroducerClient, ), failed( AfterPreprocessing( lambda f: f.type, Equals(configutil.UnknownConfigError), ), ), ) def test_announcement_includes_grid_manager(self): """ When Grid Manager is enabled certificates are included in the announcement """ fake_cert = { "certificate": "{\"expires\":1601687822,\"public_key\":\"pub-v0-cbq6hcf3pxcz6ouoafrbktmkixkeuywpcpbcomzd3lqbkq4nmfga\",\"version\":1}", "signature": "fvjd3uvvupf2v6tnvkwjd473u3m3inyqkwiclhp7balmchkmn3px5pei3qyfjnhymq4cjcwvbpqmcwwnwswdtrfkpnlaxuih2zbdmda", } with self.basedir.child("zero.cert").open("w") as f: f.write(json.dumps_bytes(fake_cert)) with self.basedir.child("gm0.cert").open("w") as f: f.write(json.dumps_bytes(fake_cert)) config = client.config_from_string( self.basedir.path, "tub.port", self.get_config( storage_enabled=True, more_storage="grid_management = True", more_sections=( "[grid_managers]\n" "gm0 = pub-v0-ibpbsexcjfbv3ni7gwlclgn6mldaqnqd5mrtan2fnq2b27xnovca\n" "[grid_manager_certificates]\n" "foo = zero.cert\n" ) ), ) self.assertThat( client.create_client_from_config( config, _introducer_factory=MemoryIntroducerClient, ), succeeded(AfterPreprocessing( lambda client: get_published_announcements(client)[0].ann, ContainsDict({ "grid-manager-certificates": Equals([fake_cert]), }), )), ) tahoe_lafs-1.20.0/src/allmydata/test/test_codec.py0000644000000000000000000001056613615410400017033 0ustar00""" Tests for allmydata.codec. Ported to Python 3. """ import os from twisted.trial import unittest from twisted.python import log from allmydata.codec import CRSEncoder, CRSDecoder, parse_params import random from allmydata.util import mathutil class T(unittest.TestCase): def do_test(self, size, required_shares, max_shares, fewer_shares=None): data0s = [os.urandom(mathutil.div_ceil(size, required_shares)) for i in range(required_shares)] enc = CRSEncoder() enc.set_params(size, required_shares, max_shares) params = enc.get_params() assert params == (size, required_shares, max_shares) serialized_params = enc.get_serialized_params() self.assertEqual(parse_params(serialized_params), params) log.msg("params: %s" % (params,)) d = enc.encode(data0s) def _done_encoding_all(shares_and_shareids): (shares, shareids) = shares_and_shareids self.failUnlessEqual(len(shares), max_shares) self.shares = shares self.shareids = shareids d.addCallback(_done_encoding_all) if fewer_shares is not None: # also validate that the desired_shareids= parameter works desired_shareids = random.sample(list(range(max_shares)), fewer_shares) d.addCallback(lambda res: enc.encode(data0s, desired_shareids)) def _check_fewer_shares(some_shares_and_their_shareids): (some_shares, their_shareids) = some_shares_and_their_shareids self.failUnlessEqual(tuple(their_shareids), tuple(desired_shareids)) d.addCallback(_check_fewer_shares) def _decode(shares_and_shareids): (shares, shareids) = shares_and_shareids dec = CRSDecoder() dec.set_params(*params) d1 = dec.decode(shares, shareids) return d1 def _check_data(decoded_shares): self.failUnlessEqual(len(b''.join(decoded_shares)), len(b''.join(data0s))) self.failUnlessEqual(len(decoded_shares), len(data0s)) for (i, (x, y)) in enumerate(zip(data0s, decoded_shares)): self.failUnlessEqual(x, y, "%s: %r != %r.... 
first share was %r" % (str(i), x, y, data0s[0],)) self.failUnless(b''.join(decoded_shares) == b''.join(data0s), "%s" % ("???",)) # 0data0sclipped = tuple(data0s) # data0sclipped[-1] = # self.failUnless(tuple(decoded_shares) == tuple(data0s)) def _decode_some(res): log.msg("_decode_some") # decode with a minimal subset of the shares some_shares = self.shares[:required_shares] some_shareids = self.shareids[:required_shares] return _decode((some_shares, some_shareids)) d.addCallback(_decode_some) d.addCallback(_check_data) def _decode_some_random(res): log.msg("_decode_some_random") # use a randomly-selected minimal subset l = random.sample(list(zip(self.shares, self.shareids)), required_shares) some_shares = [ x[0] for x in l ] some_shareids = [ x[1] for x in l ] return _decode((some_shares, some_shareids)) d.addCallback(_decode_some_random) d.addCallback(_check_data) def _decode_multiple(res): log.msg("_decode_multiple") # make sure we can re-use the decoder object shares1 = random.sample(self.shares, required_shares) sharesl1 = random.sample(list(zip(self.shares, self.shareids)), required_shares) shares1 = [ x[0] for x in sharesl1 ] shareids1 = [ x[1] for x in sharesl1 ] sharesl2 = random.sample(list(zip(self.shares, self.shareids)), required_shares) shares2 = [ x[0] for x in sharesl2 ] shareids2 = [ x[1] for x in sharesl2 ] dec = CRSDecoder() dec.set_params(*params) d1 = dec.decode(shares1, shareids1) d1.addCallback(_check_data) d1.addCallback(lambda res: dec.decode(shares2, shareids2)) d1.addCallback(_check_data) return d1 d.addCallback(_decode_multiple) return d def test_encode(self): return self.do_test(1000, 25, 100) def test_encode1(self): return self.do_test(8, 8, 16) def test_encode2(self): return self.do_test(125, 25, 100, 90) tahoe_lafs-1.20.0/src/allmydata/test/test_common_util.py0000644000000000000000000000462313615410400020300 0ustar00""" This module has been ported to Python 3. """ import sys import random from hypothesis import given from hypothesis.strategies import lists, sampled_from from testtools.matchers import Equals from twisted.python.reflect import ( ModuleNotFound, namedAny, ) from .common import ( SyncTestCase, disable_modules, ) from allmydata.test.common_util import flip_one_bit class TestFlipOneBit(SyncTestCase): def setUp(self): super(TestFlipOneBit, self).setUp() # I tried using version=1 on PY3 to avoid the if below, to no avail. random.seed(42) def test_accepts_byte_string(self): actual = flip_one_bit(b'foo') self.assertEqual(actual, b'fom') def test_rejects_unicode_string(self): self.assertRaises(AssertionError, flip_one_bit, u'foo') def some_existing_modules(): """ Build the names of modules (as native strings) that exist and can be imported. """ candidates = sorted( name for name in sys.modules if "." not in name and sys.modules[name] is not None ) return sampled_from(candidates) class DisableModulesTests(SyncTestCase): """ Tests for ``disable_modules``. """ def setup_example(self): return sys.modules.copy() def teardown_example(self, safe_modules): sys.modules.update(safe_modules) @given(lists(some_existing_modules(), unique=True)) def test_importerror(self, module_names): """ While the ``disable_modules`` context manager is active any import of the modules identified by the names passed to it result in ``ImportError`` being raised. 
""" def get_modules(): return list( namedAny(name) for name in module_names ) before_modules = get_modules() with disable_modules(*module_names): for name in module_names: with self.assertRaises(ModuleNotFound): namedAny(name) after_modules = get_modules() self.assertThat(before_modules, Equals(after_modules)) def test_dotted_names_rejected(self): """ If names with "." in them are passed to ``disable_modules`` then ``ValueError`` is raised. """ with self.assertRaises(ValueError): with disable_modules("foo.bar"): pass tahoe_lafs-1.20.0/src/allmydata/test/test_configutil.py0000644000000000000000000002227013615410400020114 0ustar00""" Tests for allmydata.util.configutil. Ported to Python 3. """ import os.path from configparser import ( ConfigParser, ) from functools import ( partial, ) from hypothesis import ( given, ) from hypothesis.strategies import ( dictionaries, text, characters, ) from twisted.python.filepath import ( FilePath, ) from twisted.trial import unittest from allmydata.util import configutil def arbitrary_config_dicts( min_sections=0, max_sections=3, max_section_name_size=8, max_items_per_section=3, max_item_length=8, max_value_length=8, ): """ Build ``dict[str, dict[str, str]]`` instances populated with arbitrary configurations. """ identifier_text = partial( text, # Don't allow most control characters or spaces alphabet=characters( blacklist_categories=('Cc', 'Cs', 'Zs'), ), ) return dictionaries( identifier_text( min_size=1, max_size=max_section_name_size, ), dictionaries( identifier_text( min_size=1, max_size=max_item_length, ), text(max_size=max_value_length), max_size=max_items_per_section, ), min_size=min_sections, max_size=max_sections, ) def to_configparser(dictconfig): """ Take a ``dict[str, dict[str, str]]`` and turn it into the corresponding populated ``ConfigParser`` instance. """ cp = ConfigParser() for section, items in dictconfig.items(): cp.add_section(section) for k, v in items.items(): cp.set( section, k, # ConfigParser has a feature that everyone knows and loves # where it will use %-style interpolation to substitute # values from one part of the config into another part of # the config. Escape all our `%`s to avoid hitting this # and complicating things. 
v.replace("%", "%%"), ) return cp class ConfigUtilTests(unittest.TestCase): def setUp(self): super(ConfigUtilTests, self).setUp() self.static_valid_config = configutil.ValidConfiguration( dict(node=['valid']), ) self.dynamic_valid_config = configutil.ValidConfiguration( dict(), lambda section_name: section_name == "node", lambda section_name, item_name: (section_name, item_name) == ("node", "valid"), ) def create_tahoe_cfg(self, cfg): d = self.mktemp() os.mkdir(d) fname = os.path.join(d, 'tahoe.cfg') with open(fname, "w") as f: f.write(cfg) return fname def test_config_utils(self): tahoe_cfg = self.create_tahoe_cfg("""\ [node] nickname = client-0 web.port = adopt-socket:fd=5 [storage] enabled = false """) # test that at least one option was read correctly config = configutil.get_config(tahoe_cfg) self.failUnlessEqual(config.get("node", "nickname"), "client-0") # test that set_config can mutate an existing option configutil.set_config(config, "node", "nickname", "Alice!") configutil.write_config(FilePath(tahoe_cfg), config) config = configutil.get_config(tahoe_cfg) self.failUnlessEqual(config.get("node", "nickname"), "Alice!") # test that set_config can set a new option descriptor = "Twas brillig, and the slithy toves Did gyre and gimble in the wabe" configutil.set_config(config, "node", "descriptor", descriptor) configutil.write_config(FilePath(tahoe_cfg), config) config = configutil.get_config(tahoe_cfg) self.failUnlessEqual(config.get("node", "descriptor"), descriptor) def test_config_validation_success(self): """ ``configutil.validate_config`` returns ``None`` when the configuration it is given has nothing more than the static sections and items defined by the validator. """ # should succeed, no exceptions configutil.validate_config( "", to_configparser({"node": {"valid": "foo"}}), self.static_valid_config, ) def test_config_dynamic_validation_success(self): """ A configuration with sections and items that are not matched by the static validation but are matched by the dynamic validation is considered valid. """ # should succeed, no exceptions configutil.validate_config( "", to_configparser({"node": {"valid": "foo"}}), self.dynamic_valid_config, ) def test_config_validation_invalid_item(self): config = to_configparser({"node": {"valid": "foo", "invalid": "foo"}}) e = self.assertRaises( configutil.UnknownConfigError, configutil.validate_config, "", config, self.static_valid_config, ) self.assertIn("section [node] contains unknown option 'invalid'", str(e)) def test_config_validation_invalid_section(self): """ A configuration with a section that is matched by neither the static nor dynamic validators is rejected. """ config = to_configparser({"node": {"valid": "foo"}, "invalid": {}}) e = self.assertRaises( configutil.UnknownConfigError, configutil.validate_config, "", config, self.static_valid_config, ) self.assertIn("contains unknown section [invalid]", str(e)) def test_config_dynamic_validation_invalid_section(self): """ A configuration with a section that is matched by neither the static nor dynamic validators is rejected. """ config = to_configparser({"node": {"valid": "foo"}, "invalid": {}}) e = self.assertRaises( configutil.UnknownConfigError, configutil.validate_config, "", config, self.dynamic_valid_config, ) self.assertIn("contains unknown section [invalid]", str(e)) def test_config_dynamic_validation_invalid_item(self): """ A configuration with a section, item pair that is matched by neither the static nor dynamic validators is rejected. 
""" config = to_configparser({"node": {"valid": "foo", "invalid": "foo"}}) e = self.assertRaises( configutil.UnknownConfigError, configutil.validate_config, "", config, self.dynamic_valid_config, ) self.assertIn("section [node] contains unknown option 'invalid'", str(e)) def test_duplicate_sections(self): """ Duplicate section names are merged. """ fname = self.create_tahoe_cfg('[node]\na = foo\n[node]\n b = bar\n') config = configutil.get_config(fname) self.assertEqual(config.get("node", "a"), "foo") self.assertEqual(config.get("node", "b"), "bar") @given(arbitrary_config_dicts()) def test_everything_valid(self, cfgdict): """ ``validate_config`` returns ``None`` when the validator is ``ValidConfiguration.everything()``. """ cfg = to_configparser(cfgdict) self.assertIs( configutil.validate_config( "", cfg, configutil.ValidConfiguration.everything(), ), None, ) @given(arbitrary_config_dicts(min_sections=1)) def test_nothing_valid(self, cfgdict): """ ``validate_config`` raises ``UnknownConfigError`` when the validator is ``ValidConfiguration.nothing()`` for all non-empty configurations. """ cfg = to_configparser(cfgdict) with self.assertRaises(configutil.UnknownConfigError): configutil.validate_config( "", cfg, configutil.ValidConfiguration.nothing(), ) def test_nothing_empty_valid(self): """ ``validate_config`` returns ``None`` when the validator is ``ValidConfiguration.nothing()`` if the configuration is empty. """ cfg = ConfigParser() self.assertIs( configutil.validate_config( "", cfg, configutil.ValidConfiguration.nothing(), ), None, ) @given(arbitrary_config_dicts()) def test_copy_config(self, cfgdict): """ ``copy_config`` creates a new ``ConfigParser`` object containing the same values as its input. """ cfg = to_configparser(cfgdict) copied = configutil.copy_config(cfg) # Should be equal self.assertEqual(cfg, copied) # But not because they're the same object. self.assertIsNot(cfg, copied) tahoe_lafs-1.20.0/src/allmydata/test/test_connection_status.py0000644000000000000000000001112713615410400021512 0ustar00""" Tests for allmydata.util.connection_status. 
""" from __future__ import annotations from typing import Optional from foolscap.reconnector import ReconnectionInfo, Reconnector from foolscap.info import ConnectionInfo from ..util import connection_status from .common import SyncTestCase def reconnector(info: ReconnectionInfo) -> Reconnector: rc = Reconnector(None, None, (), {}) # type: ignore[no-untyped-call] rc._reconnectionInfo = info return rc def connection_info( statuses: dict[str, str], handlers: dict[str, str], winningHint: Optional[str], establishedAt: Optional[int], ) -> ConnectionInfo: ci = ConnectionInfo() # type: ignore[no-untyped-call] ci.connectorStatuses = statuses ci.connectionHandlers = handlers ci.winningHint = winningHint ci.establishedAt = establishedAt return ci def reconnection_info( state: str, connection_info: ConnectionInfo, ) -> ReconnectionInfo: ri = ReconnectionInfo() # type: ignore[no-untyped-call] ri.state = state ri.connectionInfo = connection_info return ri class Status(SyncTestCase): def test_hint_statuses(self) -> None: ncs = connection_status._hint_statuses(["h2","h1"], {"h1": "hand1", "h4": "hand4"}, {"h1": "st1", "h2": "st2", "h3": "st3"}) self.assertEqual(ncs, {"h1 via hand1": "st1", "h2": "st2"}) def test_reconnector_connected(self) -> None: ci = connection_info({"h1": "st1"}, {"h1": "hand1"}, "h1", 120) ri = reconnection_info("connected", ci) rc = reconnector(ri) cs = connection_status.from_foolscap_reconnector(rc, 123) self.assertEqual(cs.connected, True) self.assertEqual(cs.summary, "Connected to h1 via hand1") self.assertEqual(cs.non_connected_statuses, {}) self.assertEqual(cs.last_connection_time, 120) self.assertEqual(cs.last_received_time, 123) def test_reconnector_connected_others(self) -> None: ci = connection_info({"h1": "st1", "h2": "st2"}, {"h1": "hand1"}, "h1", 120) ri = reconnection_info("connected", ci) rc = reconnector(ri) cs = connection_status.from_foolscap_reconnector(rc, 123) self.assertEqual(cs.connected, True) self.assertEqual(cs.summary, "Connected to h1 via hand1") self.assertEqual(cs.non_connected_statuses, {"h2": "st2"}) self.assertEqual(cs.last_connection_time, 120) self.assertEqual(cs.last_received_time, 123) def test_reconnector_connected_listener(self) -> None: ci = connection_info({"h1": "st1", "h2": "st2"}, {"h1": "hand1"}, None, 120) ci.listenerStatus = ("listener1", "successful") ri = reconnection_info("connected", ci) rc = reconnector(ri) cs = connection_status.from_foolscap_reconnector(rc, 123) self.assertEqual(cs.connected, True) self.assertEqual(cs.summary, "Connected via listener (listener1)") self.assertEqual(cs.non_connected_statuses, {"h1 via hand1": "st1", "h2": "st2"}) self.assertEqual(cs.last_connection_time, 120) self.assertEqual(cs.last_received_time, 123) def test_reconnector_connecting(self) -> None: ci = connection_info({"h1": "st1", "h2": "st2"}, {"h1": "hand1"}, None, None) ri = reconnection_info("connecting", ci) rc = reconnector(ri) cs = connection_status.from_foolscap_reconnector(rc, 123) self.assertEqual(cs.connected, False) self.assertEqual(cs.summary, "Trying to connect") self.assertEqual(cs.non_connected_statuses, {"h1 via hand1": "st1", "h2": "st2"}) self.assertEqual(cs.last_connection_time, None) self.assertEqual(cs.last_received_time, 123) def test_reconnector_waiting(self) -> None: ci = connection_info({"h1": "st1", "h2": "st2"}, {"h1": "hand1"}, None, None) ri = reconnection_info("waiting", ci) ri.lastAttempt = 10 ri.nextAttempt = 20 rc = reconnector(ri) cs = connection_status.from_foolscap_reconnector(rc, 5, time=lambda: 12) 
self.assertEqual(cs.connected, False) self.assertEqual(cs.summary, "Reconnecting in 8 seconds (last attempt 2s ago)") self.assertEqual(cs.non_connected_statuses, {"h1 via hand1": "st1", "h2": "st2"}) self.assertEqual(cs.last_connection_time, None) self.assertEqual(cs.last_received_time, 5) tahoe_lafs-1.20.0/src/allmydata/test/test_connections.py0000644000000000000000000002073313615410400020275 0ustar00""" Ported to Python 3. """ from twisted.trial import unittest from twisted.internet import reactor from foolscap.connections import tcp from testtools.matchers import ( MatchesDict, IsInstance, Equals, ) from ..node import PrivacyError, config_from_string from ..node import create_connection_handlers from ..node import create_main_tub from ..util.i2p_provider import create as create_i2p_provider from ..util.tor_provider import create as create_tor_provider from .common import ( SyncTestCase, ConstantAddresses, ) BASECONFIG = "" class CreateConnectionHandlersTests(SyncTestCase): """ Tests for the Foolscap connection handlers return by ``create_connection_handlers``. """ def test_foolscap_handlers(self): """ ``create_connection_handlers`` returns a Foolscap connection handlers dictionary mapping ``"tcp"`` to ``foolscap.connections.tcp.DefaultTCP``, ``"tor"`` to the supplied Tor provider's handler, and ``"i2p"`` to the supplied I2P provider's handler. """ config = config_from_string( "fake.port", "no-basedir", BASECONFIG, ) tor_endpoint = object() tor = ConstantAddresses(handler=tor_endpoint) i2p_endpoint = object() i2p = ConstantAddresses(handler=i2p_endpoint) _, foolscap_handlers = create_connection_handlers( config, i2p, tor, ) self.assertThat( foolscap_handlers, MatchesDict({ "tcp": IsInstance(tcp.DefaultTCP), "i2p": Equals(i2p_endpoint), "tor": Equals(tor_endpoint), }), ) class Tor(unittest.TestCase): def test_socksport_bad_endpoint(self): config = config_from_string( "fake.port", "no-basedir", BASECONFIG + "[tor]\nsocks.port = meow:unsupported\n", ) with self.assertRaises(ValueError) as ctx: tor_provider = create_tor_provider(reactor, config) tor_provider.get_tor_handler() self.assertIn( "Unknown endpoint type: 'meow'", str(ctx.exception) ) def test_socksport_not_integer(self): config = config_from_string( "fake.port", "no-basedir", BASECONFIG + "[tor]\nsocks.port = tcp:localhost:kumquat\n", ) with self.assertRaises(ValueError) as ctx: tor_provider = create_tor_provider(reactor, config) tor_provider.get_tor_handler() self.assertIn( "invalid literal for int()", str(ctx.exception) ) self.assertIn( "kumquat", str(ctx.exception) ) class I2P(unittest.TestCase): def test_samport_and_launch(self): config = config_from_string( "no-basedir", "fake.port", BASECONFIG + "[i2p]\n" + "sam.port = tcp:localhost:1234\n" + "launch = true\n", ) with self.assertRaises(ValueError) as ctx: i2p_provider = create_i2p_provider(reactor, config) i2p_provider.get_i2p_handler() self.assertIn( "must not set both sam.port and launch", str(ctx.exception) ) class Connections(unittest.TestCase): def setUp(self): self.basedir = 'BASEDIR' self.config = config_from_string("fake.port", self.basedir, BASECONFIG) def test_default(self): default_connection_handlers, _ = create_connection_handlers( self.config, ConstantAddresses(handler=object()), ConstantAddresses(handler=object()), ) self.assertEqual(default_connection_handlers["tcp"], "tcp") self.assertEqual(default_connection_handlers["tor"], "tor") self.assertEqual(default_connection_handlers["i2p"], "i2p") def test_tor(self): config = config_from_string( "fake.port", 
"no-basedir", BASECONFIG + "[connections]\ntcp = tor\n", ) default_connection_handlers, _ = create_connection_handlers( config, ConstantAddresses(handler=object()), ConstantAddresses(handler=object()), ) self.assertEqual(default_connection_handlers["tcp"], "tor") self.assertEqual(default_connection_handlers["tor"], "tor") self.assertEqual(default_connection_handlers["i2p"], "i2p") def test_tor_unimportable(self): """ If the configuration calls for substituting Tor for TCP and ``foolscap.connections.tor`` is not importable then ``create_connection_handlers`` raises ``ValueError`` with a message explaining this makes Tor unusable. """ self.config = config_from_string( "fake.port", "no-basedir", BASECONFIG + "[connections]\ntcp = tor\n", ) tor_provider = create_tor_provider( reactor, self.config, import_tor=lambda: None, ) with self.assertRaises(ValueError) as ctx: default_connection_handlers, _ = create_connection_handlers( self.config, i2p_provider=ConstantAddresses(handler=object()), tor_provider=tor_provider, ) self.assertEqual( str(ctx.exception), "'tahoe.cfg [connections] tcp='" " uses unavailable/unimportable handler type 'tor'." " Please pip install tahoe-lafs[tor] to fix.", ) def test_unknown(self): config = config_from_string( "fake.port", "no-basedir", BASECONFIG + "[connections]\ntcp = unknown\n", ) with self.assertRaises(ValueError) as ctx: create_connection_handlers( config, ConstantAddresses(handler=object()), ConstantAddresses(handler=object()), ) self.assertIn("'tahoe.cfg [connections] tcp='", str(ctx.exception)) self.assertIn("uses unknown handler type 'unknown'", str(ctx.exception)) def test_tcp_disabled(self): config = config_from_string( "fake.port", "no-basedir", BASECONFIG + "[connections]\ntcp = disabled\n", ) default_connection_handlers, _ = create_connection_handlers( config, ConstantAddresses(handler=object()), ConstantAddresses(handler=object()), ) self.assertEqual(default_connection_handlers["tcp"], None) self.assertEqual(default_connection_handlers["tor"], "tor") self.assertEqual(default_connection_handlers["i2p"], "i2p") class Privacy(unittest.TestCase): def test_connections(self): config = config_from_string( "fake.port", "no-basedir", BASECONFIG + "[node]\nreveal-IP-address = false\n", ) with self.assertRaises(PrivacyError) as ctx: create_connection_handlers( config, ConstantAddresses(handler=object()), ConstantAddresses(handler=object()), ) self.assertEqual( str(ctx.exception), "Privacy requested with `reveal-IP-address = false` " "but `tcp = tcp` conflicts with this.", ) def test_connections_tcp_disabled(self): config = config_from_string( "no-basedir", "fake.port", BASECONFIG + "[connections]\ntcp = disabled\n" + "[node]\nreveal-IP-address = false\n", ) default_connection_handlers, _ = create_connection_handlers( config, ConstantAddresses(handler=object()), ConstantAddresses(handler=object()), ) self.assertEqual(default_connection_handlers["tcp"], None) def test_tub_location_auto(self): config = config_from_string( "fake.port", "no-basedir", BASECONFIG + "[node]\nreveal-IP-address = false\n", ) with self.assertRaises(PrivacyError) as ctx: create_main_tub( config, tub_options={}, default_connection_handlers={}, foolscap_connection_handlers={}, i2p_provider=ConstantAddresses(), tor_provider=ConstantAddresses(), ) self.assertEqual( str(ctx.exception), "tub.location uses AUTO", ) tahoe_lafs-1.20.0/src/allmydata/test/test_consumer.py0000644000000000000000000000463213615410400017606 0ustar00""" Tests for allmydata.util.consumer. Ported to Python 3. 
""" from zope.interface import implementer from twisted.internet.interfaces import IPushProducer, IPullProducer from allmydata.util.consumer import MemoryConsumer from .common import ( SyncTestCase, ) from testtools.matchers import ( Equals, ) @implementer(IPushProducer) @implementer(IPullProducer) class Producer(object): """Can be used as either streaming or non-streaming producer. If used as streaming, the test should call iterate() manually. """ def __init__(self, consumer, data): self.data = data self.consumer = consumer self.done = False def stopProducing(self): pass def pauseProducing(self): pass def resumeProducing(self): """Kick off streaming.""" self.iterate() def iterate(self): """Do another iteration of writing.""" if self.done: raise RuntimeError( "There's a bug somewhere, shouldn't iterate after being done" ) if self.data: self.consumer.write(self.data.pop(0)) else: self.done = True self.consumer.unregisterProducer() class MemoryConsumerTests(SyncTestCase): """Tests for MemoryConsumer.""" def test_push_producer(self): """ A MemoryConsumer accumulates all data sent by a streaming producer. """ consumer = MemoryConsumer() producer = Producer(consumer, [b"abc", b"def", b"ghi"]) consumer.registerProducer(producer, True) self.assertThat(consumer.chunks, Equals([b"abc"])) producer.iterate() producer.iterate() self.assertThat(consumer.chunks, Equals([b"abc", b"def", b"ghi"])) self.assertFalse(consumer.done) producer.iterate() self.assertThat(consumer.chunks, Equals([b"abc", b"def", b"ghi"])) self.assertTrue(consumer.done) def test_pull_producer(self): """ A MemoryConsumer accumulates all data sent by a non-streaming producer. """ consumer = MemoryConsumer() producer = Producer(consumer, [b"abc", b"def", b"ghi"]) consumer.registerProducer(producer, False) self.assertThat(consumer.chunks, Equals([b"abc", b"def", b"ghi"])) self.assertTrue(consumer.done) # download_to_data() is effectively tested by some of the filenode tests, e.g. # test_immutable.py. tahoe_lafs-1.20.0/src/allmydata/test/test_crawler.py0000644000000000000000000004126113615410400017411 0ustar00""" Tests for allmydata.storage.crawler. Ported to Python 3. 
""" import time import os.path from twisted.trial import unittest from twisted.application import service from twisted.internet import defer from foolscap.api import eventually, fireEventually from allmydata.util import fileutil, hashutil, pollmixin from allmydata.storage.server import StorageServer, si_b2a from allmydata.storage.crawler import ShareCrawler, TimeSliceExceeded from allmydata.test.common_util import StallMixin class BucketEnumeratingCrawler(ShareCrawler): cpu_slice = 500 # make sure it can complete in a single slice slow_start = 0 def __init__(self, *args, **kwargs): ShareCrawler.__init__(self, *args, **kwargs) self.all_buckets = [] self.finished_d = defer.Deferred() def process_bucket(self, cycle, prefix, prefixdir, storage_index_b32): # Bucket _inputs_ are bytes, and that's what we will compare this # to: storage_index_b32 = storage_index_b32.encode("ascii") self.all_buckets.append(storage_index_b32) def finished_cycle(self, cycle): eventually(self.finished_d.callback, None) class PacedCrawler(ShareCrawler): cpu_slice = 500 # make sure it can complete in a single slice slow_start = 0 def __init__(self, *args, **kwargs): ShareCrawler.__init__(self, *args, **kwargs) self.countdown = 6 self.all_buckets = [] self.finished_d = defer.Deferred() self.yield_cb = None def process_bucket(self, cycle, prefix, prefixdir, storage_index_b32): # Bucket _inputs_ are bytes, and that's what we will compare this # to: storage_index_b32 = storage_index_b32.encode("ascii") self.all_buckets.append(storage_index_b32) self.countdown -= 1 if self.countdown == 0: # force a timeout. We restore it in yielding() self.cpu_slice = -1.0 def yielding(self, sleep_time): self.cpu_slice = 500 if self.yield_cb: self.yield_cb() def finished_cycle(self, cycle): eventually(self.finished_d.callback, None) class ConsumingCrawler(ShareCrawler): cpu_slice = 0.5 allowed_cpu_percentage = 0.5 minimum_cycle_time = 0 slow_start = 0 def __init__(self, *args, **kwargs): ShareCrawler.__init__(self, *args, **kwargs) self.accumulated = 0.0 self.cycles = 0 self.last_yield = 0.0 def process_bucket(self, cycle, prefix, prefixdir, storage_index_b32): start = time.time() time.sleep(0.05) elapsed = time.time() - start self.accumulated += elapsed self.last_yield += elapsed def finished_cycle(self, cycle): self.cycles += 1 def yielding(self, sleep_time): self.last_yield = 0.0 class OneShotCrawler(ShareCrawler): cpu_slice = 500 # make sure it can complete in a single slice slow_start = 0 def __init__(self, *args, **kwargs): ShareCrawler.__init__(self, *args, **kwargs) self.counter = 0 self.finished_d = defer.Deferred() def process_bucket(self, cycle, prefix, prefixdir, storage_index_b32): self.counter += 1 def finished_cycle(self, cycle): self.finished_d.callback(None) self.disownServiceParent() class Basic(unittest.TestCase, StallMixin, pollmixin.PollMixin): def setUp(self): self.s = service.MultiService() self.s.startService() def tearDown(self): return self.s.stopService() def si(self, i): return hashutil.storage_index_hash(b"%d" % (i,)) def rs(self, i, serverid): return hashutil.bucket_renewal_secret_hash(b"%d" % (i,), serverid) def cs(self, i, serverid): return hashutil.bucket_cancel_secret_hash(b"%d" % (i,), serverid) def write(self, i, ss, serverid, tail=0): si = self.si(i) si = si[:-1] + bytes(bytearray((tail,))) had,made = ss.allocate_buckets(si, self.rs(i, serverid), self.cs(i, serverid), set([0]), 99) made[0].write(0, b"data") made[0].close() return si_b2a(si) def test_immediate(self): self.basedir = 
"crawler/Basic/immediate" fileutil.make_dirs(self.basedir) serverid = b"\x00" * 20 ss = StorageServer(self.basedir, serverid) ss.setServiceParent(self.s) sis = [self.write(i, ss, serverid) for i in range(10)] statefile = os.path.join(self.basedir, "statefile") c = BucketEnumeratingCrawler(ss, statefile, allowed_cpu_percentage=.1) c.load_state() c.start_current_prefix(time.time()) self.failUnlessEqual(sorted(sis), sorted(c.all_buckets)) # make sure the statefile has been returned to the starting point c.finished_d = defer.Deferred() c.all_buckets = [] c.start_current_prefix(time.time()) self.failUnlessEqual(sorted(sis), sorted(c.all_buckets)) # check that a new crawler picks up on the state file properly c2 = BucketEnumeratingCrawler(ss, statefile) c2.load_state() c2.start_current_prefix(time.time()) self.failUnlessEqual(sorted(sis), sorted(c2.all_buckets)) def test_service(self): self.basedir = "crawler/Basic/service" fileutil.make_dirs(self.basedir) serverid = b"\x00" * 20 ss = StorageServer(self.basedir, serverid) ss.setServiceParent(self.s) sis = [self.write(i, ss, serverid) for i in range(10)] statefile = os.path.join(self.basedir, "statefile") c = BucketEnumeratingCrawler(ss, statefile) c.setServiceParent(self.s) # it should be legal to call get_state() and get_progress() right # away, even before the first tick is performed. No work should have # been done yet. s = c.get_state() p = c.get_progress() self.failUnlessEqual(s["last-complete-prefix"], None) self.failUnlessEqual(s["current-cycle"], None) self.failUnlessEqual(p["cycle-in-progress"], False) d = c.finished_d def _check(ignored): self.failUnlessEqual(sorted(sis), sorted(c.all_buckets)) d.addCallback(_check) return d def test_paced(self): self.basedir = "crawler/Basic/paced" fileutil.make_dirs(self.basedir) serverid = b"\x00" * 20 ss = StorageServer(self.basedir, serverid) ss.setServiceParent(self.s) # put four buckets in each prefixdir sis = [] for i in range(10): for tail in range(4): sis.append(self.write(i, ss, serverid, tail)) statefile = os.path.join(self.basedir, "statefile") c = PacedCrawler(ss, statefile) c.load_state() try: c.start_current_prefix(time.time()) except TimeSliceExceeded: pass # that should stop in the middle of one of the buckets. Since we # aren't using its normal scheduler, we have to save its state # manually. c.save_state() c.cpu_slice = PacedCrawler.cpu_slice self.failUnlessEqual(len(c.all_buckets), 6) c.start_current_prefix(time.time()) # finish it self.failUnlessEqual(len(sis), len(c.all_buckets)) self.failUnlessEqual(sorted(sis), sorted(c.all_buckets)) # make sure the statefile has been returned to the starting point c.finished_d = defer.Deferred() c.all_buckets = [] c.start_current_prefix(time.time()) self.failUnlessEqual(sorted(sis), sorted(c.all_buckets)) del c # start a new crawler, it should start from the beginning c = PacedCrawler(ss, statefile) c.load_state() try: c.start_current_prefix(time.time()) except TimeSliceExceeded: pass # that should stop in the middle of one of the buckets. Since we # aren't using its normal scheduler, we have to save its state # manually. 
c.save_state() c.cpu_slice = PacedCrawler.cpu_slice # a third crawler should pick up from where it left off c2 = PacedCrawler(ss, statefile) c2.all_buckets = c.all_buckets[:] c2.load_state() c2.countdown = -1 c2.start_current_prefix(time.time()) self.failUnlessEqual(len(sis), len(c2.all_buckets)) self.failUnlessEqual(sorted(sis), sorted(c2.all_buckets)) del c, c2 # now stop it at the end of a bucket (countdown=4), to exercise a # different place that checks the time c = PacedCrawler(ss, statefile) c.load_state() c.countdown = 4 try: c.start_current_prefix(time.time()) except TimeSliceExceeded: pass # that should stop at the end of one of the buckets. Again we must # save state manually. c.save_state() c.cpu_slice = PacedCrawler.cpu_slice self.failUnlessEqual(len(c.all_buckets), 4) c.start_current_prefix(time.time()) # finish it self.failUnlessEqual(len(sis), len(c.all_buckets)) self.failUnlessEqual(sorted(sis), sorted(c.all_buckets)) del c # stop it again at the end of the bucket, check that a new checker # picks up correctly c = PacedCrawler(ss, statefile) c.load_state() c.countdown = 4 try: c.start_current_prefix(time.time()) except TimeSliceExceeded: pass # that should stop at the end of one of the buckets. c.save_state() c2 = PacedCrawler(ss, statefile) c2.all_buckets = c.all_buckets[:] c2.load_state() c2.countdown = -1 c2.start_current_prefix(time.time()) self.failUnlessEqual(len(sis), len(c2.all_buckets)) self.failUnlessEqual(sorted(sis), sorted(c2.all_buckets)) del c, c2 def test_paced_service(self): self.basedir = "crawler/Basic/paced_service" fileutil.make_dirs(self.basedir) serverid = b"\x00" * 20 ss = StorageServer(self.basedir, serverid) ss.setServiceParent(self.s) sis = [self.write(i, ss, serverid) for i in range(10)] statefile = os.path.join(self.basedir, "statefile") c = PacedCrawler(ss, statefile) did_check_progress = [False] def check_progress(): c.yield_cb = None try: p = c.get_progress() self.failUnlessEqual(p["cycle-in-progress"], True) pct = p["cycle-complete-percentage"] # after 6 buckets, we happen to be at 76.17% complete. As # long as we create shares in deterministic order, this will # continue to be true. self.failUnlessEqual(int(pct), 76) left = p["remaining-sleep-time"] self.failUnless(isinstance(left, float), left) self.failUnless(left > 0.0, left) except Exception as e: did_check_progress[0] = e else: did_check_progress[0] = True c.yield_cb = check_progress c.setServiceParent(self.s) # that should get through 6 buckets, pause for a little while (and # run check_progress()), then resume d = c.finished_d def _check(ignored): if did_check_progress[0] is not True: raise did_check_progress[0] self.failUnless(did_check_progress[0]) self.failUnlessEqual(sorted(sis), sorted(c.all_buckets)) # at this point, the crawler should be sitting in the inter-cycle # timer, which should be pegged at the minumum cycle time self.failUnless(c.timer) self.failUnless(c.sleeping_between_cycles) self.failUnlessEqual(c.current_sleep_time, c.minimum_cycle_time) p = c.get_progress() self.failUnlessEqual(p["cycle-in-progress"], False) naptime = p["remaining-wait-time"] self.failUnless(isinstance(naptime, float), naptime) # min-cycle-time is 300, so this is basically testing that it took # less than 290s to crawl self.failUnless(naptime > 10.0, naptime) soon = p["next-crawl-time"] - time.time() self.failUnless(soon > 10.0, soon) d.addCallback(_check) return d def OFF_test_cpu_usage(self): # this test can't actually assert anything, because too many # buildslave machines are slow. 
But on a fast developer machine, it # can produce interesting results. So if you care about how well the # Crawler is accomplishing its run-slowly goals, re-enable this test # and read the stdout when it runs. self.basedir = "crawler/Basic/cpu_usage" fileutil.make_dirs(self.basedir) serverid = b"\x00" * 20 ss = StorageServer(self.basedir, serverid) ss.setServiceParent(self.s) for i in range(10): self.write(i, ss, serverid) statefile = os.path.join(self.basedir, "statefile") c = ConsumingCrawler(ss, statefile) c.setServiceParent(self.s) # this will run as fast as it can, consuming about 50ms per call to # process_bucket(), limited by the Crawler to about 50% cpu. We let # it run for a few seconds, then compare how much time # process_bucket() got vs wallclock time. It should get between 10% # and 70% CPU. This is dicey, there's about 100ms of overhead per # 300ms slice (saving the state file takes about 150-200us, but we do # it 1024 times per cycle, one for each [empty] prefixdir), leaving # 200ms for actual processing, which is enough to get through 4 # buckets each slice, then the crawler sleeps for 300ms/0.5 = 600ms, # giving us 900ms wallclock per slice. In 4.0 seconds we can do 4.4 # slices, giving us about 17 shares, so we merely assert that we've # finished at least one cycle in that time. # with a short cpu_slice (so we can keep this test down to 4 # seconds), the overhead is enough to make a nominal 50% usage more # like 30%. Forcing sleep_time to 0 only gets us 67% usage. start = time.time() d = self.stall(delay=4.0) def _done(res): elapsed = time.time() - start percent = 100.0 * c.accumulated / elapsed # our buildslaves vary too much in their speeds and load levels, # and many of them only manage to hit 7% usage when our target is # 50%. So don't assert anything about the results, just log them.
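# Back-of-envelope check using the figures in the comments above (illustrative
# only): roughly 17 calls at ~50ms each is ~0.85s of accumulated
# process_bucket() time, so over a 4.0s stall the computed figure is about
#     percent = 100.0 * 0.85 / 4.0  ~= 21%
# comfortably below the nominal 50% target, which is why the test only logs
# the result instead of asserting on it.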
print() print("crawler: got %d%% percent when trying for 50%%" % percent) print("crawler: got %d full cycles" % c.cycles) d.addCallback(_done) return d def test_empty_subclass(self): self.basedir = "crawler/Basic/empty_subclass" fileutil.make_dirs(self.basedir) serverid = b"\x00" * 20 ss = StorageServer(self.basedir, serverid) ss.setServiceParent(self.s) for i in range(10): self.write(i, ss, serverid) statefile = os.path.join(self.basedir, "statefile") c = ShareCrawler(ss, statefile) c.slow_start = 0 c.setServiceParent(self.s) # we just let it run for a while, to get figleaf coverage of the # empty methods in the base class def _check(): return bool(c.state["last-cycle-finished"] is not None) d = self.poll(_check) def _done(ignored): state = c.get_state() self.failUnless(state["last-cycle-finished"] is not None) d.addCallback(_done) return d def test_oneshot(self): self.basedir = "crawler/Basic/oneshot" fileutil.make_dirs(self.basedir) serverid = b"\x00" * 20 ss = StorageServer(self.basedir, serverid) ss.setServiceParent(self.s) for i in range(30): self.write(i, ss, serverid) statefile = os.path.join(self.basedir, "statefile") c = OneShotCrawler(ss, statefile) c.setServiceParent(self.s) d = c.finished_d def _finished_first_cycle(ignored): return fireEventually(c.counter) d.addCallback(_finished_first_cycle) def _check(old_counter): # the crawler should not do any work after it's been stopped self.failUnlessEqual(old_counter, c.counter) self.failIf(c.running) self.failIf(c.timer) self.failIf(c.current_sleep_time) s = c.get_state() self.failUnlessEqual(s["last-cycle-finished"], 0) self.failUnlessEqual(s["current-cycle"], None) d.addCallback(_check) return d tahoe_lafs-1.20.0/src/allmydata/test/test_crypto.py0000644000000000000000000004340013615410400017270 0ustar00import unittest from base64 import b64decode from binascii import a2b_hex, b2a_hex from twisted.python.filepath import FilePath from allmydata.crypto import ( aes, ed25519, rsa, ) from allmydata.crypto.util import remove_prefix from allmydata.crypto.error import BadPrefixError RESOURCE_DIR = FilePath(__file__).parent().child('data') class TestRegression(unittest.TestCase): ''' These tests are regression tests to ensure that the upgrade from `pycryptopp` to `cryptography` doesn't break anything. They check that data encrypted with old keys can be decrypted with new keys. ''' AES_KEY = b'My\x9c\xc0f\xd3\x03\x9a1\x8f\xbd\x17W_\x1f2' IV = b'\x96\x1c\xa0\xbcUj\x89\xc1\x85J\x1f\xeb=\x17\x04\xca' with RESOURCE_DIR.child('pycryptopp-rsa-2048-priv.txt').open('r') as f: # Created using `pycryptopp`: # # from base64 import b64encode # from pycryptopp.publickey import rsa # priv = rsa.generate(2048) # priv_str = b64encode(priv.serialize()) # pub_str = b64encode(priv.get_verifying_key().serialize()) RSA_2048_PRIV_KEY = b64decode(f.read().strip()) assert isinstance(RSA_2048_PRIV_KEY, bytes) with RESOURCE_DIR.child('pycryptopp-rsa-2048-sig.txt').open('r') as f: # Signature created using `RSA_2048_PRIV_KEY` via: # # sig = priv.sign(b'test') RSA_2048_SIG = b64decode(f.read().strip()) with RESOURCE_DIR.child('pycryptopp-rsa-2048-pub.txt').open('r') as f: # The public key corresponding to `RSA_2048_PRIV_KEY`.
RSA_2048_PUB_KEY = b64decode(f.read().strip()) with RESOURCE_DIR.child('pycryptopp-rsa-1024-priv.txt').open('r') as f: # Created using `pycryptopp`: # # from base64 import b64encode # from pycryptopp.publickey import rsa # priv = rsa.generate(1024) # priv_str = b64encode(priv.serialize()) # pub_str = b64encode(priv.get_verifying_key().serialize()) RSA_TINY_PRIV_KEY = b64decode(f.read().strip()) assert isinstance(RSA_TINY_PRIV_KEY, bytes) with RESOURCE_DIR.child('pycryptopp-rsa-32768-priv.txt').open('r') as f: # Created using `pycryptopp`: # # from base64 import b64encode # from pycryptopp.publickey import rsa # priv = rsa.generate(32768) # priv_str = b64encode(priv.serialize()) # pub_str = b64encode(priv.get_verifying_key().serialize()) RSA_HUGE_PRIV_KEY = b64decode(f.read().strip()) assert isinstance(RSA_HUGE_PRIV_KEY, bytes) def test_old_start_up_test(self): """ This was the old startup test run at import time in `pycryptopp.cipher.aes`. """ enc0 = b"dc95c078a2408989ad48a21492842087530f8afbc74536b9a963b4f1c4cb738b" cryptor = aes.create_decryptor(key=b"\x00" * 32) ct = aes.decrypt_data(cryptor, b"\x00" * 32) self.assertEqual(enc0, b2a_hex(ct)) cryptor = aes.create_decryptor(key=b"\x00" * 32) ct1 = aes.decrypt_data(cryptor, b"\x00" * 15) ct2 = aes.decrypt_data(cryptor, b"\x00" * 17) self.assertEqual(enc0, b2a_hex(ct1+ct2)) enc0 = b"66e94bd4ef8a2c3b884cfa59ca342b2e" cryptor = aes.create_decryptor(key=b"\x00" * 16) ct = aes.decrypt_data(cryptor, b"\x00" * 16) self.assertEqual(enc0, b2a_hex(ct)) cryptor = aes.create_decryptor(key=b"\x00" * 16) ct1 = aes.decrypt_data(cryptor, b"\x00" * 8) ct2 = aes.decrypt_data(cryptor, b"\x00" * 8) self.assertEqual(enc0, b2a_hex(ct1+ct2)) def _test_from_Niels_AES(keysize, result): def fake_ecb_using_ctr(k, p): encryptor = aes.create_encryptor(key=k, iv=p) return aes.encrypt_data(encryptor, b'\x00' * 16) E = fake_ecb_using_ctr b = 16 k = keysize S = b'\x00' * (k + b) for i in range(1000): K = S[-k:] P = S[-k-b:-k] S += E(K, E(K, P)) self.assertEqual(S[-b:], a2b_hex(result)) _test_from_Niels_AES(16, b'bd883f01035e58f42f9d812f2dacbcd8') _test_from_Niels_AES(32, b'c84b0f3a2c76dd9871900b07f09bdd3e') def test_aes_no_iv_process_short_input(self): ''' The old code used the following patterns with AES ciphers. import os from pycryptopp.cipher.aes import AES key = = os.urandom(16) ciphertext = AES(key).process(plaintext) This test verifies that using the new AES wrapper generates the same output. ''' plaintext = b'test' expected_ciphertext = b'\x7fEK\\' k = aes.create_decryptor(self.AES_KEY) ciphertext = aes.decrypt_data(k, plaintext) self.assertEqual(ciphertext, expected_ciphertext) def test_aes_no_iv_process_long_input(self): ''' The old code used the following patterns with AES ciphers. import os from pycryptopp.cipher.aes import AES key = = os.urandom(16) ciphertext = AES(key).process(plaintext) This test verifies that using the new AES wrapper generates the same output. ''' plaintext = b'hi' * 32 expected_ciphertext = ( b'cIPAY%o:\xce\xfex\x8e@^.\x90\xb1\x80a\xff\xd8^\xac\x8d\xa7/\x1d\xe6\x92\xa1\x04\x92' b'\x1f\xa1|\xd2$E\xb5\xe7\x9d\xae\xd1\x1f)\xe4\xc7\x83\xb8\xd5|dHhU\xc8\x9a\xb1\x10\xed' b'\xd1\xe7|\xd1') k = aes.create_decryptor(self.AES_KEY) ciphertext = aes.decrypt_data(k, plaintext) self.assertEqual(ciphertext, expected_ciphertext) def test_aes_with_iv_process_short_input(self): ''' The old code used the following patterns with AES ciphers. 
import os from pycryptopp.cipher.aes import AES key = = os.urandom(16) ciphertext = AES(key).process(plaintext) This test verifies that using the new AES wrapper generates the same output. ''' plaintext = b'test' expected_ciphertext = b'\x82\x0e\rt' k = aes.create_decryptor(self.AES_KEY, iv=self.IV) ciphertext = aes.decrypt_data(k, plaintext) self.assertEqual(ciphertext, expected_ciphertext) def test_aes_with_iv_process_long_input(self): ''' The old code used the following patterns with AES ciphers. import os from pycryptopp.cipher.aes import AES key = = os.urandom(16) ciphertext = AES(key).process(plaintext) This test verifies that using the new AES wrapper generates the same output. ''' plaintext = b'hi' * 32 expected_ciphertext = ( b'\x9e\x02\x16i}WL\xbf\x83\xac\xb4K\xf7\xa0\xdf\xa3\xba!3\x15\xd3(L\xb7\xb3\x91\xbcb' b'\x97a\xdc\x100?\xf5L\x9f\xd9\xeeO\x98\xda\xf5g\x93\xa7q\xe1\xb1~\xf8\x1b\xe8[\\s' b'\x144$\x86\xeaC^f') k = aes.create_decryptor(self.AES_KEY, iv=self.IV) ciphertext = aes.decrypt_data(k, plaintext) self.assertEqual(ciphertext, expected_ciphertext) def test_decode_ed15519_keypair(self): ''' Created using the old code: from allmydata.util.keyutil import make_keypair, parse_privkey, parse_pubkey test_data = b'test' priv_str, pub_str = make_keypair() priv, _ = parse_privkey(priv_str) pub = parse_pubkey(pub_str) sig = priv.sign(test_data) pub.verify(sig, test_data) This simply checks that keys and signatures generated using the old code are still valid using the new code. ''' priv_str = b'priv-v0-lqcj746bqa4npkb6zpyc6esd74x3bl6mbcjgqend7cvtgmcpawhq' pub_str = b'pub-v0-yzpqin3of3ep363lwzxwpvgai3ps43dao46k2jds5kw5ohhpcwhq' test_data = b'test' sig = (b'\xde\x0e\xd6\xe2\xf5\x03]8\xfe\xa71\xad\xb4g\x03\x11\x81\x8b\x08\xffz\xf4K\xa0' b'\x86 ier!\xe8\xe5#*\x9d\x8c\x0bI\x02\xd90\x0e7\xbeW\xbf\xa3\xfe\xc1\x1c\xf5+\xe9)' b'\xa3\xde\xc9\xc6s\xc9\x90\xf7x\x08') private_key, derived_public_key = ed25519.signing_keypair_from_string(priv_str) public_key = ed25519.verifying_key_from_string(pub_str) self.assertEqual( ed25519.string_from_verifying_key(public_key), ed25519.string_from_verifying_key(derived_public_key), ) new_sig = ed25519.sign_data(private_key, test_data) self.assertEqual(new_sig, sig) ed25519.verify_signature(public_key, new_sig, test_data) ed25519.verify_signature(derived_public_key, new_sig, test_data) ed25519.verify_signature(public_key, sig, test_data) ed25519.verify_signature(derived_public_key, sig, test_data) def test_decode_rsa_keypair(self): ''' This simply checks that keys and signatures generated using the old code are still valid using the new code. 
''' priv_key, pub_key = rsa.create_signing_keypair_from_string(self.RSA_2048_PRIV_KEY) rsa.verify_signature(pub_key, self.RSA_2048_SIG, b'test') def test_decode_tiny_rsa_keypair(self): ''' An unreasonably small RSA key is rejected ("unreasonably small" means less that 2048 bits) ''' with self.assertRaises(ValueError): rsa.create_signing_keypair_from_string(self.RSA_TINY_PRIV_KEY) def test_decode_huge_rsa_keypair(self): ''' An unreasonably _large_ RSA key is rejected ("unreasonably large" means 32768 or more bits) ''' with self.assertRaises(ValueError): rsa.create_signing_keypair_from_string(self.RSA_HUGE_PRIV_KEY) def test_encrypt_data_not_bytes(self): ''' only bytes can be encrypted ''' key = b'\x00' * 16 encryptor = aes.create_encryptor(key) with self.assertRaises(ValueError) as ctx: aes.encrypt_data(encryptor, u"not bytes") self.assertIn( "must be bytes", str(ctx.exception) ) def test_key_incorrect_size(self): ''' keys that aren't 16 or 32 bytes are rejected ''' key = b'\x00' * 12 with self.assertRaises(ValueError) as ctx: aes.create_encryptor(key) self.assertIn( "16 or 32 bytes long", str(ctx.exception) ) def test_iv_not_bytes(self): ''' iv must be bytes ''' key = b'\x00' * 16 with self.assertRaises(TypeError) as ctx: aes.create_encryptor(key, iv=u"1234567890abcdef") self.assertIn( "must be bytes", str(ctx.exception) ) def test_incorrect_iv_size(self): ''' iv must be 16 bytes ''' key = b'\x00' * 16 with self.assertRaises(ValueError) as ctx: aes.create_encryptor(key, iv=b'\x00' * 3) self.assertIn( "16 bytes long", str(ctx.exception) ) class TestEd25519(unittest.TestCase): """ Test allmydata.crypto.ed25519 """ def test_key_serialization(self): """ a serialized+deserialized keypair is the same as the original """ private_key, public_key = ed25519.create_signing_keypair() private_key_str = ed25519.string_from_signing_key(private_key) self.assertIsInstance(private_key_str, bytes) private_key2, public_key2 = ed25519.signing_keypair_from_string(private_key_str) # the deserialized signing keys are the same as the original self.assertEqual( ed25519.string_from_signing_key(private_key), ed25519.string_from_signing_key(private_key2), ) self.assertEqual( ed25519.string_from_verifying_key(public_key), ed25519.string_from_verifying_key(public_key2), ) # ditto, but for the verifying keys public_key_str = ed25519.string_from_verifying_key(public_key) self.assertIsInstance(public_key_str, bytes) public_key2 = ed25519.verifying_key_from_string(public_key_str) self.assertEqual( ed25519.string_from_verifying_key(public_key), ed25519.string_from_verifying_key(public_key2), ) def test_deserialize_private_not_bytes(self): ''' serialized key must be bytes ''' with self.assertRaises(ValueError) as ctx: ed25519.signing_keypair_from_string(u"not bytes") self.assertIn( "must be bytes", str(ctx.exception) ) def test_deserialize_public_not_bytes(self): ''' serialized key must be bytes ''' with self.assertRaises(ValueError) as ctx: ed25519.verifying_key_from_string(u"not bytes") self.assertIn( "must be bytes", str(ctx.exception) ) def test_signed_data_not_bytes(self): ''' data to sign must be bytes ''' priv, pub = ed25519.create_signing_keypair() with self.assertRaises(ValueError) as ctx: ed25519.sign_data(priv, u"not bytes") self.assertIn( "must be bytes", str(ctx.exception) ) def test_signature_not_bytes(self): ''' signature must be bytes ''' priv, pub = ed25519.create_signing_keypair() with self.assertRaises(ValueError) as ctx: ed25519.verify_signature(pub, u"not bytes", b"data") self.assertIn( "must be bytes", 
str(ctx.exception) ) def test_signature_data_not_bytes(self): ''' signed data must be bytes ''' priv, pub = ed25519.create_signing_keypair() with self.assertRaises(ValueError) as ctx: ed25519.verify_signature(pub, b"signature", u"not bytes") self.assertIn( "must be bytes", str(ctx.exception) ) def test_sign_invalid_pubkey(self): ''' pubkey must be correct kind of object ''' priv, pub = ed25519.create_signing_keypair() with self.assertRaises(ValueError) as ctx: ed25519.sign_data(object(), b"data") self.assertIn( "must be an Ed25519PrivateKey", str(ctx.exception) ) def test_verify_invalid_pubkey(self): ''' pubkey must be correct kind of object ''' priv, pub = ed25519.create_signing_keypair() with self.assertRaises(ValueError) as ctx: ed25519.verify_signature(object(), b"signature", b"data") self.assertIn( "must be an Ed25519PublicKey", str(ctx.exception) ) class TestRsa(unittest.TestCase): """ Tests related to allmydata.crypto.rsa module """ def test_keys(self): """ test that two instances of 'the same' key sign and verify data in the same way """ priv_key, pub_key = rsa.create_signing_keypair(2048) priv_key_str = rsa.der_string_from_signing_key(priv_key) self.assertIsInstance(priv_key_str, bytes) priv_key2, pub_key2 = rsa.create_signing_keypair_from_string(priv_key_str) # instead of asking "are these two keys equal", we can instead # test their function: can the second key verify a signature # produced by the first (and FAIL a signature with different # data) data_to_sign = b"test data" sig0 = rsa.sign_data(priv_key, data_to_sign) rsa.verify_signature(pub_key2, sig0, data_to_sign) # ..and the other way sig1 = rsa.sign_data(priv_key2, data_to_sign) rsa.verify_signature(pub_key, sig1, data_to_sign) # ..and a failed way with self.assertRaises(rsa.BadSignature): rsa.verify_signature(pub_key, sig1, data_to_sign + b"more") def test_sign_invalid_pubkey(self): ''' signing data using an invalid key-object fails ''' priv, pub = rsa.create_signing_keypair(1024) with self.assertRaises(ValueError) as ctx: rsa.sign_data(object(), b"data") self.assertIn( "must be an RSAPrivateKey", str(ctx.exception) ) def test_verify_invalid_pubkey(self): ''' verifying a signature using an invalid key-object fails ''' priv, pub = rsa.create_signing_keypair(1024) with self.assertRaises(ValueError) as ctx: rsa.verify_signature(object(), b"signature", b"data") self.assertIn( "must be an RSAPublicKey", str(ctx.exception) ) class TestUtil(unittest.TestCase): """ tests related to allmydata.crypto utils """ def test_remove_prefix_good(self): """ remove a simple prefix properly """ self.assertEqual( remove_prefix(b"foobar", b"foo"), b"bar" ) def test_remove_prefix_bad(self): """ attempt to remove a prefix that doesn't exist fails with exception """ with self.assertRaises(BadPrefixError): remove_prefix(b"foobar", b"bar") def test_remove_prefix_zero(self): """ removing a zero-length prefix does nothing """ self.assertEqual( remove_prefix(b"foobar", b""), b"foobar", ) def test_remove_prefix_entire_string(self): """ removing a prefix which is the whole string is empty """ self.assertEqual( remove_prefix(b"foobar", b"foobar"), b"", ) def test_remove_prefix_partial(self): """ removing a prefix with only partial match fails with exception """ with self.assertRaises(BadPrefixError): remove_prefix(b"foobar", b"fooz"), tahoe_lafs-1.20.0/src/allmydata/test/test_deepcheck.py0000644000000000000000000016263213615410400017673 0ustar00""" Ported to Python 3. 
""" from six import ensure_text import os, json from urllib.parse import quote as url_quote from twisted.trial import unittest from twisted.internet import defer from twisted.internet.defer import inlineCallbacks, returnValue from allmydata.immutable import upload from allmydata.mutable.common import UnrecoverableFileError from allmydata.mutable.publish import MutableData from allmydata.util import idlib from allmydata.util import base32 from allmydata.interfaces import ICheckResults, ICheckAndRepairResults, \ IDeepCheckResults, IDeepCheckAndRepairResults from allmydata.monitor import Monitor, OperationCancelledError from allmydata.uri import LiteralFileURI from allmydata.test.common import ErrorMixin, _corrupt_mutable_share_data, \ ShouldFailMixin from .common_util import StallMixin, run_cli_unicode from .common_web import do_http from allmydata.test.no_network import GridTestMixin from .cli.common import CLITestMixin def run_cli(verb, *argv): """Match usage in existing tests by accept *args.""" return run_cli_unicode(verb, list(argv)) class MutableChecker(GridTestMixin, unittest.TestCase, ErrorMixin): def test_good(self): self.basedir = "deepcheck/MutableChecker/good" self.set_up_grid() CONTENTS = b"a little bit of data" CONTENTS_uploadable = MutableData(CONTENTS) d = self.g.clients[0].create_mutable_file(CONTENTS_uploadable) def _created(node): self.node = node self.fileurl = "uri/" + url_quote(node.get_uri()) d.addCallback(_created) # now make sure the webapi verifier sees no problems d.addCallback(lambda ign: self.GET(self.fileurl+"?t=check&verify=true", method="POST")) def _got_results(out): self.failUnless(b"Healthy : Healthy" in out, out) self.failUnless(b"Recoverable Versions: 10*seq1-" in out, out) self.failIf(b"Not Healthy!" in out, out) self.failIf(b"Unhealthy" in out, out) self.failIf(b"Corrupt Shares" in out, out) d.addCallback(_got_results) d.addErrback(self.explain_web_error) return d def test_corrupt(self): self.basedir = "deepcheck/MutableChecker/corrupt" self.set_up_grid() CONTENTS = b"a little bit of data" CONTENTS_uploadable = MutableData(CONTENTS) d = self.g.clients[0].create_mutable_file(CONTENTS_uploadable) def _stash_and_corrupt(node): self.node = node self.fileurl = "uri/" + url_quote(node.get_uri()) self.corrupt_shares_numbered(node.get_uri(), [0], _corrupt_mutable_share_data) d.addCallback(_stash_and_corrupt) # now make sure the webapi verifier notices it d.addCallback(lambda ign: self.GET(self.fileurl+"?t=check&verify=true", method="POST")) def _got_results(out): self.failUnless(b"Not Healthy!" in out, out) self.failUnless(b"Unhealthy: best version has only 9 shares (encoding is 3-of-10)" in out, out) self.failUnless(b"Corrupt Shares:" in out, out) d.addCallback(_got_results) # now make sure the webapi repairer can fix it d.addCallback(lambda ign: self.GET(self.fileurl+"?t=check&verify=true&repair=true", method="POST")) def _got_repair_results(out): self.failUnless(b"
Repair successful
" in out, out) d.addCallback(_got_repair_results) d.addCallback(lambda ign: self.GET(self.fileurl+"?t=check&verify=true", method="POST")) def _got_postrepair_results(out): self.failIf(b"Not Healthy!" in out, out) self.failUnless(b"Recoverable Versions: 10*seq" in out, out) d.addCallback(_got_postrepair_results) d.addErrback(self.explain_web_error) return d def test_delete_share(self): self.basedir = "deepcheck/MutableChecker/delete_share" self.set_up_grid() CONTENTS = b"a little bit of data" CONTENTS_uploadable = MutableData(CONTENTS) d = self.g.clients[0].create_mutable_file(CONTENTS_uploadable) def _stash_and_delete(node): self.node = node self.fileurl = "uri/" + url_quote(node.get_uri()) self.delete_shares_numbered(node.get_uri(), [0]) d.addCallback(_stash_and_delete) # now make sure the webapi checker notices it d.addCallback(lambda ign: self.GET(self.fileurl+"?t=check&verify=false", method="POST")) def _got_results(out): self.failUnless(b"Not Healthy!" in out, out) self.failUnless(b"Unhealthy: best version has only 9 shares (encoding is 3-of-10)" in out, out) self.failIf(b"Corrupt Shares" in out, out) d.addCallback(_got_results) # now make sure the webapi repairer can fix it d.addCallback(lambda ign: self.GET(self.fileurl+"?t=check&verify=false&repair=true", method="POST")) def _got_repair_results(out): self.failUnless(b"Repair successful" in out) d.addCallback(_got_repair_results) d.addCallback(lambda ign: self.GET(self.fileurl+"?t=check&verify=false", method="POST")) def _got_postrepair_results(out): self.failIf(b"Not Healthy!" in out, out) self.failUnless(b"Recoverable Versions: 10*seq" in out) d.addCallback(_got_postrepair_results) d.addErrback(self.explain_web_error) return d class DeepCheckBase(GridTestMixin, ErrorMixin, StallMixin, ShouldFailMixin, CLITestMixin): def web_json(self, n, **kwargs): kwargs["output"] = "json" d = self.web(n, "POST", **kwargs) d.addCallback(self.decode_json) return d def decode_json(self, args): (s, url) = args try: data = json.loads(s) except ValueError: self.fail("%s: not JSON: '%s'" % (url, s)) return data def parse_streamed_json(self, s): s = ensure_text(s) for unit in s.split("\n"): if not unit: # stream should end with a newline, so split returns "" continue try: yield json.loads(unit) except ValueError as le: le.args = tuple(le.args + (unit,)) raise @inlineCallbacks def web(self, n, method="GET", **kwargs): # returns (data, url) url = (self.client_baseurls[0] + "uri/%s" % url_quote(n.get_uri()) + "?" 
+ "&".join(["%s=%s" % (k,str(v, "ascii") if isinstance(v, bytes) else v) for (k,v) in kwargs.items()])) data = yield do_http(method, url, browser_like_redirects=True) returnValue((data,url)) @inlineCallbacks def wait_for_operation(self, ophandle): url = self.client_baseurls[0] + "operations/" + str(ophandle, "ascii") url += "?t=status&output=JSON" while True: body = yield do_http("get", url) data = json.loads(body) if data["finished"]: break yield self.stall(delay=0.1) returnValue(data) @inlineCallbacks def get_operation_results(self, ophandle, output=None): url = self.client_baseurls[0] + "operations/" + str(ophandle, "ascii") url += "?t=status" if output: url += "&output=" + output body = yield do_http("get", url) if output and output.lower() == "json": data = json.loads(body) else: data = body returnValue(data) @inlineCallbacks def slow_web(self, n, output=None, **kwargs): # use ophandle= handle = base32.b2a(os.urandom(4)) yield self.web(n, "POST", ophandle=handle, **kwargs) yield self.wait_for_operation(handle) data = yield self.get_operation_results(handle, output=output) returnValue(data) class DeepCheckWebGood(DeepCheckBase, unittest.TestCase): # construct a small directory tree (with one dir, one immutable file, one # mutable file, two LIT files, one DIR2:LIT empty dir, one DIR2:LIT tiny # dir, and a loop), and then check/examine it in various ways. def set_up_tree(self): # 2.9s c0 = self.g.clients[0] d = c0.create_dirnode() def _created_root(n): self.root = n self.root_uri = n.get_uri() d.addCallback(_created_root) d.addCallback(lambda ign: c0.create_mutable_file(MutableData(b"mutable file contents"))) d.addCallback(lambda n: self.root.set_node(u"mutable", n)) def _created_mutable(n): self.mutable = n self.mutable_uri = n.get_uri() d.addCallback(_created_mutable) large = upload.Data(b"Lots of data\n" * 1000, None) d.addCallback(lambda ign: self.root.add_file(u"large", large)) def _created_large(n): self.large = n self.large_uri = n.get_uri() d.addCallback(_created_large) small = upload.Data(b"Small enough for a LIT", None) d.addCallback(lambda ign: self.root.add_file(u"small", small)) def _created_small(n): self.small = n self.small_uri = n.get_uri() d.addCallback(_created_small) small2 = upload.Data(b"Small enough for a LIT too", None) d.addCallback(lambda ign: self.root.add_file(u"small2", small2)) def _created_small2(n): self.small2 = n self.small2_uri = n.get_uri() d.addCallback(_created_small2) empty_litdir_uri = b"URI:DIR2-LIT:" tiny_litdir_uri = b"URI:DIR2-LIT:gqytunj2onug64tufqzdcosvkjetutcjkq5gw4tvm5vwszdgnz5hgyzufqydulbshj5x2lbm" # contains one child which is itself also LIT d.addCallback(lambda ign: self.root._create_and_validate_node(None, empty_litdir_uri, name=u"test_deepcheck empty_lit_dir")) def _created_empty_lit_dir(n): self.empty_lit_dir = n self.empty_lit_dir_uri = n.get_uri() self.root.set_node(u"empty_lit_dir", n) d.addCallback(_created_empty_lit_dir) d.addCallback(lambda ign: self.root._create_and_validate_node(None, tiny_litdir_uri, name=u"test_deepcheck tiny_lit_dir")) def _created_tiny_lit_dir(n): self.tiny_lit_dir = n self.tiny_lit_dir_uri = n.get_uri() self.root.set_node(u"tiny_lit_dir", n) d.addCallback(_created_tiny_lit_dir) d.addCallback(lambda ign: self.root.set_node(u"loop", self.root)) return d def check_is_healthy(self, cr, n, where, incomplete=False): self.failUnless(ICheckResults.providedBy(cr), where) self.failUnless(cr.is_healthy(), where) self.failUnlessEqual(cr.get_storage_index(), n.get_storage_index(), where) 
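# Context for the counters checked here (as set up by set_up_grid/set_up_tree):
# the test grid runs 10 storage servers and files are erasure-coded 3-of-10,
# so a healthy distributed file has one share on each of the 10 servers, needs
# any 3 of them to recover, and therefore reports happiness == 10, good shares
# == 10, encoding needed == 3 and encoding expected == 10.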
self.failUnlessEqual(cr.get_storage_index_string(), base32.b2a(n.get_storage_index()), where) num_servers = len(self.g.all_servers) self.failUnlessEqual(num_servers, 10, where) self.failUnlessEqual(cr.get_happiness(), num_servers, where) self.failUnlessEqual(cr.get_share_counter_good(), num_servers, where) self.failUnlessEqual(cr.get_encoding_needed(), 3, where) self.failUnlessEqual(cr.get_encoding_expected(), num_servers, where) if not incomplete: self.failUnlessEqual(cr.get_host_counter_good_shares(), num_servers, where) self.failUnlessEqual(cr.get_corrupt_shares(), [], where) if not incomplete: self.failUnlessEqual(sorted([s.get_serverid() for s in cr.get_servers_responding()]), sorted(self.g.get_all_serverids()), where) all_serverids = set() for (shareid, servers) in list(cr.get_sharemap().items()): all_serverids.update([s.get_serverid() for s in servers]) self.failUnlessEqual(sorted(all_serverids), sorted(self.g.get_all_serverids()), where) self.failUnlessEqual(cr.get_share_counter_wrong(), 0, where) self.failUnlessEqual(cr.get_version_counter_recoverable(), 1, where) self.failUnlessEqual(cr.get_version_counter_unrecoverable(), 0, where) def check_and_repair_is_healthy(self, cr, n, where, incomplete=False): self.failUnless(ICheckAndRepairResults.providedBy(cr), (where, cr)) self.failUnless(cr.get_pre_repair_results().is_healthy(), where) self.check_is_healthy(cr.get_pre_repair_results(), n, where, incomplete) self.failUnless(cr.get_post_repair_results().is_healthy(), where) self.check_is_healthy(cr.get_post_repair_results(), n, where, incomplete) self.failIf(cr.get_repair_attempted(), where) def deep_check_is_healthy(self, cr, num_healthy, where): self.failUnless(IDeepCheckResults.providedBy(cr)) self.failUnlessEqual(cr.get_counters()["count-objects-healthy"], num_healthy, where) def deep_check_and_repair_is_healthy(self, cr, num_healthy, where): self.failUnless(IDeepCheckAndRepairResults.providedBy(cr), where) c = cr.get_counters() self.failUnlessEqual(c["count-objects-healthy-pre-repair"], num_healthy, where) self.failUnlessEqual(c["count-objects-healthy-post-repair"], num_healthy, where) self.failUnlessEqual(c["count-repairs-attempted"], 0, where) def test_good(self): self.basedir = "deepcheck/DeepCheckWebGood/good" self.set_up_grid() d = self.set_up_tree() d.addCallback(self.do_stats) d.addCallback(self.do_web_stream_manifest) d.addCallback(self.do_web_stream_check) d.addCallback(self.do_test_check_good) d.addCallback(self.do_test_web_good) d.addCallback(self.do_test_cli_good) d.addErrback(self.explain_web_error) d.addErrback(self.explain_error) return d def do_stats(self, ignored): d = defer.succeed(None) d.addCallback(lambda ign: self.root.start_deep_stats().when_done()) d.addCallback(self.check_stats_good) return d def check_stats_good(self, s): self.failUnlessEqual(s["count-directories"], 3) self.failUnlessEqual(s["count-files"], 5) self.failUnlessEqual(s["count-immutable-files"], 1) self.failUnlessEqual(s["count-literal-files"], 3) self.failUnlessEqual(s["count-mutable-files"], 1) # don't check directories: their size will vary # s["largest-directory"] # s["size-directories"] self.failUnlessEqual(s["largest-directory-children"], 7) self.failUnlessEqual(s["largest-immutable-file"], 13000) # to re-use this function for both the local # dirnode.start_deep_stats() and the webapi t=start-deep-stats, we # coerce the result into a list of tuples. dirnode.start_deep_stats() # returns a list of tuples, but JSON only knows about lists., so # t=start-deep-stats returns a list of lists. 
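# For example, the local API yields the histogram as
#     [(4, 10, 1), (11, 31, 2), (10001, 31622, 1)]
# while the JSON round-trip of the same data comes back as
#     [[4, 10, 1], [11, 31, 2], [10001, 31622, 1]]
# so each entry is coerced with tuple(...) and one assertion covers both.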
histogram = [tuple(stuff) for stuff in s["size-files-histogram"]] self.failUnlessEqual(histogram, [(4, 10, 1), (11, 31, 2), (10001, 31622, 1), ]) self.failUnlessEqual(s["size-immutable-files"], 13000) self.failUnlessEqual(s["size-literal-files"], 56) def do_web_stream_manifest(self, ignored): d = self.web(self.root, method="POST", t="stream-manifest") d.addCallback(lambda output_and_url: self._check_streamed_manifest(output_and_url[0])) return d def _check_streamed_manifest(self, output): units = list(self.parse_streamed_json(output)) files = [u for u in units if u["type"] in ("file", "directory")] assert units[-1]["type"] == "stats" stats = units[-1]["stats"] self.failUnlessEqual(len(files), 8) # [root,mutable,large] are distributed, [small,small2,empty_litdir,tiny_litdir] are not self.failUnlessEqual(len([f for f in files if f["verifycap"] != ""]), 3) self.failUnlessEqual(len([f for f in files if f["verifycap"] == ""]), 5) self.failUnlessEqual(len([f for f in files if f["repaircap"] != ""]), 3) self.failUnlessEqual(len([f for f in files if f["repaircap"] == ""]), 5) self.failUnlessEqual(len([f for f in files if f["storage-index"] != ""]), 3) self.failUnlessEqual(len([f for f in files if f["storage-index"] == ""]), 5) # make sure that a mutable file has filecap==repaircap!=verifycap mutable = [f for f in files if f["cap"] is not None and f["cap"].startswith("URI:SSK:")][0] self.failUnlessEqual(mutable["cap"].encode("ascii"), self.mutable_uri) self.failIfEqual(mutable["cap"], mutable["verifycap"]) self.failUnlessEqual(mutable["cap"], mutable["repaircap"]) # for immutable file, verifycap==repaircap!=filecap large = [f for f in files if f["cap"] is not None and f["cap"].startswith("URI:CHK:")][0] self.failUnlessEqual(large["cap"].encode("ascii"), self.large_uri) self.failIfEqual(large["cap"], large["verifycap"]) self.failUnlessEqual(large["verifycap"], large["repaircap"]) self.check_stats_good(stats) def do_web_stream_check(self, ignored): # TODO return d = self.web(self.root, t="stream-deep-check") def _check(res): units = list(self.parse_streamed_json(res)) #files = [u for u in units if u["type"] in ("file", "directory")] assert units[-1]["type"] == "stats" #stats = units[-1]["stats"] # ... 
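# (Added sketch, not part of the original test: if this disabled check is ever
# re-enabled, it could mirror _check_streamed_manifest above, for example:)
#     files = [u for u in units if u["type"] in ("file", "directory")]
#     self.failUnlessEqual(len(files), 8)
#     self.check_stats_good(units[-1]["stats"])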
d.addCallback(_check) return d def do_test_check_good(self, ignored): d = defer.succeed(None) # check the individual items d.addCallback(lambda ign: self.root.check(Monitor())) d.addCallback(self.check_is_healthy, self.root, "root") d.addCallback(lambda ign: self.mutable.check(Monitor())) d.addCallback(self.check_is_healthy, self.mutable, "mutable") d.addCallback(lambda ign: self.large.check(Monitor())) d.addCallback(self.check_is_healthy, self.large, "large") d.addCallback(lambda ign: self.small.check(Monitor())) d.addCallback(self.failUnlessEqual, None, "small") d.addCallback(lambda ign: self.small2.check(Monitor())) d.addCallback(self.failUnlessEqual, None, "small2") d.addCallback(lambda ign: self.empty_lit_dir.check(Monitor())) d.addCallback(self.failUnlessEqual, None, "empty_lit_dir") d.addCallback(lambda ign: self.tiny_lit_dir.check(Monitor())) d.addCallback(self.failUnlessEqual, None, "tiny_lit_dir") # and again with verify=True d.addCallback(lambda ign: self.root.check(Monitor(), verify=True)) d.addCallback(self.check_is_healthy, self.root, "root") d.addCallback(lambda ign: self.mutable.check(Monitor(), verify=True)) d.addCallback(self.check_is_healthy, self.mutable, "mutable") d.addCallback(lambda ign: self.large.check(Monitor(), verify=True)) d.addCallback(self.check_is_healthy, self.large, "large", incomplete=True) d.addCallback(lambda ign: self.small.check(Monitor(), verify=True)) d.addCallback(self.failUnlessEqual, None, "small") d.addCallback(lambda ign: self.small2.check(Monitor(), verify=True)) d.addCallback(self.failUnlessEqual, None, "small2") d.addCallback(lambda ign: self.empty_lit_dir.check(Monitor(), verify=True)) d.addCallback(self.failUnlessEqual, None, "empty_lit_dir") d.addCallback(lambda ign: self.tiny_lit_dir.check(Monitor(), verify=True)) d.addCallback(self.failUnlessEqual, None, "tiny_lit_dir") # and check_and_repair(), which should be a nop d.addCallback(lambda ign: self.root.check_and_repair(Monitor())) d.addCallback(self.check_and_repair_is_healthy, self.root, "root") d.addCallback(lambda ign: self.mutable.check_and_repair(Monitor())) d.addCallback(self.check_and_repair_is_healthy, self.mutable, "mutable") d.addCallback(lambda ign: self.large.check_and_repair(Monitor())) d.addCallback(self.check_and_repair_is_healthy, self.large, "large") d.addCallback(lambda ign: self.small.check_and_repair(Monitor())) d.addCallback(self.failUnlessEqual, None, "small") d.addCallback(lambda ign: self.small2.check_and_repair(Monitor())) d.addCallback(self.failUnlessEqual, None, "small2") d.addCallback(lambda ign: self.empty_lit_dir.check_and_repair(Monitor())) d.addCallback(self.failUnlessEqual, None, "empty_lit_dir") d.addCallback(lambda ign: self.tiny_lit_dir.check_and_repair(Monitor())) # check_and_repair(verify=True) d.addCallback(lambda ign: self.root.check_and_repair(Monitor(), verify=True)) d.addCallback(self.check_and_repair_is_healthy, self.root, "root") d.addCallback(lambda ign: self.mutable.check_and_repair(Monitor(), verify=True)) d.addCallback(self.check_and_repair_is_healthy, self.mutable, "mutable") d.addCallback(lambda ign: self.large.check_and_repair(Monitor(), verify=True)) d.addCallback(self.check_and_repair_is_healthy, self.large, "large", incomplete=True) d.addCallback(lambda ign: self.small.check_and_repair(Monitor(), verify=True)) d.addCallback(self.failUnlessEqual, None, "small") d.addCallback(lambda ign: self.small2.check_and_repair(Monitor(), verify=True)) d.addCallback(self.failUnlessEqual, None, "small2") d.addCallback(self.failUnlessEqual, None, 
"small2") d.addCallback(lambda ign: self.empty_lit_dir.check_and_repair(Monitor(), verify=True)) d.addCallback(self.failUnlessEqual, None, "empty_lit_dir") d.addCallback(lambda ign: self.tiny_lit_dir.check_and_repair(Monitor(), verify=True)) # now deep-check the root, with various verify= and repair= options d.addCallback(lambda ign: self.root.start_deep_check().when_done()) d.addCallback(self.deep_check_is_healthy, 3, "root") d.addCallback(lambda ign: self.root.start_deep_check(verify=True).when_done()) d.addCallback(self.deep_check_is_healthy, 3, "root") d.addCallback(lambda ign: self.root.start_deep_check_and_repair().when_done()) d.addCallback(self.deep_check_and_repair_is_healthy, 3, "root") d.addCallback(lambda ign: self.root.start_deep_check_and_repair(verify=True).when_done()) d.addCallback(self.deep_check_and_repair_is_healthy, 3, "root") # and finally, start a deep-check, but then cancel it. d.addCallback(lambda ign: self.root.start_deep_check()) def _checking(monitor): monitor.cancel() d = monitor.when_done() # this should fire as soon as the next dirnode.list finishes. # TODO: add a counter to measure how many list() calls are made, # assert that no more than one gets to run before the cancel() # takes effect. def _finished_normally(res): self.fail("this was supposed to fail, not finish normally") def _cancelled(f): f.trap(OperationCancelledError) d.addCallbacks(_finished_normally, _cancelled) return d d.addCallback(_checking) return d def json_check_is_healthy(self, data, n, where, incomplete=False): self.failUnlessEqual(data["storage-index"], str(base32.b2a(n.get_storage_index()), "ascii"), where) self.failUnless("summary" in data, (where, data)) self.failUnlessEqual(data["summary"].lower(), "healthy", "%s: '%s'" % (where, data["summary"])) r = data["results"] self.failUnlessEqual(r["healthy"], True, where) num_servers = len(self.g.all_servers) self.failUnlessEqual(num_servers, 10) self.failIfIn("needs-rebalancing", r) self.failUnlessEqual(r["count-happiness"], num_servers, where) self.failUnlessEqual(r["count-shares-good"], num_servers, where) self.failUnlessEqual(r["count-shares-needed"], 3, where) self.failUnlessEqual(r["count-shares-expected"], num_servers, where) if not incomplete: self.failUnlessEqual(r["count-good-share-hosts"], num_servers, where) self.failUnlessEqual(r["count-corrupt-shares"], 0, where) self.failUnlessEqual(r["list-corrupt-shares"], [], where) if not incomplete: self.failUnlessEqual(sorted(r["servers-responding"]), sorted([idlib.nodeid_b2a(sid) for sid in self.g.get_all_serverids()]), where) self.failUnless("sharemap" in r, where) all_serverids = set() for (shareid, serverids_s) in list(r["sharemap"].items()): all_serverids.update(serverids_s) self.failUnlessEqual(sorted(all_serverids), sorted([idlib.nodeid_b2a(sid) for sid in self.g.get_all_serverids()]), where) self.failUnlessEqual(r["count-wrong-shares"], 0, where) self.failUnlessEqual(r["count-recoverable-versions"], 1, where) self.failUnlessEqual(r["count-unrecoverable-versions"], 0, where) def json_check_and_repair_is_healthy(self, data, n, where, incomplete=False): self.failUnlessEqual(data["storage-index"], str(base32.b2a(n.get_storage_index()), "ascii"), where) self.failUnlessEqual(data["repair-attempted"], False, where) self.json_check_is_healthy(data["pre-repair-results"], n, where, incomplete) self.json_check_is_healthy(data["post-repair-results"], n, where, incomplete) def json_full_deepcheck_is_healthy(self, data, n, where): self.failUnlessEqual(data["root-storage-index"], 
str(base32.b2a(n.get_storage_index()), "ascii"), where) self.failUnlessEqual(data["count-objects-checked"], 3, where) self.failUnlessEqual(data["count-objects-healthy"], 3, where) self.failUnlessEqual(data["count-objects-unhealthy"], 0, where) self.failUnlessEqual(data["count-corrupt-shares"], 0, where) self.failUnlessEqual(data["list-corrupt-shares"], [], where) self.failUnlessEqual(data["list-unhealthy-files"], [], where) self.json_check_stats_good(data["stats"], where) def json_full_deepcheck_and_repair_is_healthy(self, data, n, where): self.failUnlessEqual(data["root-storage-index"], str(base32.b2a(n.get_storage_index()), "ascii"), where) self.failUnlessEqual(data["count-objects-checked"], 3, where) self.failUnlessEqual(data["count-objects-healthy-pre-repair"], 3, where) self.failUnlessEqual(data["count-objects-unhealthy-pre-repair"], 0, where) self.failUnlessEqual(data["count-corrupt-shares-pre-repair"], 0, where) self.failUnlessEqual(data["count-objects-healthy-post-repair"], 3, where) self.failUnlessEqual(data["count-objects-unhealthy-post-repair"], 0, where) self.failUnlessEqual(data["count-corrupt-shares-post-repair"], 0, where) self.failUnlessEqual(data["list-corrupt-shares"], [], where) self.failUnlessEqual(data["list-remaining-corrupt-shares"], [], where) self.failUnlessEqual(data["list-unhealthy-files"], [], where) self.failUnlessEqual(data["count-repairs-attempted"], 0, where) self.failUnlessEqual(data["count-repairs-successful"], 0, where) self.failUnlessEqual(data["count-repairs-unsuccessful"], 0, where) def json_check_lit(self, data, n, where): self.failUnlessEqual(data["storage-index"], "", where) self.failUnlessEqual(data["results"]["healthy"], True, where) def json_check_stats_good(self, data, where): self.check_stats_good(data) def do_test_web_good(self, ignored): d = defer.succeed(None) # stats d.addCallback(lambda ign: self.slow_web(self.root, t="start-deep-stats", output="json")) d.addCallback(self.json_check_stats_good, "deep-stats") # check, no verify d.addCallback(lambda ign: self.web_json(self.root, t="check")) d.addCallback(self.json_check_is_healthy, self.root, "root") d.addCallback(lambda ign: self.web_json(self.mutable, t="check")) d.addCallback(self.json_check_is_healthy, self.mutable, "mutable") d.addCallback(lambda ign: self.web_json(self.large, t="check")) d.addCallback(self.json_check_is_healthy, self.large, "large") d.addCallback(lambda ign: self.web_json(self.small, t="check")) d.addCallback(self.json_check_lit, self.small, "small") d.addCallback(lambda ign: self.web_json(self.small2, t="check")) d.addCallback(self.json_check_lit, self.small2, "small2") d.addCallback(lambda ign: self.web_json(self.empty_lit_dir, t="check")) d.addCallback(self.json_check_lit, self.empty_lit_dir, "empty_lit_dir") d.addCallback(lambda ign: self.web_json(self.tiny_lit_dir, t="check")) d.addCallback(self.json_check_lit, self.tiny_lit_dir, "tiny_lit_dir") # check and verify d.addCallback(lambda ign: self.web_json(self.root, t="check", verify="true")) d.addCallback(self.json_check_is_healthy, self.root, "root+v") d.addCallback(lambda ign: self.web_json(self.mutable, t="check", verify="true")) d.addCallback(self.json_check_is_healthy, self.mutable, "mutable+v") d.addCallback(lambda ign: self.web_json(self.large, t="check", verify="true")) d.addCallback(self.json_check_is_healthy, self.large, "large+v", incomplete=True) d.addCallback(lambda ign: self.web_json(self.small, t="check", verify="true")) d.addCallback(self.json_check_lit, self.small, "small+v") d.addCallback(lambda 
ign: self.web_json(self.small2, t="check", verify="true")) d.addCallback(self.json_check_lit, self.small2, "small2+v") d.addCallback(lambda ign: self.web_json(self.empty_lit_dir, t="check", verify="true")) d.addCallback(self.json_check_lit, self.empty_lit_dir, "empty_lit_dir+v") d.addCallback(lambda ign: self.web_json(self.tiny_lit_dir, t="check", verify="true")) d.addCallback(self.json_check_lit, self.tiny_lit_dir, "tiny_lit_dir+v") # check and repair, no verify d.addCallback(lambda ign: self.web_json(self.root, t="check", repair="true")) d.addCallback(self.json_check_and_repair_is_healthy, self.root, "root+r") d.addCallback(lambda ign: self.web_json(self.mutable, t="check", repair="true")) d.addCallback(self.json_check_and_repair_is_healthy, self.mutable, "mutable+r") d.addCallback(lambda ign: self.web_json(self.large, t="check", repair="true")) d.addCallback(self.json_check_and_repair_is_healthy, self.large, "large+r") d.addCallback(lambda ign: self.web_json(self.small, t="check", repair="true")) d.addCallback(self.json_check_lit, self.small, "small+r") d.addCallback(lambda ign: self.web_json(self.small2, t="check", repair="true")) d.addCallback(self.json_check_lit, self.small2, "small2+r") d.addCallback(lambda ign: self.web_json(self.empty_lit_dir, t="check", repair="true")) d.addCallback(self.json_check_lit, self.empty_lit_dir, "empty_lit_dir+r") d.addCallback(lambda ign: self.web_json(self.tiny_lit_dir, t="check", repair="true")) d.addCallback(self.json_check_lit, self.tiny_lit_dir, "tiny_lit_dir+r") # check+verify+repair d.addCallback(lambda ign: self.web_json(self.root, t="check", repair="true", verify="true")) d.addCallback(self.json_check_and_repair_is_healthy, self.root, "root+vr") d.addCallback(lambda ign: self.web_json(self.mutable, t="check", repair="true", verify="true")) d.addCallback(self.json_check_and_repair_is_healthy, self.mutable, "mutable+vr") d.addCallback(lambda ign: self.web_json(self.large, t="check", repair="true", verify="true")) d.addCallback(self.json_check_and_repair_is_healthy, self.large, "large+vr", incomplete=True) d.addCallback(lambda ign: self.web_json(self.small, t="check", repair="true", verify="true")) d.addCallback(self.json_check_lit, self.small, "small+vr") d.addCallback(lambda ign: self.web_json(self.small2, t="check", repair="true", verify="true")) d.addCallback(self.json_check_lit, self.small2, "small2+vr") d.addCallback(lambda ign: self.web_json(self.empty_lit_dir, t="check", repair="true", verify="true")) d.addCallback(self.json_check_lit, self.empty_lit_dir, "empty_lit_dir+vr") d.addCallback(lambda ign: self.web_json(self.tiny_lit_dir, t="check", repair="true", verify="true")) d.addCallback(self.json_check_lit, self.tiny_lit_dir, "tiny_lit_dir+vr") # now run a deep-check, with various verify= and repair= flags d.addCallback(lambda ign: self.slow_web(self.root, t="start-deep-check", output="json")) d.addCallback(self.json_full_deepcheck_is_healthy, self.root, "root+d") d.addCallback(lambda ign: self.slow_web(self.root, t="start-deep-check", verify="true", output="json")) d.addCallback(self.json_full_deepcheck_is_healthy, self.root, "root+dv") d.addCallback(lambda ign: self.slow_web(self.root, t="start-deep-check", repair="true", output="json")) d.addCallback(self.json_full_deepcheck_and_repair_is_healthy, self.root, "root+dr") d.addCallback(lambda ign: self.slow_web(self.root, t="start-deep-check", verify="true", repair="true", output="json")) d.addCallback(self.json_full_deepcheck_and_repair_is_healthy, self.root, "root+dvr") # now look at 
t=info d.addCallback(lambda ign: self.web(self.root, t="info")) # TODO: examine the output d.addCallback(lambda ign: self.web(self.mutable, t="info")) d.addCallback(lambda ign: self.web(self.large, t="info")) d.addCallback(lambda ign: self.web(self.small, t="info")) d.addCallback(lambda ign: self.web(self.small2, t="info")) d.addCallback(lambda ign: self.web(self.empty_lit_dir, t="info")) d.addCallback(lambda ign: self.web(self.tiny_lit_dir, t="info")) return d def do_test_cli_good(self, ignored): d = defer.succeed(None) d.addCallback(lambda ign: self.do_cli_manifest_stream1()) d.addCallback(lambda ign: self.do_cli_manifest_stream2()) d.addCallback(lambda ign: self.do_cli_manifest_stream3()) d.addCallback(lambda ign: self.do_cli_manifest_stream4()) d.addCallback(lambda ign: self.do_cli_manifest_stream5()) d.addCallback(lambda ign: self.do_cli_stats1()) d.addCallback(lambda ign: self.do_cli_stats2()) return d def _check_manifest_storage_index(self, out): lines = [l.encode("utf-8") for l in out.split("\n") if l] self.failUnlessEqual(len(lines), 3) self.failUnless(base32.b2a(self.root.get_storage_index()) in lines) self.failUnless(base32.b2a(self.mutable.get_storage_index()) in lines) self.failUnless(base32.b2a(self.large.get_storage_index()) in lines) def do_cli_manifest_stream1(self): d = self.do_cli("manifest", self.root_uri) def _check(args): (rc, out, err) = args self.failUnlessEqual(err, "") lines = [l for l in out.split("\n") if l] self.failUnlessEqual(len(lines), 8) caps = {} for l in lines: try: cap, path = l.split(None, 1) except ValueError: cap = l.strip() path = "" caps[cap.encode("ascii")] = path self.failUnless(self.root.get_uri() in caps) self.failUnlessEqual(caps[self.root.get_uri()], "") self.failUnlessEqual(caps[self.mutable.get_uri()], "mutable") self.failUnlessEqual(caps[self.large.get_uri()], "large") self.failUnlessEqual(caps[self.small.get_uri()], "small") self.failUnlessEqual(caps[self.small2.get_uri()], "small2") self.failUnlessEqual(caps[self.empty_lit_dir.get_uri()], "empty_lit_dir") self.failUnlessEqual(caps[self.tiny_lit_dir.get_uri()], "tiny_lit_dir") d.addCallback(_check) return d def do_cli_manifest_stream2(self): d = self.do_cli("manifest", "--raw", self.root_uri) def _check(args): (rc, out, err) = args self.failUnlessEqual(err, "") # this should be the same as the POST t=stream-manifest output self._check_streamed_manifest(out) d.addCallback(_check) return d def do_cli_manifest_stream3(self): d = self.do_cli("manifest", "--storage-index", self.root_uri) def _check(args): (rc, out, err) = args self.failUnlessEqual(err, "") self._check_manifest_storage_index(out) d.addCallback(_check) return d def do_cli_manifest_stream4(self): d = self.do_cli("manifest", "--verify-cap", self.root_uri) def _check(args): (rc, out, err) = args self.failUnlessEqual(err, "") lines = [l.encode("utf-8") for l in out.split("\n") if l] self.failUnlessEqual(len(lines), 3) self.failUnless(self.root.get_verify_cap().to_string() in lines) self.failUnless(self.mutable.get_verify_cap().to_string() in lines) self.failUnless(self.large.get_verify_cap().to_string() in lines) d.addCallback(_check) return d def do_cli_manifest_stream5(self): d = self.do_cli("manifest", "--repair-cap", self.root_uri) def _check(args): (rc, out, err) = args self.failUnlessEqual(err, "") lines = [l.encode("utf-8") for l in out.split("\n") if l] self.failUnlessEqual(len(lines), 3) self.failUnless(self.root.get_repair_cap().to_string() in lines) self.failUnless(self.mutable.get_repair_cap().to_string() in lines) 
self.failUnless(self.large.get_repair_cap().to_string() in lines) d.addCallback(_check) return d def do_cli_stats1(self): d = self.do_cli("stats", self.root_uri) def _check3(args): (rc, out, err) = args lines = [l.strip() for l in out.split("\n") if l] self.failUnless("count-immutable-files: 1" in lines) self.failUnless("count-mutable-files: 1" in lines) self.failUnless("count-literal-files: 3" in lines) self.failUnless("count-files: 5" in lines) self.failUnless("count-directories: 3" in lines) self.failUnless("size-immutable-files: 13000 (13.00 kB, 12.70 kiB)" in lines, lines) self.failUnless("size-literal-files: 56" in lines, lines) self.failUnless(" 4-10 : 1 (10 B, 10 B)".strip() in lines, lines) self.failUnless(" 11-31 : 2 (31 B, 31 B)".strip() in lines, lines) self.failUnless("10001-31622 : 1 (31.62 kB, 30.88 kiB)".strip() in lines, lines) d.addCallback(_check3) return d def do_cli_stats2(self): d = self.do_cli("stats", "--raw", self.root_uri) def _check4(args): (rc, out, err) = args data = json.loads(out) self.failUnlessEqual(data["count-immutable-files"], 1) self.failUnlessEqual(data["count-immutable-files"], 1) self.failUnlessEqual(data["count-mutable-files"], 1) self.failUnlessEqual(data["count-literal-files"], 3) self.failUnlessEqual(data["count-files"], 5) self.failUnlessEqual(data["count-directories"], 3) self.failUnlessEqual(data["size-immutable-files"], 13000) self.failUnlessEqual(data["size-literal-files"], 56) self.failUnless([4,10,1] in data["size-files-histogram"]) self.failUnless([11,31,2] in data["size-files-histogram"]) self.failUnless([10001,31622,1] in data["size-files-histogram"]) d.addCallback(_check4) return d class DeepCheckWebBad(DeepCheckBase, unittest.TestCase): def test_bad(self): self.basedir = "deepcheck/DeepCheckWebBad/bad" self.set_up_grid() d = self.set_up_damaged_tree() d.addCallback(self.do_check) d.addCallback(self.do_deepcheck) d.addCallback(self.do_deepcheck_broken) d.addCallback(self.do_test_web_bad) d.addErrback(self.explain_web_error) d.addErrback(self.explain_error) return d def set_up_damaged_tree(self): # 6.4s # root # mutable-good # mutable-missing-shares # mutable-corrupt-shares # mutable-unrecoverable # large-good # large-missing-shares # large-corrupt-shares # large-unrecoverable # broken # large1-good # subdir-good # large2-good # subdir-unrecoverable # large3-good self.nodes = {} c0 = self.g.clients[0] d = c0.create_dirnode() def _created_root(n): self.root = n self.root_uri = n.get_uri() d.addCallback(_created_root) d.addCallback(self.create_mangled, "mutable-good") d.addCallback(self.create_mangled, "mutable-missing-shares") d.addCallback(self.create_mangled, "mutable-corrupt-shares") d.addCallback(self.create_mangled, "mutable-unrecoverable") d.addCallback(self.create_mangled, "large-good") d.addCallback(self.create_mangled, "large-missing-shares") d.addCallback(self.create_mangled, "large-corrupt-shares") d.addCallback(self.create_mangled, "large-unrecoverable") d.addCallback(lambda ignored: c0.create_dirnode()) d.addCallback(self._stash_node, "broken") large1 = upload.Data(b"Lots of data\n" * 1000 + b"large1" + b"\n", None) d.addCallback(lambda ignored: self.nodes["broken"].add_file(u"large1", large1)) d.addCallback(lambda ignored: self.nodes["broken"].create_subdirectory(u"subdir-good")) large2 = upload.Data(b"Lots of data\n" * 1000 + b"large2" + b"\n", None) d.addCallback(lambda subdir: subdir.add_file(u"large2-good", large2)) d.addCallback(lambda ignored: self.nodes["broken"].create_subdirectory(u"subdir-unrecoverable")) 
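# (Added note, not in the original test: these grids use the 3-of-10 encoding
# asserted elsewhere in this file, so _delete_most_shares() below, which removes
# shares 1 through 9, leaves only share 0, fewer than the k=3 needed, and that is
# what makes the "broken" tree unrecoverable.  _delete_some_shares() removes only
# shares 0 and 1, which still leaves enough shares for recovery.)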
d.addCallback(self._stash_node, "subdir-unrecoverable") large3 = upload.Data(b"Lots of data\n" * 1000 + b"large3" + b"\n", None) d.addCallback(lambda subdir: subdir.add_file(u"large3-good", large3)) d.addCallback(lambda ignored: self._delete_most_shares(self.nodes["broken"])) return d def _stash_node(self, node, name): self.nodes[name] = node return node def create_mangled(self, ignored, name): nodetype, mangletype = name.split("-", 1) if nodetype == "mutable": mutable_uploadable = MutableData(b"mutable file contents") d = self.g.clients[0].create_mutable_file(mutable_uploadable) d.addCallback(lambda n: self.root.set_node(str(name), n)) # TODO drop str() once strings are unicode elif nodetype == "large": large = upload.Data(b"Lots of data\n" * 1000 + name.encode("ascii") + b"\n", None) d = self.root.add_file(str(name), large) elif nodetype == "small": small = upload.Data(b"Small enough for a LIT", None) d = self.root.add_file(str(name), small) d.addCallback(self._stash_node, name) if mangletype == "good": pass elif mangletype == "missing-shares": d.addCallback(self._delete_some_shares) elif mangletype == "corrupt-shares": d.addCallback(self._corrupt_some_shares) else: assert mangletype == "unrecoverable" d.addCallback(self._delete_most_shares) return d def _delete_some_shares(self, node): self.delete_shares_numbered(node.get_uri(), [0,1]) @defer.inlineCallbacks def _corrupt_some_shares(self, node): for (shnum, serverid, sharefile) in self.find_uri_shares(node.get_uri()): if shnum in (0,1): yield run_cli("debug", "corrupt-share", sharefile) def _delete_most_shares(self, node): self.delete_shares_numbered(node.get_uri(), list(range(1,10))) def check_is_healthy(self, cr, where): try: self.failUnless(ICheckResults.providedBy(cr), (cr, type(cr), where)) self.failUnless(cr.is_healthy(), (cr.get_report(), cr.is_healthy(), cr.get_summary(), where)) self.failUnless(cr.is_recoverable(), where) self.failUnlessEqual(cr.get_version_counter_recoverable(), 1, where) self.failUnlessEqual(cr.get_version_counter_unrecoverable(), 0, where) return cr except Exception as le: le.args = tuple(le.args + (where,)) raise def check_is_missing_shares(self, cr, where): self.failUnless(ICheckResults.providedBy(cr), where) self.failIf(cr.is_healthy(), where) self.failUnless(cr.is_recoverable(), where) self.failUnlessEqual(cr.get_version_counter_recoverable(), 1, where) self.failUnlessEqual(cr.get_version_counter_unrecoverable(), 0, where) return cr def check_has_corrupt_shares(self, cr, where): # by "corrupt-shares" we mean the file is still recoverable self.failUnless(ICheckResults.providedBy(cr), where) self.failIf(cr.is_healthy(), (where, cr)) self.failUnless(cr.is_recoverable(), where) self.failUnless(cr.get_share_counter_good() < 10, where) self.failUnless(cr.get_corrupt_shares(), where) return cr def check_is_unrecoverable(self, cr, where): self.failUnless(ICheckResults.providedBy(cr), where) self.failIf(cr.is_healthy(), where) self.failIf(cr.is_recoverable(), where) self.failUnless(cr.get_share_counter_good() < cr.get_encoding_needed(), (cr.get_share_counter_good(), cr.get_encoding_needed(), where)) self.failUnlessEqual(cr.get_version_counter_recoverable(), 0, where) self.failUnlessEqual(cr.get_version_counter_unrecoverable(), 1, where) return cr def do_check(self, ignored): d = defer.succeed(None) # check the individual items, without verification. This will not # detect corrupt shares. 
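# (Added commentary, not in the original test.)  A plain check only asks the
# servers which share numbers they are holding, so a corrupted-but-present share
# still looks fine; only verify=True actually downloads the shares and validates
# their hashes.  Roughly:
#     node.check(Monitor())               # trusts the servers' share lists
#     node.check(Monitor(), verify=True)  # also fetches each share and hash-checks it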
def _check(which, checker): d = self.nodes[which].check(Monitor()) d.addCallback(checker, which + "--check") return d d.addCallback(lambda ign: _check("mutable-good", self.check_is_healthy)) d.addCallback(lambda ign: _check("mutable-missing-shares", self.check_is_missing_shares)) d.addCallback(lambda ign: _check("mutable-corrupt-shares", self.check_is_healthy)) d.addCallback(lambda ign: _check("mutable-unrecoverable", self.check_is_unrecoverable)) d.addCallback(lambda ign: _check("large-good", self.check_is_healthy)) d.addCallback(lambda ign: _check("large-missing-shares", self.check_is_missing_shares)) d.addCallback(lambda ign: _check("large-corrupt-shares", self.check_is_healthy)) d.addCallback(lambda ign: _check("large-unrecoverable", self.check_is_unrecoverable)) # and again with verify=True, which *does* detect corrupt shares. def _checkv(which, checker): d = self.nodes[which].check(Monitor(), verify=True) d.addCallback(checker, which + "--check-and-verify") return d d.addCallback(lambda ign: _checkv("mutable-good", self.check_is_healthy)) d.addCallback(lambda ign: _checkv("mutable-missing-shares", self.check_is_missing_shares)) d.addCallback(lambda ign: _checkv("mutable-corrupt-shares", self.check_has_corrupt_shares)) d.addCallback(lambda ign: _checkv("mutable-unrecoverable", self.check_is_unrecoverable)) d.addCallback(lambda ign: _checkv("large-good", self.check_is_healthy)) d.addCallback(lambda ign: _checkv("large-missing-shares", self.check_is_missing_shares)) d.addCallback(lambda ign: _checkv("large-corrupt-shares", self.check_has_corrupt_shares)) d.addCallback(lambda ign: _checkv("large-unrecoverable", self.check_is_unrecoverable)) return d def do_deepcheck(self, ignored): d = defer.succeed(None) # now deep-check the root, with various verify= and repair= options d.addCallback(lambda ign: self.root.start_deep_check().when_done()) def _check1(cr): self.failUnless(IDeepCheckResults.providedBy(cr)) c = cr.get_counters() self.failUnlessEqual(c["count-objects-checked"], 9) self.failUnlessEqual(c["count-objects-healthy"], 5) self.failUnlessEqual(c["count-objects-unhealthy"], 4) self.failUnlessEqual(c["count-objects-unrecoverable"], 2) d.addCallback(_check1) d.addCallback(lambda ign: self.root.start_deep_check(verify=True).when_done()) def _check2(cr): self.failUnless(IDeepCheckResults.providedBy(cr)) c = cr.get_counters() self.failUnlessEqual(c["count-objects-checked"], 9) self.failUnlessEqual(c["count-objects-healthy"], 3) self.failUnlessEqual(c["count-objects-unhealthy"], 6) self.failUnlessEqual(c["count-objects-healthy"], 3) # root, mutable good, large good self.failUnlessEqual(c["count-objects-unrecoverable"], 2) # mutable unrecoverable, large unrecoverable d.addCallback(_check2) return d def do_deepcheck_broken(self, ignored): # deep-check on the broken directory should fail, because of the # untraversable subdir def _do_deep_check(): return self.nodes["broken"].start_deep_check().when_done() d = self.shouldFail(UnrecoverableFileError, "do_deep_check", "no recoverable versions", _do_deep_check) return d def json_is_healthy(self, data, where): r = data["results"] self.failUnless(r["healthy"], where) self.failUnless(r["recoverable"], where) self.failUnlessEqual(r["count-recoverable-versions"], 1, where) self.failUnlessEqual(r["count-unrecoverable-versions"], 0, where) def json_is_missing_shares(self, data, where): r = data["results"] self.failIf(r["healthy"], where) self.failUnless(r["recoverable"], where) self.failUnlessEqual(r["count-recoverable-versions"], 1, where) 
self.failUnlessEqual(r["count-unrecoverable-versions"], 0, where) def json_has_corrupt_shares(self, data, where): # by "corrupt-shares" we mean the file is still recoverable r = data["results"] self.failIf(r["healthy"], where) self.failUnless(r["recoverable"], where) self.failUnless(r["count-shares-good"] < 10, where) self.failUnless(r["count-corrupt-shares"], where) self.failUnless(r["list-corrupt-shares"], where) def json_is_unrecoverable(self, data, where): r = data["results"] self.failIf(r["healthy"], where) self.failIf(r["recoverable"], where) self.failUnless(r["count-shares-good"] < r["count-shares-needed"], where) self.failUnlessEqual(r["count-recoverable-versions"], 0, where) self.failUnlessEqual(r["count-unrecoverable-versions"], 1, where) def do_test_web_bad(self, ignored): d = defer.succeed(None) # check, no verify def _check(which, checker): d = self.web_json(self.nodes[which], t="check") d.addCallback(checker, which + "--webcheck") return d d.addCallback(lambda ign: _check("mutable-good", self.json_is_healthy)) d.addCallback(lambda ign: _check("mutable-missing-shares", self.json_is_missing_shares)) d.addCallback(lambda ign: _check("mutable-corrupt-shares", self.json_is_healthy)) d.addCallback(lambda ign: _check("mutable-unrecoverable", self.json_is_unrecoverable)) d.addCallback(lambda ign: _check("large-good", self.json_is_healthy)) d.addCallback(lambda ign: _check("large-missing-shares", self.json_is_missing_shares)) d.addCallback(lambda ign: _check("large-corrupt-shares", self.json_is_healthy)) d.addCallback(lambda ign: _check("large-unrecoverable", self.json_is_unrecoverable)) # check and verify def _checkv(which, checker): d = self.web_json(self.nodes[which], t="check", verify="true") d.addCallback(checker, which + "--webcheck-and-verify") return d d.addCallback(lambda ign: _checkv("mutable-good", self.json_is_healthy)) d.addCallback(lambda ign: _checkv("mutable-missing-shares", self.json_is_missing_shares)) d.addCallback(lambda ign: _checkv("mutable-corrupt-shares", self.json_has_corrupt_shares)) d.addCallback(lambda ign: _checkv("mutable-unrecoverable", self.json_is_unrecoverable)) d.addCallback(lambda ign: _checkv("large-good", self.json_is_healthy)) d.addCallback(lambda ign: _checkv("large-missing-shares", self.json_is_missing_shares)) d.addCallback(lambda ign: _checkv("large-corrupt-shares", self.json_has_corrupt_shares)) d.addCallback(lambda ign: _checkv("large-unrecoverable", self.json_is_unrecoverable)) return d class Large(DeepCheckBase, unittest.TestCase): def test_lots_of_lits(self): self.basedir = "deepcheck/Large/lots_of_lits" self.set_up_grid() # create the following directory structure: # root/ # subdir/ # 000-large (CHK) # 001-small (LIT) # 002-small # ... 
# 399-small # then do a deepcheck and make sure it doesn't cause a # Deferred-tail-recursion stack overflow COUNT = 400 c0 = self.g.clients[0] d = c0.create_dirnode() self.stash = {} def _created_root(n): self.root = n return n d.addCallback(_created_root) d.addCallback(lambda root: root.create_subdirectory(u"subdir")) def _add_children(subdir_node): self.subdir_node = subdir_node kids = {} for i in range(1, COUNT): litcap = LiteralFileURI(b"%03d-data" % i).to_string() kids[u"%03d-small" % i] = (litcap, litcap) return subdir_node.set_children(kids) d.addCallback(_add_children) up = upload.Data(b"large enough for CHK" * 100, b"") d.addCallback(lambda ign: self.subdir_node.add_file(u"0000-large", up)) def _start_deepcheck(ignored): return self.web(self.root, method="POST", t="stream-deep-check") d.addCallback(_start_deepcheck) def _check(output_and_url): (output, url) = output_and_url units = list(self.parse_streamed_json(output)) self.failUnlessEqual(len(units), 2+COUNT+1) d.addCallback(_check) return d tahoe_lafs-1.20.0/src/allmydata/test/test_deferredutil.py0000644000000000000000000002302613615410400020427 0ustar00""" Tests for allmydata.util.deferredutil. """ from __future__ import annotations from twisted.trial import unittest from twisted.internet import defer, reactor from twisted.internet.defer import Deferred from twisted.python.failure import Failure from hypothesis.strategies import integers from hypothesis import given from allmydata.util import deferredutil from allmydata.util.deferredutil import race, MultiFailure class DeferredUtilTests(unittest.TestCase, deferredutil.WaitForDelayedCallsMixin): def test_gather_results(self): d1 = defer.Deferred() d2 = defer.Deferred() res = deferredutil.gatherResults([d1, d2]) d1.errback(ValueError("BAD")) def _callb(res): self.fail("Should have errbacked, not resulted in %s" % (res,)) def _errb(thef): thef.trap(ValueError) res.addCallbacks(_callb, _errb) return res def test_success(self): d1, d2 = defer.Deferred(), defer.Deferred() good = [] bad = [] dlss = deferredutil.DeferredListShouldSucceed([d1,d2]) dlss.addCallbacks(good.append, bad.append) d1.callback(1) d2.callback(2) self.failUnlessEqual(good, [[1,2]]) self.failUnlessEqual(bad, []) def test_failure(self): d1, d2 = defer.Deferred(), defer.Deferred() good = [] bad = [] dlss = deferredutil.DeferredListShouldSucceed([d1,d2]) dlss.addCallbacks(good.append, bad.append) d1.addErrback(lambda _ignore: None) d2.addErrback(lambda _ignore: None) d1.callback(1) d2.errback(ValueError()) self.failUnlessEqual(good, []) self.failUnlessEqual(len(bad), 1) f = bad[0] self.failUnless(isinstance(f, Failure)) self.failUnless(f.check(ValueError)) def test_wait_for_delayed_calls(self): """ This tests that 'wait_for_delayed_calls' does in fact wait for a delayed call that is active when the test returns. If it didn't, Trial would report an unclean reactor error for this test. """ def _trigger(): #print("trigger") pass reactor.callLater(0.1, _trigger) d = defer.succeed(None) d.addBoth(self.wait_for_delayed_calls) return d class UntilTests(unittest.TestCase): """ Tests for ``deferredutil.until``. """ def test_exception(self): """ If the action raises an exception, the ``Deferred`` returned by ``until`` fires with a ``Failure``. """ self.assertFailure( deferredutil.until(lambda: 1/0, lambda: True), ZeroDivisionError, ) def test_stops_on_condition(self): """ The action is called repeatedly until ``condition`` returns ``True``. 
""" calls = [] def action(): calls.append(None) def condition(): return len(calls) == 3 self.assertIs( self.successResultOf( deferredutil.until(action, condition), ), None, ) self.assertEqual(3, len(calls)) def test_waits_for_deferred(self): """ If the action returns a ``Deferred`` then it is called again when the ``Deferred`` fires. """ counter = [0] r1 = defer.Deferred() r2 = defer.Deferred() results = [r1, r2] def action(): counter[0] += 1 return results.pop(0) def condition(): return False deferredutil.until(action, condition) self.assertEqual([1], counter) r1.callback(None) self.assertEqual([2], counter) class AsyncToDeferred(unittest.TestCase): """Tests for ``deferredutil.async_to_deferred.``""" def test_async_to_deferred_success(self): """ Normal results from a ``@async_to_deferred``-wrapped function get turned into a ``Deferred`` with that value. """ @deferredutil.async_to_deferred async def f(x, y): return x + y result = f(1, y=2) self.assertEqual(self.successResultOf(result), 3) def test_async_to_deferred_exception(self): """ Exceptions from a ``@async_to_deferred``-wrapped function get turned into a ``Deferred`` with that value. """ @deferredutil.async_to_deferred async def f(x, y): return x/y result = f(1, 0) self.assertIsInstance(self.failureResultOf(result).value, ZeroDivisionError) def _setupRaceState(numDeferreds: int) -> tuple[list[int], list[Deferred[object]]]: """ Create a list of Deferreds and a corresponding list of integers tracking how many times each Deferred has been cancelled. Without additional steps the Deferreds will never fire. """ cancelledState = [0] * numDeferreds ds: list[Deferred[object]] = [] for n in range(numDeferreds): def cancel(d: Deferred, n: int = n) -> None: cancelledState[n] += 1 ds.append(Deferred(canceller=cancel)) return cancelledState, ds class RaceTests(unittest.SynchronousTestCase): """ Tests for L{race}. """ @given( beforeWinner=integers(min_value=0, max_value=3), afterWinner=integers(min_value=0, max_value=3), ) def test_success(self, beforeWinner: int, afterWinner: int) -> None: """ When one of the L{Deferred}s passed to L{race} fires successfully, the L{Deferred} return by L{race} fires with the index of that L{Deferred} and its result and cancels the rest of the L{Deferred}s. @param beforeWinner: A randomly selected number of Deferreds to appear before the "winning" Deferred in the list passed in. @param beforeWinner: A randomly selected number of Deferreds to appear after the "winning" Deferred in the list passed in. """ cancelledState, ds = _setupRaceState(beforeWinner + 1 + afterWinner) raceResult = race(ds) expected = object() ds[beforeWinner].callback(expected) # The result should be the index and result of the only Deferred that # fired. self.assertEqual( self.successResultOf(raceResult), (beforeWinner, expected), ) # All Deferreds except the winner should have been cancelled once. expectedCancelledState = [1] * beforeWinner + [0] + [1] * afterWinner self.assertEqual( cancelledState, expectedCancelledState, ) @given( beforeWinner=integers(min_value=0, max_value=3), afterWinner=integers(min_value=0, max_value=3), ) def test_failure(self, beforeWinner: int, afterWinner: int) -> None: """ When all of the L{Deferred}s passed to L{race} fire with failures, the L{Deferred} return by L{race} fires with L{MultiFailure} wrapping all of their failures. @param beforeWinner: A randomly selected number of Deferreds to appear before the "winning" Deferred in the list passed in. 
@param beforeWinner: A randomly selected number of Deferreds to appear after the "winning" Deferred in the list passed in. """ cancelledState, ds = _setupRaceState(beforeWinner + 1 + afterWinner) failure = Failure(Exception("The test demands failures.")) raceResult = race(ds) for d in ds: d.errback(failure) actualFailure = self.failureResultOf(raceResult, MultiFailure) self.assertEqual( actualFailure.value.failures, [failure] * len(ds), ) self.assertEqual( cancelledState, [0] * len(ds), ) @given( beforeWinner=integers(min_value=0, max_value=3), afterWinner=integers(min_value=0, max_value=3), ) def test_resultAfterCancel(self, beforeWinner: int, afterWinner: int) -> None: """ If one of the Deferreds fires after it was cancelled its result goes nowhere. In particular, it does not cause any errors to be logged. """ # Ensure we have a Deferred to win and at least one other Deferred # that can ignore cancellation. ds: list[Deferred[None]] = [ Deferred() for n in range(beforeWinner + 2 + afterWinner) ] raceResult = race(ds) ds[beforeWinner].callback(None) ds[beforeWinner + 1].callback(None) self.successResultOf(raceResult) self.assertEqual(len(self.flushLoggedErrors()), 0) def test_resultFromCancel(self) -> None: """ If one of the input Deferreds has a cancel function that fires it with success, nothing bad happens. """ winner: Deferred[object] = Deferred() ds: list[Deferred[object]] = [ winner, Deferred(canceller=lambda d: d.callback(object())), ] expected = object() raceResult = race(ds) winner.callback(expected) self.assertEqual(self.successResultOf(raceResult), (0, expected)) @given( numDeferreds=integers(min_value=1, max_value=3), ) def test_cancel(self, numDeferreds: int) -> None: """ If the result of L{race} is cancelled then all of the L{Deferred}s passed in are cancelled. """ cancelledState, ds = _setupRaceState(numDeferreds) raceResult = race(ds) raceResult.cancel() self.assertEqual(cancelledState, [1] * numDeferreds) self.failureResultOf(raceResult, MultiFailure) tahoe_lafs-1.20.0/src/allmydata/test/test_dictutil.py0000644000000000000000000001224413615410400017572 0ustar00""" Tests for allmydata.util.dictutil. 
""" from __future__ import annotations from twisted.trial import unittest from allmydata.util import dictutil class DictUtil(unittest.TestCase): def test_dict_of_sets(self): ds = dictutil.DictOfSets() ds.add(1, "a") ds.add(2, "b") ds.add(2, "b") ds.add(2, "c") self.failUnlessEqual(ds[1], set(["a"])) self.failUnlessEqual(ds[2], set(["b", "c"])) ds.discard(3, "d") # should not raise an exception ds.discard(2, "b") self.failUnlessEqual(ds[2], set(["c"])) ds.discard(2, "c") self.failIf(2 in ds) ds.add(3, "f") ds2 = dictutil.DictOfSets() ds2.add(3, "f") ds2.add(3, "g") ds2.add(4, "h") ds.update(ds2) self.failUnlessEqual(ds[1], set(["a"])) self.failUnlessEqual(ds[3], set(["f", "g"])) self.failUnlessEqual(ds[4], set(["h"])) def test_auxdict(self): d = dictutil.AuxValueDict() # we put the serialized form in the auxdata d.set_with_aux("key", ("filecap", "metadata"), "serialized") self.failUnlessEqual(list(d.keys()), ["key"]) self.failUnlessEqual(d["key"], ("filecap", "metadata")) self.failUnlessEqual(d.get_aux("key"), "serialized") def _get_missing(key): return d[key] self.failUnlessRaises(KeyError, _get_missing, "nonkey") self.failUnlessEqual(d.get("nonkey"), None) self.failUnlessEqual(d.get("nonkey", "nonvalue"), "nonvalue") self.failUnlessEqual(d.get_aux("nonkey"), None) self.failUnlessEqual(d.get_aux("nonkey", "nonvalue"), "nonvalue") d["key"] = ("filecap2", "metadata2") self.failUnlessEqual(d["key"], ("filecap2", "metadata2")) self.failUnlessEqual(d.get_aux("key"), None) d.set_with_aux("key2", "value2", "aux2") self.failUnlessEqual(sorted(d.keys()), ["key", "key2"]) del d["key2"] self.failUnlessEqual(list(d.keys()), ["key"]) self.failIf("key2" in d) self.failUnlessRaises(KeyError, _get_missing, "key2") self.failUnlessEqual(d.get("key2"), None) self.failUnlessEqual(d.get_aux("key2"), None) d["key2"] = "newvalue2" self.failUnlessEqual(d.get("key2"), "newvalue2") self.failUnlessEqual(d.get_aux("key2"), None) d = dictutil.AuxValueDict({1:2,3:4}) self.failUnlessEqual(sorted(d.keys()), [1,3]) self.failUnlessEqual(d[1], 2) self.failUnlessEqual(d.get_aux(1), None) d = dictutil.AuxValueDict([ (1,2), (3,4) ]) self.failUnlessEqual(sorted(d.keys()), [1,3]) self.failUnlessEqual(d[1], 2) self.failUnlessEqual(d.get_aux(1), None) d = dictutil.AuxValueDict(one=1, two=2) self.failUnlessEqual(sorted(d.keys()), ["one","two"]) self.failUnlessEqual(d["one"], 1) self.failUnlessEqual(d.get_aux("one"), None) class TypedKeyDict(unittest.TestCase): """Tests for dictionaries that limit keys.""" def setUp(self): pass def test_bytes(self): """BytesKeyDict is limited to just byte keys.""" self.assertRaises(TypeError, dictutil.BytesKeyDict, {u"hello": 123}) d = dictutil.BytesKeyDict({b"123": 200}) with self.assertRaises(TypeError): d[u"hello"] = "blah" with self.assertRaises(TypeError): d[u"hello"] with self.assertRaises(TypeError): del d[u"hello"] with self.assertRaises(TypeError): d.setdefault(u"hello", "123") with self.assertRaises(TypeError): d.get(u"xcd") # Byte keys are fine: self.assertEqual(d, {b"123": 200}) d[b"456"] = 400 self.assertEqual(d[b"456"], 400) del d[b"456"] self.assertEqual(d.get(b"456", 50), 50) self.assertEqual(d.setdefault(b"456", 300), 300) self.assertEqual(d[b"456"], 300) def test_unicode(self): """UnicodeKeyDict is limited to just unicode keys.""" self.assertRaises(TypeError, dictutil.UnicodeKeyDict, {b"hello": 123}) d = dictutil.UnicodeKeyDict({u"123": 200}) with self.assertRaises(TypeError): d[b"hello"] = "blah" with self.assertRaises(TypeError): d[b"hello"] with self.assertRaises(TypeError): del 
d[b"hello"] with self.assertRaises(TypeError): d.setdefault(b"hello", "123") with self.assertRaises(TypeError): d.get(b"xcd") # Byte keys are fine: self.assertEqual(d, {u"123": 200}) d[u"456"] = 400 self.assertEqual(d[u"456"], 400) del d[u"456"] self.assertEqual(d.get(u"456", 50), 50) self.assertEqual(d.setdefault(u"456", 300), 300) self.assertEqual(d[u"456"], 300) class FilterTests(unittest.TestCase): """ Tests for ``dictutil.filter``. """ def test_filter(self) -> None: """ ``dictutil.filter`` returns a ``dict`` that contains the key/value pairs for which the value is matched by the given predicate. """ self.assertEqual( {1: 2}, dictutil.filter(lambda v: v == 2, {1: 2, 2: 3}), ) tahoe_lafs-1.20.0/src/allmydata/test/test_dirnode.py0000644000000000000000000031314213615410400017376 0ustar00"""Tests for the dirnode module. Ported to Python 3. """ import time import unicodedata from zope.interface import implementer from twisted.trial import unittest from twisted.internet import defer from twisted.internet.interfaces import IConsumer from twisted.python.filepath import FilePath from allmydata import uri, dirnode from allmydata.client import _Client from allmydata.crypto.rsa import create_signing_keypair from allmydata.immutable import upload from allmydata.immutable.literal import LiteralFileNode from allmydata.interfaces import IImmutableFileNode, IMutableFileNode, \ ExistingChildError, NoSuchChildError, MustNotBeUnknownRWError, \ MustBeDeepImmutableError, MustBeReadonlyError, \ IDeepCheckResults, IDeepCheckAndRepairResults, \ MDMF_VERSION, SDMF_VERSION from allmydata.mutable.filenode import MutableFileNode from allmydata.mutable.common import ( UncoordinatedWriteError, derive_mutable_keys, ) from allmydata.util import hashutil, base32 from allmydata.util.netstring import split_netstring from allmydata.monitor import Monitor from allmydata.test.common import make_chk_file_uri, make_mutable_file_uri, \ ErrorMixin from allmydata.test.mutable.util import ( FakeStorage, make_nodemaker_with_peers, make_peer, ) from allmydata.test.no_network import GridTestMixin from allmydata.unknown import UnknownNode, strip_prefix_for_ro from allmydata.nodemaker import NodeMaker from base64 import b32decode from cryptography.hazmat.primitives.serialization import load_pem_private_key import allmydata.test.common_util as testutil from hypothesis import given from hypothesis.strategies import text @implementer(IConsumer) class MemAccum(object): def registerProducer(self, producer, streaming): self.producer = producer self.producer.resumeProducing() pass def unregisterProducer(self): pass def write(self, data): assert not hasattr(self, 'data') self.data = data self.producer.resumeProducing() setup_py_uri = b"URI:CHK:n7r3m6wmomelk4sep3kw5cvduq:os7ijw5c3maek7pg65e5254k2fzjflavtpejjyhshpsxuqzhcwwq:3:20:14861" one_uri = b"URI:LIT:n5xgk" # LIT for "one" mut_write_uri = b"URI:SSK:vfvcbdfbszyrsaxchgevhmmlii:euw4iw7bbnkrrwpzuburbhppuxhc3gwxv26f6imekhz7zyw2ojnq" mdmf_write_uri = b"URI:MDMF:x533rhbm6kiehzl5kj3s44n5ie:4gif5rhneyd763ouo5qjrgnsoa3bg43xycy4robj2rf3tvmhdl3a" empty_litdir_uri = b"URI:DIR2-LIT:" tiny_litdir_uri = b"URI:DIR2-LIT:gqytunj2onug64tufqzdcosvkjetutcjkq5gw4tvm5vwszdgnz5hgyzufqydulbshj5x2lbm" # contains one child which is itself also LIT mut_read_uri = b"URI:SSK-RO:jf6wkflosyvntwxqcdo7a54jvm:euw4iw7bbnkrrwpzuburbhppuxhc3gwxv26f6imekhz7zyw2ojnq" mdmf_read_uri = b"URI:MDMF-RO:d4cydxselputycfzkw6qgz4zv4:4gif5rhneyd763ouo5qjrgnsoa3bg43xycy4robj2rf3tvmhdl3a" future_write_uri = 
b"x-tahoe-crazy://I_am_from_the_future." future_read_uri = b"x-tahoe-crazy-readonly://I_am_from_the_future." future_nonascii_write_uri = u"x-tahoe-even-more-crazy://I_am_from_the_future_rw_\u263A".encode('utf-8') future_nonascii_read_uri = u"x-tahoe-even-more-crazy-readonly://I_am_from_the_future_ro_\u263A".encode('utf-8') # 'o' 'n' 'e-macron' one_nfc = u"on\u0113" one_nfd = u"one\u0304" class Dirnode(GridTestMixin, unittest.TestCase, testutil.ReallyEqualMixin, testutil.ShouldFailMixin, testutil.StallMixin, ErrorMixin): def _do_create_test(self, mdmf=False): c = self.g.clients[0] self.expected_manifest = [] self.expected_verifycaps = set() self.expected_storage_indexes = set() d = None if mdmf: d = c.create_dirnode(version=MDMF_VERSION) else: d = c.create_dirnode() def _then(n): # / self.rootnode = n backing_node = n._node if mdmf: self.failUnlessEqual(backing_node.get_version(), MDMF_VERSION) else: self.failUnlessEqual(backing_node.get_version(), SDMF_VERSION) self.failUnless(n.is_mutable()) u = n.get_uri() self.failUnless(u) cap_formats = [] if mdmf: cap_formats = [b"URI:DIR2-MDMF:", b"URI:DIR2-MDMF-RO:", b"URI:DIR2-MDMF-Verifier:"] else: cap_formats = [b"URI:DIR2:", b"URI:DIR2-RO", b"URI:DIR2-Verifier:"] rw, ro, v = cap_formats self.failUnless(u.startswith(rw), u) u_ro = n.get_readonly_uri() self.failUnless(u_ro.startswith(ro), u_ro) u_v = n.get_verify_cap().to_string() self.failUnless(u_v.startswith(v), u_v) u_r = n.get_repair_cap().to_string() self.failUnlessReallyEqual(u_r, u) self.expected_manifest.append( ((), u) ) self.expected_verifycaps.add(u_v) si = n.get_storage_index() self.expected_storage_indexes.add(base32.b2a(si)) expected_si = n._uri.get_storage_index() self.failUnlessReallyEqual(si, expected_si) d = n.list() d.addCallback(lambda res: self.failUnlessEqual(res, {})) d.addCallback(lambda res: n.has_child(u"missing")) d.addCallback(lambda res: self.failIf(res)) fake_file_uri = make_mutable_file_uri() other_file_uri = make_mutable_file_uri() m = c.nodemaker.create_from_cap(fake_file_uri) ffu_v = m.get_verify_cap().to_string() self.expected_manifest.append( ((u"child",) , m.get_uri()) ) self.expected_verifycaps.add(ffu_v) self.expected_storage_indexes.add(base32.b2a(m.get_storage_index())) d.addCallback(lambda res: n.set_uri(u"child", fake_file_uri, fake_file_uri)) d.addCallback(lambda res: self.shouldFail(ExistingChildError, "set_uri-no", "child 'child' already exists", n.set_uri, u"child", other_file_uri, other_file_uri, overwrite=False)) # / # /child = mutable d.addCallback(lambda res: n.create_subdirectory(u"subdir")) # / # /child = mutable # /subdir = directory def _created(subdir): self.failUnless(isinstance(subdir, dirnode.DirectoryNode)) self.subdir = subdir new_v = subdir.get_verify_cap().to_string() assert isinstance(new_v, bytes) self.expected_manifest.append( ((u"subdir",), subdir.get_uri()) ) self.expected_verifycaps.add(new_v) si = subdir.get_storage_index() self.expected_storage_indexes.add(base32.b2a(si)) d.addCallback(_created) d.addCallback(lambda res: self.shouldFail(ExistingChildError, "mkdir-no", "child 'subdir' already exists", n.create_subdirectory, u"subdir", overwrite=False)) d.addCallback(lambda res: n.list()) d.addCallback(lambda children: self.failUnlessReallyEqual(set(children.keys()), set([u"child", u"subdir"]))) d.addCallback(lambda res: n.start_deep_stats().when_done()) def _check_deepstats(stats): self.failUnless(isinstance(stats, dict)) expected = {"count-immutable-files": 0, "count-mutable-files": 1, "count-literal-files": 0, "count-files": 
1, "count-directories": 2, "size-immutable-files": 0, "size-literal-files": 0, #"size-directories": 616, # varies #"largest-directory": 616, "largest-directory-children": 2, "largest-immutable-file": 0, } for k,v in expected.items(): self.failUnlessReallyEqual(stats[k], v, "stats[%s] was %s, not %s" % (k, stats[k], v)) self.failUnless(stats["size-directories"] > 500, stats["size-directories"]) self.failUnless(stats["largest-directory"] > 500, stats["largest-directory"]) self.failUnlessReallyEqual(stats["size-files-histogram"], []) d.addCallback(_check_deepstats) d.addCallback(lambda res: n.build_manifest().when_done()) def _check_manifest(res): manifest = res["manifest"] self.failUnlessReallyEqual(sorted(manifest), sorted(self.expected_manifest)) stats = res["stats"] _check_deepstats(stats) self.failUnlessReallyEqual(self.expected_verifycaps, res["verifycaps"]) self.failUnlessReallyEqual(self.expected_storage_indexes, res["storage-index"]) d.addCallback(_check_manifest) def _add_subsubdir(res): return self.subdir.create_subdirectory(u"subsubdir") d.addCallback(_add_subsubdir) # / # /child = mutable # /subdir = directory # /subdir/subsubdir = directory d.addCallback(lambda res: n.get_child_at_path(u"subdir/subsubdir")) d.addCallback(lambda subsubdir: self.failUnless(isinstance(subsubdir, dirnode.DirectoryNode))) d.addCallback(lambda res: n.get_child_at_path(u"")) d.addCallback(lambda res: self.failUnlessReallyEqual(res.get_uri(), n.get_uri())) d.addCallback(lambda res: n.get_metadata_for(u"child")) d.addCallback(lambda metadata: self.failUnlessEqual(set(metadata.keys()), set(["tahoe"]))) d.addCallback(lambda res: self.shouldFail(NoSuchChildError, "gcamap-no", "nope", n.get_child_and_metadata_at_path, u"subdir/nope")) d.addCallback(lambda res: n.get_child_and_metadata_at_path(u"")) def _check_child_and_metadata1(res): child, metadata = res self.failUnless(isinstance(child, dirnode.DirectoryNode)) # edge-metadata needs at least one path segment self.failUnlessEqual(set(metadata.keys()), set([])) d.addCallback(_check_child_and_metadata1) d.addCallback(lambda res: n.get_child_and_metadata_at_path(u"child")) def _check_child_and_metadata2(res): child, metadata = res self.failUnlessReallyEqual(child.get_uri(), fake_file_uri) self.failUnlessEqual(set(metadata.keys()), set(["tahoe"])) d.addCallback(_check_child_and_metadata2) d.addCallback(lambda res: n.get_child_and_metadata_at_path(u"subdir/subsubdir")) def _check_child_and_metadata3(res): child, metadata = res self.failUnless(isinstance(child, dirnode.DirectoryNode)) self.failUnlessEqual(set(metadata.keys()), set(["tahoe"])) d.addCallback(_check_child_and_metadata3) # set_uri + metadata # it should be possible to add a child without any metadata d.addCallback(lambda res: n.set_uri(u"c2", fake_file_uri, fake_file_uri, {})) d.addCallback(lambda res: n.get_metadata_for(u"c2")) d.addCallback(lambda metadata: self.failUnlessEqual(set(metadata.keys()), set(["tahoe"]))) # You can't override the link timestamps. 
d.addCallback(lambda res: n.set_uri(u"c2", fake_file_uri, fake_file_uri, { 'tahoe': {'linkcrtime': "bogus"}})) d.addCallback(lambda res: n.get_metadata_for(u"c2")) def _has_good_linkcrtime(metadata): self.failUnless('tahoe' in metadata) self.failUnless('linkcrtime' in metadata['tahoe']) self.failIfEqual(metadata['tahoe']['linkcrtime'], 'bogus') d.addCallback(_has_good_linkcrtime) # if we don't set any defaults, the child should get timestamps d.addCallback(lambda res: n.set_uri(u"c3", fake_file_uri, fake_file_uri)) d.addCallback(lambda res: n.get_metadata_for(u"c3")) d.addCallback(lambda metadata: self.failUnlessEqual(set(metadata.keys()), set(["tahoe"]))) # we can also add specific metadata at set_uri() time d.addCallback(lambda res: n.set_uri(u"c4", fake_file_uri, fake_file_uri, {"key": "value"})) d.addCallback(lambda res: n.get_metadata_for(u"c4")) d.addCallback(lambda metadata: self.failUnless((set(metadata.keys()) == set(["key", "tahoe"])) and (metadata['key'] == "value"), metadata)) d.addCallback(lambda res: n.delete(u"c2")) d.addCallback(lambda res: n.delete(u"c3")) d.addCallback(lambda res: n.delete(u"c4")) # set_node + metadata # it should be possible to add a child without any metadata except for timestamps d.addCallback(lambda res: n.set_node(u"d2", n, {})) d.addCallback(lambda res: c.create_dirnode()) d.addCallback(lambda n2: self.shouldFail(ExistingChildError, "set_node-no", "child 'd2' already exists", n.set_node, u"d2", n2, overwrite=False)) d.addCallback(lambda res: n.get_metadata_for(u"d2")) d.addCallback(lambda metadata: self.failUnlessEqual(set(metadata.keys()), set(["tahoe"]))) # if we don't set any defaults, the child should get timestamps d.addCallback(lambda res: n.set_node(u"d3", n)) d.addCallback(lambda res: n.get_metadata_for(u"d3")) d.addCallback(lambda metadata: self.failUnlessEqual(set(metadata.keys()), set(["tahoe"]))) # we can also add specific metadata at set_node() time d.addCallback(lambda res: n.set_node(u"d4", n, {"key": "value"})) d.addCallback(lambda res: n.get_metadata_for(u"d4")) d.addCallback(lambda metadata: self.failUnless((set(metadata.keys()) == set(["key", "tahoe"])) and (metadata["key"] == "value"), metadata)) d.addCallback(lambda res: n.delete(u"d2")) d.addCallback(lambda res: n.delete(u"d3")) d.addCallback(lambda res: n.delete(u"d4")) # metadata through set_children() d.addCallback(lambda res: n.set_children({ u"e1": (fake_file_uri, fake_file_uri), u"e2": (fake_file_uri, fake_file_uri, {}), u"e3": (fake_file_uri, fake_file_uri, {"key": "value"}), })) d.addCallback(lambda n2: self.failUnlessIdentical(n2, n)) d.addCallback(lambda res: self.shouldFail(ExistingChildError, "set_children-no", "child 'e1' already exists", n.set_children, { u"e1": (other_file_uri, other_file_uri), u"new": (other_file_uri, other_file_uri), }, overwrite=False)) # and 'new' should not have been created d.addCallback(lambda res: n.list()) d.addCallback(lambda children: self.failIf(u"new" in children)) d.addCallback(lambda res: n.get_metadata_for(u"e1")) d.addCallback(lambda metadata: self.failUnlessEqual(set(metadata.keys()), set(["tahoe"]))) d.addCallback(lambda res: n.get_metadata_for(u"e2")) d.addCallback(lambda metadata: self.failUnlessEqual(set(metadata.keys()), set(["tahoe"]))) d.addCallback(lambda res: n.get_metadata_for(u"e3")) d.addCallback(lambda metadata: self.failUnless((set(metadata.keys()) == set(["key", "tahoe"])) and (metadata["key"] == "value"), metadata)) d.addCallback(lambda res: n.delete(u"e1")) d.addCallback(lambda res: n.delete(u"e2")) 
d.addCallback(lambda res: n.delete(u"e3")) # metadata through set_nodes() d.addCallback(lambda res: n.set_nodes({ u"f1": (n, None), u"f2": (n, {}), u"f3": (n, {"key": "value"}), })) d.addCallback(lambda n2: self.failUnlessIdentical(n2, n)) d.addCallback(lambda res: self.shouldFail(ExistingChildError, "set_nodes-no", "child 'f1' already exists", n.set_nodes, { u"f1": (n, None), u"new": (n, None), }, overwrite=False)) # and 'new' should not have been created d.addCallback(lambda res: n.list()) d.addCallback(lambda children: self.failIf(u"new" in children)) d.addCallback(lambda res: n.get_metadata_for(u"f1")) d.addCallback(lambda metadata: self.failUnlessEqual(set(metadata.keys()), set(["tahoe"]))) d.addCallback(lambda res: n.get_metadata_for(u"f2")) d.addCallback(lambda metadata: self.failUnlessEqual(set(metadata.keys()), set(["tahoe"]))) d.addCallback(lambda res: n.get_metadata_for(u"f3")) d.addCallback(lambda metadata: self.failUnless((set(metadata.keys()) == set(["key", "tahoe"])) and (metadata["key"] == "value"), metadata)) d.addCallback(lambda res: n.delete(u"f1")) d.addCallback(lambda res: n.delete(u"f2")) d.addCallback(lambda res: n.delete(u"f3")) d.addCallback(lambda res: n.set_metadata_for(u"child", {"tags": ["web2.0-compatible"], "tahoe": {"bad": "mojo"}})) d.addCallback(lambda n1: n1.get_metadata_for(u"child")) d.addCallback(lambda metadata: self.failUnless((set(metadata.keys()) == set(["tags", "tahoe"])) and metadata["tags"] == ["web2.0-compatible"] and "bad" not in metadata["tahoe"], metadata)) d.addCallback(lambda res: self.shouldFail(NoSuchChildError, "set_metadata_for-nosuch", "", n.set_metadata_for, u"nosuch", {})) def _start(res): self._start_timestamp = time.time() d.addCallback(_start) # a long time ago, we used simplejson-1.7.1 (as shipped on Ubuntu # 'gutsy'), which had a bug/misbehavior in which it would round # all floats to hundredeths (it used str(num) instead of # repr(num)). To prevent this bug from causing the test to fail, # we stall for more than a few hundrededths of a second here. # simplejson-1.7.3 does not have this bug, and anyways we've # moved on to stdlib "json" which doesn't have it either. 
d.addCallback(self.stall, 0.1) d.addCallback(lambda res: n.add_file(u"timestamps", upload.Data(b"stamp me", convergence=b"some convergence string"))) d.addCallback(self.stall, 0.1) def _stop(res): self._stop_timestamp = time.time() d.addCallback(_stop) d.addCallback(lambda res: n.get_metadata_for(u"timestamps")) def _check_timestamp1(metadata): self.failUnlessEqual(set(metadata.keys()), set(["tahoe"])) tahoe_md = metadata["tahoe"] self.failUnlessEqual(set(tahoe_md.keys()), set(["linkcrtime", "linkmotime"])) self.failUnlessGreaterOrEqualThan(tahoe_md["linkcrtime"], self._start_timestamp) self.failUnlessGreaterOrEqualThan(self._stop_timestamp, tahoe_md["linkcrtime"]) self.failUnlessGreaterOrEqualThan(tahoe_md["linkmotime"], self._start_timestamp) self.failUnlessGreaterOrEqualThan(self._stop_timestamp, tahoe_md["linkmotime"]) # Our current timestamp rules say that replacing an existing # child should preserve the 'linkcrtime' but update the # 'linkmotime' self._old_linkcrtime = tahoe_md["linkcrtime"] self._old_linkmotime = tahoe_md["linkmotime"] d.addCallback(_check_timestamp1) d.addCallback(self.stall, 2.0) # accomodate low-res timestamps d.addCallback(lambda res: n.set_node(u"timestamps", n)) d.addCallback(lambda res: n.get_metadata_for(u"timestamps")) def _check_timestamp2(metadata): self.failUnlessIn("tahoe", metadata) tahoe_md = metadata["tahoe"] self.failUnlessEqual(set(tahoe_md.keys()), set(["linkcrtime", "linkmotime"])) self.failUnlessReallyEqual(tahoe_md["linkcrtime"], self._old_linkcrtime) self.failUnlessGreaterThan(tahoe_md["linkmotime"], self._old_linkmotime) return n.delete(u"timestamps") d.addCallback(_check_timestamp2) d.addCallback(lambda res: n.delete(u"subdir")) d.addCallback(lambda old_child: self.failUnlessReallyEqual(old_child.get_uri(), self.subdir.get_uri())) d.addCallback(lambda res: n.list()) d.addCallback(lambda children: self.failUnlessReallyEqual(set(children.keys()), set([u"child"]))) uploadable1 = upload.Data(b"some data", convergence=b"converge") d.addCallback(lambda res: n.add_file(u"newfile", uploadable1)) d.addCallback(lambda newnode: self.failUnless(IImmutableFileNode.providedBy(newnode))) uploadable2 = upload.Data(b"some data", convergence=b"stuff") d.addCallback(lambda res: self.shouldFail(ExistingChildError, "add_file-no", "child 'newfile' already exists", n.add_file, u"newfile", uploadable2, overwrite=False)) d.addCallback(lambda res: n.list()) d.addCallback(lambda children: self.failUnlessReallyEqual(set(children.keys()), set([u"child", u"newfile"]))) d.addCallback(lambda res: n.get_metadata_for(u"newfile")) d.addCallback(lambda metadata: self.failUnlessEqual(set(metadata.keys()), set(["tahoe"]))) uploadable3 = upload.Data(b"some data", convergence=b"converge") d.addCallback(lambda res: n.add_file(u"newfile-metadata", uploadable3, {"key": "value"})) d.addCallback(lambda newnode: self.failUnless(IImmutableFileNode.providedBy(newnode))) d.addCallback(lambda res: n.get_metadata_for(u"newfile-metadata")) d.addCallback(lambda metadata: self.failUnless((set(metadata.keys()) == set(["key", "tahoe"])) and (metadata['key'] == "value"), metadata)) d.addCallback(lambda res: n.delete(u"newfile-metadata")) d.addCallback(lambda res: n.create_subdirectory(u"subdir2")) def _created2(subdir2): self.subdir2 = subdir2 # put something in the way, to make sure it gets overwritten return subdir2.add_file(u"child", upload.Data(b"overwrite me", b"converge")) d.addCallback(_created2) d.addCallback(lambda res: n.move_child_to(u"child", self.subdir2)) d.addCallback(lambda res: 
n.list()) d.addCallback(lambda children: self.failUnlessReallyEqual(set(children.keys()), set([u"newfile", u"subdir2"]))) d.addCallback(lambda res: self.subdir2.list()) d.addCallback(lambda children: self.failUnlessReallyEqual(set(children.keys()), set([u"child"]))) d.addCallback(lambda res: self.subdir2.get(u"child")) d.addCallback(lambda child: self.failUnlessReallyEqual(child.get_uri(), fake_file_uri)) # move it back, using new_child_name= d.addCallback(lambda res: self.subdir2.move_child_to(u"child", n, u"newchild")) d.addCallback(lambda res: n.list()) d.addCallback(lambda children: self.failUnlessReallyEqual(set(children.keys()), set([u"newchild", u"newfile", u"subdir2"]))) d.addCallback(lambda res: self.subdir2.list()) d.addCallback(lambda children: self.failUnlessReallyEqual(set(children.keys()), set([]))) # now make sure that we honor overwrite=False d.addCallback(lambda res: self.subdir2.set_uri(u"newchild", other_file_uri, other_file_uri)) d.addCallback(lambda res: self.shouldFail(ExistingChildError, "move_child_to-no", "child 'newchild' already exists", n.move_child_to, u"newchild", self.subdir2, overwrite=False)) d.addCallback(lambda res: self.subdir2.get(u"newchild")) d.addCallback(lambda child: self.failUnlessReallyEqual(child.get_uri(), other_file_uri)) # Setting the no-write field should diminish a mutable cap to read-only # (for both files and directories). d.addCallback(lambda ign: n.set_uri(u"mutable", other_file_uri, other_file_uri)) d.addCallback(lambda ign: n.get(u"mutable")) d.addCallback(lambda mutable: self.failIf(mutable.is_readonly(), mutable)) d.addCallback(lambda ign: n.set_metadata_for(u"mutable", {"no-write": True})) d.addCallback(lambda ign: n.get(u"mutable")) d.addCallback(lambda mutable: self.failUnless(mutable.is_readonly(), mutable)) d.addCallback(lambda ign: n.set_metadata_for(u"mutable", {"no-write": True})) d.addCallback(lambda ign: n.get(u"mutable")) d.addCallback(lambda mutable: self.failUnless(mutable.is_readonly(), mutable)) d.addCallback(lambda ign: n.get(u"subdir2")) d.addCallback(lambda subdir2: self.failIf(subdir2.is_readonly())) d.addCallback(lambda ign: n.set_metadata_for(u"subdir2", {"no-write": True})) d.addCallback(lambda ign: n.get(u"subdir2")) d.addCallback(lambda subdir2: self.failUnless(subdir2.is_readonly(), subdir2)) d.addCallback(lambda ign: n.set_uri(u"mutable_ro", other_file_uri, other_file_uri, metadata={"no-write": True})) d.addCallback(lambda ign: n.get(u"mutable_ro")) d.addCallback(lambda mutable_ro: self.failUnless(mutable_ro.is_readonly(), mutable_ro)) d.addCallback(lambda ign: n.create_subdirectory(u"subdir_ro", metadata={"no-write": True})) d.addCallback(lambda ign: n.get(u"subdir_ro")) d.addCallback(lambda subdir_ro: self.failUnless(subdir_ro.is_readonly(), subdir_ro)) return d d.addCallback(_then) d.addErrback(self.explain_error) return d def _do_initial_children_test(self, mdmf=False): c = self.g.clients[0] nm = c.nodemaker kids = {one_nfd: (nm.create_from_cap(one_uri), {}), u"two": (nm.create_from_cap(setup_py_uri), {"metakey": "metavalue"}), u"mut": (nm.create_from_cap(mut_write_uri, mut_read_uri), {}), u"mdmf": (nm.create_from_cap(mdmf_write_uri, mdmf_read_uri), {}), u"fut": (nm.create_from_cap(future_write_uri, future_read_uri), {}), u"fro": (nm.create_from_cap(None, future_read_uri), {}), u"fut-unic": (nm.create_from_cap(future_nonascii_write_uri, future_nonascii_read_uri), {}), u"fro-unic": (nm.create_from_cap(None, future_nonascii_read_uri), {}), u"empty_litdir": (nm.create_from_cap(empty_litdir_uri), {}), 
u"tiny_litdir": (nm.create_from_cap(tiny_litdir_uri), {}), } if mdmf: d = c.create_dirnode(kids, version=MDMF_VERSION) else: d = c.create_dirnode(kids) def _created(dn): self.failUnless(isinstance(dn, dirnode.DirectoryNode)) backing_node = dn._node if mdmf: self.failUnlessEqual(backing_node.get_version(), MDMF_VERSION) else: self.failUnlessEqual(backing_node.get_version(), SDMF_VERSION) self.failUnless(dn.is_mutable()) self.failIf(dn.is_readonly()) self.failIf(dn.is_unknown()) self.failIf(dn.is_allowed_in_immutable_directory()) dn.raise_error() rep = str(dn) self.failUnless("RW-MUT" in rep) return dn.list() d.addCallback(_created) def _check_kids(children): self.failUnlessReallyEqual(set(children.keys()), set([one_nfc, u"two", u"mut", u"mdmf", u"fut", u"fro", u"fut-unic", u"fro-unic", u"empty_litdir", u"tiny_litdir"])) one_node, one_metadata = children[one_nfc] two_node, two_metadata = children[u"two"] mut_node, mut_metadata = children[u"mut"] mdmf_node, mdmf_metadata = children[u"mdmf"] fut_node, fut_metadata = children[u"fut"] fro_node, fro_metadata = children[u"fro"] futna_node, futna_metadata = children[u"fut-unic"] frona_node, frona_metadata = children[u"fro-unic"] emptylit_node, emptylit_metadata = children[u"empty_litdir"] tinylit_node, tinylit_metadata = children[u"tiny_litdir"] self.failUnlessReallyEqual(one_node.get_size(), 3) self.failUnlessReallyEqual(one_node.get_uri(), one_uri) self.failUnlessReallyEqual(one_node.get_readonly_uri(), one_uri) self.failUnless(isinstance(one_metadata, dict), one_metadata) self.failUnlessReallyEqual(two_node.get_size(), 14861) self.failUnlessReallyEqual(two_node.get_uri(), setup_py_uri) self.failUnlessReallyEqual(two_node.get_readonly_uri(), setup_py_uri) self.failUnlessEqual(two_metadata["metakey"], "metavalue") self.failUnlessReallyEqual(mut_node.get_uri(), mut_write_uri) self.failUnlessReallyEqual(mut_node.get_readonly_uri(), mut_read_uri) self.failUnless(isinstance(mut_metadata, dict), mut_metadata) self.failUnlessReallyEqual(mdmf_node.get_uri(), mdmf_write_uri) self.failUnlessReallyEqual(mdmf_node.get_readonly_uri(), mdmf_read_uri) self.failUnless(isinstance(mdmf_metadata, dict), mdmf_metadata) self.failUnless(fut_node.is_unknown()) self.failUnlessReallyEqual(fut_node.get_uri(), future_write_uri) self.failUnlessReallyEqual(fut_node.get_readonly_uri(), b"ro." + future_read_uri) self.failUnless(isinstance(fut_metadata, dict), fut_metadata) self.failUnless(futna_node.is_unknown()) self.failUnlessReallyEqual(futna_node.get_uri(), future_nonascii_write_uri) self.failUnlessReallyEqual(futna_node.get_readonly_uri(), b"ro." + future_nonascii_read_uri) self.failUnless(isinstance(futna_metadata, dict), futna_metadata) self.failUnless(fro_node.is_unknown()) self.failUnlessReallyEqual(fro_node.get_uri(), b"ro." + future_read_uri) self.failUnlessReallyEqual(fut_node.get_readonly_uri(), b"ro." + future_read_uri) self.failUnless(isinstance(fro_metadata, dict), fro_metadata) self.failUnless(frona_node.is_unknown()) self.failUnlessReallyEqual(frona_node.get_uri(), b"ro." + future_nonascii_read_uri) self.failUnlessReallyEqual(futna_node.get_readonly_uri(), b"ro." 
+ future_nonascii_read_uri) self.failUnless(isinstance(frona_metadata, dict), frona_metadata) self.failIf(emptylit_node.is_unknown()) self.failUnlessReallyEqual(emptylit_node.get_storage_index(), None) self.failIf(tinylit_node.is_unknown()) self.failUnlessReallyEqual(tinylit_node.get_storage_index(), None) d2 = defer.succeed(None) d2.addCallback(lambda ignored: emptylit_node.list()) d2.addCallback(lambda children: self.failUnlessEqual(children, {})) d2.addCallback(lambda ignored: tinylit_node.list()) d2.addCallback(lambda children: self.failUnlessReallyEqual(set(children.keys()), set([u"short"]))) d2.addCallback(lambda ignored: tinylit_node.list()) d2.addCallback(lambda children: children[u"short"][0].read(MemAccum())) d2.addCallback(lambda accum: self.failUnlessReallyEqual(accum.data, b"The end.")) return d2 d.addCallback(_check_kids) d.addCallback(lambda ign: nm.create_new_mutable_directory(kids)) d.addCallback(lambda dn: dn.list()) d.addCallback(_check_kids) bad_future_node = UnknownNode(future_write_uri, None) bad_kids1 = {one_nfd: (bad_future_node, {})} # This should fail because we don't know how to diminish the future_write_uri # cap (given in a write slot and not prefixed with "ro." or "imm.") to a readcap. d.addCallback(lambda ign: self.shouldFail(MustNotBeUnknownRWError, "bad_kids1", "cannot attach unknown", nm.create_new_mutable_directory, bad_kids1)) bad_kids2 = {one_nfd: (nm.create_from_cap(one_uri), None)} d.addCallback(lambda ign: self.shouldFail(AssertionError, "bad_kids2", "requires metadata to be a dict", nm.create_new_mutable_directory, bad_kids2)) return d def _do_basic_test(self, mdmf=False): c = self.g.clients[0] d = None if mdmf: d = c.create_dirnode(version=MDMF_VERSION) else: d = c.create_dirnode() def _done(res): self.failUnless(isinstance(res, dirnode.DirectoryNode)) self.failUnless(res.is_mutable()) self.failIf(res.is_readonly()) self.failIf(res.is_unknown()) self.failIf(res.is_allowed_in_immutable_directory()) res.raise_error() rep = str(res) self.failUnless("RW-MUT" in rep) d.addCallback(_done) return d def test_basic(self): self.basedir = "dirnode/Dirnode/test_basic" self.set_up_grid(oneshare=True) return self._do_basic_test() def test_basic_mdmf(self): self.basedir = "dirnode/Dirnode/test_basic_mdmf" self.set_up_grid(oneshare=True) return self._do_basic_test(mdmf=True) def test_initial_children(self): self.basedir = "dirnode/Dirnode/test_initial_children" self.set_up_grid(oneshare=True) return self._do_initial_children_test() def test_immutable(self): self.basedir = "dirnode/Dirnode/test_immutable" self.set_up_grid(oneshare=True) c = self.g.clients[0] nm = c.nodemaker kids = {one_nfd: (nm.create_from_cap(one_uri), {}), u"two": (nm.create_from_cap(setup_py_uri), {"metakey": "metavalue"}), u"fut": (nm.create_from_cap(None, future_read_uri), {}), u"futna": (nm.create_from_cap(None, future_nonascii_read_uri), {}), u"empty_litdir": (nm.create_from_cap(empty_litdir_uri), {}), u"tiny_litdir": (nm.create_from_cap(tiny_litdir_uri), {}), } d = c.create_immutable_dirnode(kids) def _created(dn): self.failUnless(isinstance(dn, dirnode.DirectoryNode)) self.failIf(dn.is_mutable()) self.failUnless(dn.is_readonly()) self.failIf(dn.is_unknown()) self.failUnless(dn.is_allowed_in_immutable_directory()) dn.raise_error() rep = str(dn) self.failUnless("RO-IMM" in rep) cap = dn.get_cap() self.failUnlessIn(b"CHK", cap.to_string()) self.cap = cap return dn.list() d.addCallback(_created) def _check_kids(children): self.failUnlessReallyEqual(set(children.keys()), set([one_nfc, u"two", 
u"fut", u"futna", u"empty_litdir", u"tiny_litdir"])) one_node, one_metadata = children[one_nfc] two_node, two_metadata = children[u"two"] fut_node, fut_metadata = children[u"fut"] futna_node, futna_metadata = children[u"futna"] emptylit_node, emptylit_metadata = children[u"empty_litdir"] tinylit_node, tinylit_metadata = children[u"tiny_litdir"] self.failUnlessReallyEqual(one_node.get_size(), 3) self.failUnlessReallyEqual(one_node.get_uri(), one_uri) self.failUnlessReallyEqual(one_node.get_readonly_uri(), one_uri) self.failUnless(isinstance(one_metadata, dict), one_metadata) self.failUnlessReallyEqual(two_node.get_size(), 14861) self.failUnlessReallyEqual(two_node.get_uri(), setup_py_uri) self.failUnlessReallyEqual(two_node.get_readonly_uri(), setup_py_uri) self.failUnlessEqual(two_metadata["metakey"], "metavalue") self.failUnless(fut_node.is_unknown()) self.failUnlessReallyEqual(fut_node.get_uri(), b"imm." + future_read_uri) self.failUnlessReallyEqual(fut_node.get_readonly_uri(), b"imm." + future_read_uri) self.failUnless(isinstance(fut_metadata, dict), fut_metadata) self.failUnless(futna_node.is_unknown()) self.failUnlessReallyEqual(futna_node.get_uri(), b"imm." + future_nonascii_read_uri) self.failUnlessReallyEqual(futna_node.get_readonly_uri(), b"imm." + future_nonascii_read_uri) self.failUnless(isinstance(futna_metadata, dict), futna_metadata) self.failIf(emptylit_node.is_unknown()) self.failUnlessReallyEqual(emptylit_node.get_storage_index(), None) self.failIf(tinylit_node.is_unknown()) self.failUnlessReallyEqual(tinylit_node.get_storage_index(), None) d2 = defer.succeed(None) d2.addCallback(lambda ignored: emptylit_node.list()) d2.addCallback(lambda children: self.failUnlessEqual(children, {})) d2.addCallback(lambda ignored: tinylit_node.list()) d2.addCallback(lambda children: self.failUnlessReallyEqual(set(children.keys()), set([u"short"]))) d2.addCallback(lambda ignored: tinylit_node.list()) d2.addCallback(lambda children: children[u"short"][0].read(MemAccum())) d2.addCallback(lambda accum: self.failUnlessReallyEqual(accum.data, b"The end.")) return d2 d.addCallback(_check_kids) d.addCallback(lambda ign: nm.create_from_cap(self.cap.to_string())) d.addCallback(lambda dn: dn.list()) d.addCallback(_check_kids) bad_future_node1 = UnknownNode(future_write_uri, None) bad_kids1 = {one_nfd: (bad_future_node1, {})} d.addCallback(lambda ign: self.shouldFail(MustNotBeUnknownRWError, "bad_kids1", "cannot attach unknown", c.create_immutable_dirnode, bad_kids1)) bad_future_node2 = UnknownNode(future_write_uri, future_read_uri) bad_kids2 = {one_nfd: (bad_future_node2, {})} d.addCallback(lambda ign: self.shouldFail(MustBeDeepImmutableError, "bad_kids2", "is not allowed in an immutable directory", c.create_immutable_dirnode, bad_kids2)) bad_kids3 = {one_nfd: (nm.create_from_cap(one_uri), None)} d.addCallback(lambda ign: self.shouldFail(AssertionError, "bad_kids3", "requires metadata to be a dict", c.create_immutable_dirnode, bad_kids3)) bad_kids4 = {one_nfd: (nm.create_from_cap(mut_write_uri), {})} d.addCallback(lambda ign: self.shouldFail(MustBeDeepImmutableError, "bad_kids4", "is not allowed in an immutable directory", c.create_immutable_dirnode, bad_kids4)) bad_kids5 = {one_nfd: (nm.create_from_cap(mut_read_uri), {})} d.addCallback(lambda ign: self.shouldFail(MustBeDeepImmutableError, "bad_kids5", "is not allowed in an immutable directory", c.create_immutable_dirnode, bad_kids5)) bad_kids6 = {one_nfd: (nm.create_from_cap(mdmf_write_uri), {})} d.addCallback(lambda ign: 
self.shouldFail(MustBeDeepImmutableError, "bad_kids6", "is not allowed in an immutable directory", c.create_immutable_dirnode, bad_kids6)) bad_kids7 = {one_nfd: (nm.create_from_cap(mdmf_read_uri), {})} d.addCallback(lambda ign: self.shouldFail(MustBeDeepImmutableError, "bad_kids7", "is not allowed in an immutable directory", c.create_immutable_dirnode, bad_kids7)) d.addCallback(lambda ign: c.create_immutable_dirnode({})) def _created_empty(dn): self.failUnless(isinstance(dn, dirnode.DirectoryNode)) self.failIf(dn.is_mutable()) self.failUnless(dn.is_readonly()) self.failIf(dn.is_unknown()) self.failUnless(dn.is_allowed_in_immutable_directory()) dn.raise_error() rep = str(dn) self.failUnless("RO-IMM" in rep) cap = dn.get_cap() self.failUnlessIn(b"LIT", cap.to_string()) self.failUnlessReallyEqual(cap.to_string(), b"URI:DIR2-LIT:") self.cap = cap return dn.list() d.addCallback(_created_empty) d.addCallback(lambda kids: self.failUnlessEqual(kids, {})) smallkids = {u"o": (nm.create_from_cap(one_uri), {})} d.addCallback(lambda ign: c.create_immutable_dirnode(smallkids)) def _created_small(dn): self.failUnless(isinstance(dn, dirnode.DirectoryNode)) self.failIf(dn.is_mutable()) self.failUnless(dn.is_readonly()) self.failIf(dn.is_unknown()) self.failUnless(dn.is_allowed_in_immutable_directory()) dn.raise_error() rep = str(dn) self.failUnless("RO-IMM" in rep) cap = dn.get_cap() self.failUnlessIn(b"LIT", cap.to_string()) self.failUnlessReallyEqual(cap.to_string(), b"URI:DIR2-LIT:gi4tumj2n4wdcmz2kvjesosmjfkdu3rvpbtwwlbqhiwdeot3puwcy") self.cap = cap return dn.list() d.addCallback(_created_small) d.addCallback(lambda kids: self.failUnlessReallyEqual(list(kids.keys()), [u"o"])) # now test n.create_subdirectory(mutable=False) d.addCallback(lambda ign: c.create_dirnode()) def _made_parent(n): d = n.create_subdirectory(u"subdir", kids, mutable=False) d.addCallback(lambda sd: sd.list()) d.addCallback(_check_kids) d.addCallback(lambda ign: n.list()) d.addCallback(lambda children: self.failUnlessReallyEqual(list(children.keys()), [u"subdir"])) d.addCallback(lambda ign: n.get(u"subdir")) d.addCallback(lambda sd: sd.list()) d.addCallback(_check_kids) d.addCallback(lambda ign: n.get(u"subdir")) d.addCallback(lambda sd: self.failIf(sd.is_mutable())) bad_kids8 = {one_nfd: (nm.create_from_cap(mut_write_uri), {})} d.addCallback(lambda ign: self.shouldFail(MustBeDeepImmutableError, "bad_kids8", "is not allowed in an immutable directory", n.create_subdirectory, u"sub2", bad_kids8, mutable=False)) bad_kids9 = {one_nfd: (nm.create_from_cap(mdmf_write_uri), {})} d.addCallback(lambda ign: self.shouldFail(MustBeDeepImmutableError, "bad_kids9", "is not allowed in an immutable directory", n.create_subdirectory, u"sub2", bad_kids9, mutable=False)) return d d.addCallback(_made_parent) return d def test_directory_representation(self): self.basedir = "dirnode/Dirnode/test_directory_representation" self.set_up_grid(oneshare=True) c = self.g.clients[0] nm = c.nodemaker # This test checks that any trailing spaces in URIs are retained in the # encoded directory, but stripped when we get them out of the directory. # See ticket #925 for why we want that. # It also tests that we store child names as UTF-8 NFC, and normalize # them again when retrieving them. 
stripped_write_uri = b"lafs://from_the_future\t" stripped_read_uri = b"lafs://readonly_from_the_future\t" spacedout_write_uri = stripped_write_uri + b" " spacedout_read_uri = stripped_read_uri + b" " child = nm.create_from_cap(spacedout_write_uri, spacedout_read_uri) self.failUnlessReallyEqual(child.get_write_uri(), spacedout_write_uri) self.failUnlessReallyEqual(child.get_readonly_uri(), b"ro." + spacedout_read_uri) child_dottedi = u"ch\u0131\u0307ld" kids_in = {child_dottedi: (child, {}), one_nfd: (child, {})} kids_out = {child_dottedi: (child, {}), one_nfc: (child, {})} kids_norm = {u"child": (child, {}), one_nfc: (child, {})} d = c.create_dirnode(kids_in) def _created(dn): self.failUnless(isinstance(dn, dirnode.DirectoryNode)) self.failUnless(dn.is_mutable()) self.failIf(dn.is_readonly()) dn.raise_error() self.cap = dn.get_cap() self.rootnode = dn return dn._node.download_best_version() d.addCallback(_created) def _check_data(data): # Decode the netstring representation of the directory to check that the # spaces are retained when the URIs are stored, and that the names are stored # as NFC. position = 0 numkids = 0 while position < len(data): entries, position = split_netstring(data, 1, position) entry = entries[0] (name_utf8, ro_uri, rwcapdata, metadata_s), subpos = split_netstring(entry, 4) name = name_utf8.decode("utf-8") rw_uri = self.rootnode._decrypt_rwcapdata(rwcapdata) self.failUnlessIn(name, kids_out) (expected_child, ign) = kids_out[name] self.failUnlessReallyEqual(rw_uri, expected_child.get_write_uri()) self.failUnlessReallyEqual(b"ro." + ro_uri, expected_child.get_readonly_uri()) numkids += 1 self.failUnlessReallyEqual(numkids, len(kids_out)) return self.rootnode d.addCallback(_check_data) # Mock up a hypothetical future version of Unicode that adds a canonical equivalence # between dotless-i + dot-above, and 'i'. That would actually be prohibited by the # stability rules, but similar additions involving currently-unassigned characters # would not be. old_normalize = unicodedata.normalize def future_normalize(form, s): assert form == 'NFC', form return old_normalize(form, s).replace(u"\u0131\u0307", u"i") def _list(node): unicodedata.normalize = future_normalize d2 = node.list() def _undo_mock(res): unicodedata.normalize = old_normalize return res d2.addBoth(_undo_mock) return d2 d.addCallback(_list) def _check_kids(children): # Now when we use the real directory listing code, the trailing spaces # should have been stripped (and "ro." should have been prepended to the # ro_uri, since it's unknown). Also the dotless-i + dot-above should have been # normalized to 'i'. self.failUnlessReallyEqual(set(children.keys()), set(kids_norm.keys())) child_node, child_metadata = children[u"child"] self.failUnlessReallyEqual(child_node.get_write_uri(), stripped_write_uri) self.failUnlessReallyEqual(child_node.get_readonly_uri(), b"ro." 
+ stripped_read_uri) d.addCallback(_check_kids) d.addCallback(lambda ign: nm.create_from_cap(self.cap.to_string())) d.addCallback(_list) d.addCallback(_check_kids) # again with dirnode recreated from cap return d def test_check(self): self.basedir = "dirnode/Dirnode/test_check" self.set_up_grid(oneshare=True) c = self.g.clients[0] d = c.create_dirnode() d.addCallback(lambda dn: dn.check(Monitor())) def _done(res): self.failUnless(res.is_healthy()) d.addCallback(_done) return d def _test_deepcheck_create(self, version=SDMF_VERSION): # create a small tree with a loop, and some non-directories # root/ # root/subdir/ # root/subdir/file1 # root/subdir/link -> root # root/rodir c = self.g.clients[0] d = c.create_dirnode(version=version) def _created_root(rootnode): self._rootnode = rootnode self.failUnlessEqual(rootnode._node.get_version(), version) return rootnode.create_subdirectory(u"subdir") d.addCallback(_created_root) def _created_subdir(subdir): self._subdir = subdir d = subdir.add_file(u"file1", upload.Data(b"data"*100, None)) d.addCallback(lambda res: subdir.set_node(u"link", self._rootnode)) d.addCallback(lambda res: c.create_dirnode()) d.addCallback(lambda dn: self._rootnode.set_uri(u"rodir", dn.get_uri(), dn.get_readonly_uri())) return d d.addCallback(_created_subdir) def _done(res): return self._rootnode d.addCallback(_done) return d def test_deepcheck(self): self.basedir = "dirnode/Dirnode/test_deepcheck" self.set_up_grid(oneshare=True) d = self._test_deepcheck_create() d.addCallback(lambda rootnode: rootnode.start_deep_check().when_done()) def _check_results(r): self.failUnless(IDeepCheckResults.providedBy(r)) c = r.get_counters() self.failUnlessReallyEqual(c, {"count-objects-checked": 4, "count-objects-healthy": 4, "count-objects-unhealthy": 0, "count-objects-unrecoverable": 0, "count-corrupt-shares": 0, }) self.failIf(r.get_corrupt_shares()) self.failUnlessReallyEqual(len(r.get_all_results()), 4) d.addCallback(_check_results) return d def test_deepcheck_cachemisses(self): self.basedir = "dirnode/Dirnode/test_mdmf_cachemisses" self.set_up_grid(oneshare=True) d = self._test_deepcheck_create() # Clear the counters and set the rootnode d.addCallback(lambda rootnode: not [ss._clear_counters() for ss in self.g.wrappers_by_id.values()] or rootnode) d.addCallback(lambda rootnode: rootnode.start_deep_check().when_done()) def _check(ign): count = sum([ss.counter_by_methname['slot_readv'] for ss in self.g.wrappers_by_id.values()]) self.failIf(count > 60, 'Expected only 60 cache misses,' 'unfortunately there were %d' % (count,)) d.addCallback(_check) return d def test_deepcheck_mdmf(self): self.basedir = "dirnode/Dirnode/test_deepcheck_mdmf" self.set_up_grid(oneshare=True) d = self._test_deepcheck_create(MDMF_VERSION) d.addCallback(lambda rootnode: rootnode.start_deep_check().when_done()) def _check_results(r): self.failUnless(IDeepCheckResults.providedBy(r)) c = r.get_counters() self.failUnlessReallyEqual(c, {"count-objects-checked": 4, "count-objects-healthy": 4, "count-objects-unhealthy": 0, "count-objects-unrecoverable": 0, "count-corrupt-shares": 0, }) self.failIf(r.get_corrupt_shares()) self.failUnlessReallyEqual(len(r.get_all_results()), 4) d.addCallback(_check_results) return d def test_deepcheck_and_repair(self): self.basedir = "dirnode/Dirnode/test_deepcheck_and_repair" self.set_up_grid(oneshare=True) d = self._test_deepcheck_create() d.addCallback(lambda rootnode: rootnode.start_deep_check_and_repair().when_done()) def _check_results(r): 
self.failUnless(IDeepCheckAndRepairResults.providedBy(r)) c = r.get_counters() self.failUnlessReallyEqual(c, {"count-objects-checked": 4, "count-objects-healthy-pre-repair": 4, "count-objects-unhealthy-pre-repair": 0, "count-objects-unrecoverable-pre-repair": 0, "count-corrupt-shares-pre-repair": 0, "count-objects-healthy-post-repair": 4, "count-objects-unhealthy-post-repair": 0, "count-objects-unrecoverable-post-repair": 0, "count-corrupt-shares-post-repair": 0, "count-repairs-attempted": 0, "count-repairs-successful": 0, "count-repairs-unsuccessful": 0, }) self.failIf(r.get_corrupt_shares()) self.failIf(r.get_remaining_corrupt_shares()) self.failUnlessReallyEqual(len(r.get_all_results()), 4) d.addCallback(_check_results) return d def test_deepcheck_and_repair_mdmf(self): self.basedir = "dirnode/Dirnode/test_deepcheck_and_repair_mdmf" self.set_up_grid(oneshare=True) d = self._test_deepcheck_create(version=MDMF_VERSION) d.addCallback(lambda rootnode: rootnode.start_deep_check_and_repair().when_done()) def _check_results(r): self.failUnless(IDeepCheckAndRepairResults.providedBy(r)) c = r.get_counters() self.failUnlessReallyEqual(c, {"count-objects-checked": 4, "count-objects-healthy-pre-repair": 4, "count-objects-unhealthy-pre-repair": 0, "count-objects-unrecoverable-pre-repair": 0, "count-corrupt-shares-pre-repair": 0, "count-objects-healthy-post-repair": 4, "count-objects-unhealthy-post-repair": 0, "count-objects-unrecoverable-post-repair": 0, "count-corrupt-shares-post-repair": 0, "count-repairs-attempted": 0, "count-repairs-successful": 0, "count-repairs-unsuccessful": 0, }) self.failIf(r.get_corrupt_shares()) self.failIf(r.get_remaining_corrupt_shares()) self.failUnlessReallyEqual(len(r.get_all_results()), 4) d.addCallback(_check_results) return d def _mark_file_bad(self, rootnode): self.delete_shares_numbered(rootnode.get_uri(), [0]) return rootnode def test_deepcheck_problems(self): self.basedir = "dirnode/Dirnode/test_deepcheck_problems" self.set_up_grid() d = self._test_deepcheck_create() d.addCallback(lambda rootnode: self._mark_file_bad(rootnode)) d.addCallback(lambda rootnode: rootnode.start_deep_check().when_done()) def _check_results(r): c = r.get_counters() self.failUnlessReallyEqual(c, {"count-objects-checked": 4, "count-objects-healthy": 3, "count-objects-unhealthy": 1, "count-objects-unrecoverable": 0, "count-corrupt-shares": 0, }) #self.failUnlessReallyEqual(len(r.get_problems()), 1) # TODO d.addCallback(_check_results) return d def test_deepcheck_problems_mdmf(self): self.basedir = "dirnode/Dirnode/test_deepcheck_problems_mdmf" self.set_up_grid() d = self._test_deepcheck_create(version=MDMF_VERSION) d.addCallback(lambda rootnode: self._mark_file_bad(rootnode)) d.addCallback(lambda rootnode: rootnode.start_deep_check().when_done()) def _check_results(r): c = r.get_counters() self.failUnlessReallyEqual(c, {"count-objects-checked": 4, "count-objects-healthy": 3, "count-objects-unhealthy": 1, "count-objects-unrecoverable": 0, "count-corrupt-shares": 0, }) #self.failUnlessReallyEqual(len(r.get_problems()), 1) # TODO d.addCallback(_check_results) return d def _do_readonly_test(self, version=SDMF_VERSION): c = self.g.clients[0] nm = c.nodemaker filecap = make_chk_file_uri(1234) filenode = nm.create_from_cap(filecap) uploadable = upload.Data(b"some data", convergence=b"some convergence string") d = c.create_dirnode(version=version) def _created(rw_dn): backing_node = rw_dn._node self.failUnlessEqual(backing_node.get_version(), version) d2 = rw_dn.set_uri(u"child", filecap, 
filecap) d2.addCallback(lambda res: rw_dn) return d2 d.addCallback(_created) def _ready(rw_dn): ro_uri = rw_dn.get_readonly_uri() ro_dn = c.create_node_from_uri(ro_uri) self.failUnless(ro_dn.is_readonly()) self.failUnless(ro_dn.is_mutable()) self.failIf(ro_dn.is_unknown()) self.failIf(ro_dn.is_allowed_in_immutable_directory()) ro_dn.raise_error() self.shouldFail(dirnode.NotWriteableError, "set_uri ro", None, ro_dn.set_uri, u"newchild", filecap, filecap) self.shouldFail(dirnode.NotWriteableError, "set_uri ro", None, ro_dn.set_node, u"newchild", filenode) self.shouldFail(dirnode.NotWriteableError, "set_nodes ro", None, ro_dn.set_nodes, { u"newchild": (filenode, None) }) self.shouldFail(dirnode.NotWriteableError, "set_uri ro", None, ro_dn.add_file, u"newchild", uploadable) self.shouldFail(dirnode.NotWriteableError, "set_uri ro", None, ro_dn.delete, u"child") self.shouldFail(dirnode.NotWriteableError, "set_uri ro", None, ro_dn.create_subdirectory, u"newchild") self.shouldFail(dirnode.NotWriteableError, "set_metadata_for ro", None, ro_dn.set_metadata_for, u"child", {}) self.shouldFail(dirnode.NotWriteableError, "set_uri ro", None, ro_dn.move_child_to, u"child", rw_dn) self.shouldFail(dirnode.NotWriteableError, "set_uri ro", None, rw_dn.move_child_to, u"child", ro_dn) return ro_dn.list() d.addCallback(_ready) def _listed(children): self.failUnless(u"child" in children) d.addCallback(_listed) return d def test_readonly(self): self.basedir = "dirnode/Dirnode/test_readonly" self.set_up_grid(oneshare=True) return self._do_readonly_test() def test_readonly_mdmf(self): self.basedir = "dirnode/Dirnode/test_readonly_mdmf" self.set_up_grid(oneshare=True) return self._do_readonly_test(version=MDMF_VERSION) def failUnlessGreaterThan(self, a, b): self.failUnless(a > b, "%r should be > %r" % (a, b)) def failUnlessGreaterOrEqualThan(self, a, b): self.failUnless(a >= b, "%r should be >= %r" % (a, b)) def test_create(self): self.basedir = "dirnode/Dirnode/test_create" self.set_up_grid(oneshare=True) return self._do_create_test() def test_update_metadata(self): (t1, t2, t3) = (626644800.0, 634745640.0, 892226160.0) md1 = dirnode.update_metadata({"ctime": t1}, {}, t2) self.failUnlessEqual(md1, {"tahoe":{"linkcrtime": t1, "linkmotime": t2}}) md2 = dirnode.update_metadata(md1, {"key": "value", "tahoe": {"bad": "mojo"}}, t3) self.failUnlessEqual(md2, {"key": "value", "tahoe":{"linkcrtime": t1, "linkmotime": t3}}) md3 = dirnode.update_metadata({}, None, t3) self.failUnlessEqual(md3, {"tahoe":{"linkcrtime": t3, "linkmotime": t3}}) md4 = dirnode.update_metadata({}, {"bool": True, "number": 42}, t1) self.failUnlessEqual(md4, {"bool": True, "number": 42, "tahoe":{"linkcrtime": t1, "linkmotime": t1}}) def _do_create_subdirectory_test(self, version=SDMF_VERSION): c = self.g.clients[0] nm = c.nodemaker d = c.create_dirnode(version=version) def _then(n): # / self.rootnode = n fake_file_uri = make_mutable_file_uri() other_file_uri = make_mutable_file_uri() md = {"metakey": "metavalue"} kids = {u"kid1": (nm.create_from_cap(fake_file_uri), {}), u"kid2": (nm.create_from_cap(other_file_uri), md), } d = n.create_subdirectory(u"subdir", kids, mutable_version=version) def _check(sub): d = n.get_child_at_path(u"subdir") d.addCallback(lambda sub2: self.failUnlessReallyEqual(sub2.get_uri(), sub.get_uri())) d.addCallback(lambda ign: sub.list()) return d d.addCallback(_check) def _check_kids(kids2): self.failUnlessEqual(set(kids.keys()), set(kids2.keys())) self.failUnlessEqual(kids2[u"kid2"][1]["metakey"], "metavalue") 
d.addCallback(_check_kids) return d d.addCallback(_then) return d def test_create_subdirectory(self): self.basedir = "dirnode/Dirnode/test_create_subdirectory" self.set_up_grid(oneshare=True) return self._do_create_subdirectory_test() def test_create_subdirectory_mdmf(self): self.basedir = "dirnode/Dirnode/test_create_subdirectory_mdmf" self.set_up_grid(oneshare=True) return self._do_create_subdirectory_test(version=MDMF_VERSION) def test_create_mdmf(self): self.basedir = "dirnode/Dirnode/test_mdmf" self.set_up_grid(oneshare=True) return self._do_create_test(mdmf=True) def test_mdmf_initial_children(self): self.basedir = "dirnode/Dirnode/test_mdmf" self.set_up_grid(oneshare=True) return self._do_initial_children_test(mdmf=True) class MinimalFakeMutableFile(object): def get_writekey(self): return b"writekey" class Packing(testutil.ReallyEqualMixin, unittest.TestCase): # This is a base32-encoded representation of the directory tree # root/file1 # root/file2 # root/file3 # as represented after being fed to _pack_contents. # We have it here so we can decode it, feed it to # _unpack_contents, and verify that _unpack_contents # works correctly. known_tree = "GM4TOORVHJTGS3DFGEWDSNJ2KVJESOSDJBFTU33MPB2GS3LZNVYG6N3GGI3WU5TIORTXC3DOMJ2G4NB2MVWXUZDONBVTE5LNGRZWK2LYN55GY23XGNYXQMTOMZUWU5TENN4DG23ZG5UTO2L2NQ2DO6LFMRWDMZJWGRQTUMZ2GEYDUMJQFQYTIMZ22XZKZORX5XS7CAQCSK3URR6QOHISHRCMGER5LRFSZRNAS5ZSALCS6TWFQAE754IVOIKJVK73WZPP3VUUEDTX3WHTBBZ5YX3CEKHCPG3ZWQLYA4QM6LDRCF7TJQYWLIZHKGN5ROA3AUZPXESBNLQQ6JTC2DBJU2D47IZJTLR3PKZ4RVF57XLPWY7FX7SZV3T6IJ3ORFW37FXUPGOE3ROPFNUX5DCGMAQJ3PGGULBRGM3TU6ZCMN2GS3LFEI5CAMJSGQ3DMNRTHA4TOLRUGI3TKNRWGEWCAITUMFUG6ZJCHIQHWITMNFXGW3LPORUW2ZJCHIQDCMRUGY3DMMZYHE3S4NBSG42TMNRRFQQCE3DJNZVWG4TUNFWWKIR2EAYTENBWGY3DGOBZG4XDIMRXGU3DMML5FQQCE3LUNFWWKIR2EAYTENBWGY3DGOBZG4XDIMRXGU3DMML5FQWDGOJRHI2TUZTJNRSTELBZGQ5FKUSJHJBUQSZ2MFYGKZ3SOBSWQ43IO52WO23CNAZWU3DUGVSWSNTIOE5DK33POVTW4ZLNMNWDK6DHPA2GS2THNF2W25DEN5VGY2LQNFRGG5DKNNRHO5TZPFTWI6LNMRYGQ2LCGJTHM4J2GM5DCMB2GQWDCNBSHKVVQBGRYMACKJ27CVQ6O6B4QPR72RFVTGOZUI76XUSWAX73JRV5PYRHMIFYZIA25MXDPGUGML6M2NMRSG4YD4W4K37ZDYSXHMJ3IUVT4F64YTQQVBJFFFOUC7J7LAB2VFCL5UKKGMR2D3F4EPOYC7UYWQZNR5KXHBSNXLCNBX2SNF22DCXJIHSMEKWEWOG5XCJEVVZ7UW5IB6I64XXQSJ34B5CAYZGZIIMR6LBRGMZTU6ZCMN2GS3LFEI5CAMJSGQ3DMNRTHA4TOLRUGMYDEMJYFQQCE5DBNBXWKIR2EB5SE3DJNZVW233UNFWWKIR2EAYTENBWGY3DGOBZG4XDIMZQGIYTQLBAEJWGS3TLMNZHI2LNMURDUIBRGI2DMNRWGM4DSNZOGQZTAMRRHB6SYIBCNV2GS3LFEI5CAMJSGQ3DMNRTHA4TOLRUGMYDEMJYPUWCYMZZGU5DKOTGNFWGKMZMHE2DUVKSJE5EGSCLHJRW25DDPBYTO2DXPB3GM6DBNYZTI6LJMV3DM2LWNB4TU4LWMNSWW3LKORXWK5DEMN3TI23NNE3WEM3SORRGY5THPA3TKNBUMNZG453BOF2GSZLXMVWWI3DJOFZW623RHIZTUMJQHI2SYMJUGI5BOSHWDPG3WKPAVXCF3XMKA7QVIWPRMWJHDTQHD27AHDCPJWDQENQ5H5ZZILTXQNIXXCIW4LKQABU2GCFRG5FHQN7CHD7HF4EKNRZFIV2ZYQIBM7IQU7F4RGB3XCX3FREPBKQ7UCICHVWPCYFGA6OLH3J45LXQ6GWWICJ3PGWJNLZ7PCRNLAPNYUGU6BENS7OXMBEOOFRIZV3PF2FFWZ5WHDPKXERYP7GNHKRMGEZTOOT3EJRXI2LNMURDUIBRGI2DMNRWGM4DSNZOGQZTGNRSGY4SYIBCORQWQ33FEI5CA6ZCNRUW423NN52GS3LFEI5CAMJSGQ3DMNRTHA4TOLRUGMZTMMRWHEWCAITMNFXGWY3SORUW2ZJCHIQDCMRUGY3DMMZYHE3S4NBTGM3DENRZPUWCAITNORUW2ZJCHIQDCMRUGY3DMMZYHE3S4NBTGM3DENRZPUWCY===" def test_unpack_and_pack_behavior(self): known_tree = b32decode(self.known_tree) nodemaker = NodeMaker(None, None, None, None, None, {"k": 3, "n": 10}, None, None) write_uri = b"URI:SSK-RO:e3mdrzfwhoq42hy5ubcz6rp3o4:ybyibhnp3vvwuq2vaw2ckjmesgkklfs6ghxleztqidihjyofgw7q" filenode = nodemaker.create_from_cap(write_uri) node = dirnode.DirectoryNode(filenode, nodemaker, None) children = node._unpack_contents(known_tree) self._check_children(children) 
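# --- Editor's sketch (illustrative, not part of the original test file) ----
# The packed directory contents unpacked above (and re-packed just below by
# _pack_contents) are, per the _check_data helper in
# test_directory_representation earlier in this file, a concatenation of
# netstrings: each child is one netstring whose payload is four further
# netstrings (UTF-8 name, ro_uri, encrypted rwcap data, serialized
# metadata).  The real helpers are split_netstring and the dirnode packing
# code; this hypothetical helper only mirrors the classic netstring framing:
def _sketch_netstring(payload):
    return b"%d:%s," % (len(payload), payload)
assert _sketch_netstring(b"abc") == b"3:abc,"
# a two-field "entry" would then be framed as a netstring of netstrings:
_entry = _sketch_netstring(_sketch_netstring(b"name") + _sketch_netstring(b"rocap"))
assert _entry == b"15:4:name,5:rocap,,"
# ---------------------------------------------------------------------------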
packed_children = node._pack_contents(children) children = node._unpack_contents(packed_children) self._check_children(children) def _check_children(self, children): # Are all the expected child nodes there? self.failUnless(u'file1' in children) self.failUnless(u'file2' in children) self.failUnless(u'file3' in children) # Are the metadata for child 3 right? file3_rocap = b"URI:CHK:cmtcxq7hwxvfxan34yiev6ivhy:qvcekmjtoetdcw4kmi7b3rtblvgx7544crnwaqtiewemdliqsokq:3:10:5" file3_rwcap = b"URI:CHK:cmtcxq7hwxvfxan34yiev6ivhy:qvcekmjtoetdcw4kmi7b3rtblvgx7544crnwaqtiewemdliqsokq:3:10:5" file3_metadata = {'ctime': 1246663897.4336269, 'tahoe': {'linkmotime': 1246663897.4336269, 'linkcrtime': 1246663897.4336269}, 'mtime': 1246663897.4336269} self.failUnlessEqual(file3_metadata, children[u'file3'][1]) self.failUnlessReallyEqual(file3_rocap, children[u'file3'][0].get_readonly_uri()) self.failUnlessReallyEqual(file3_rwcap, children[u'file3'][0].get_uri()) # Are the metadata for child 2 right? file2_rocap = b"URI:CHK:apegrpehshwugkbh3jlt5ei6hq:5oougnemcl5xgx4ijgiumtdojlipibctjkbwvyygdymdphib2fvq:3:10:4" file2_rwcap = b"URI:CHK:apegrpehshwugkbh3jlt5ei6hq:5oougnemcl5xgx4ijgiumtdojlipibctjkbwvyygdymdphib2fvq:3:10:4" file2_metadata = {'ctime': 1246663897.430218, 'tahoe': {'linkmotime': 1246663897.430218, 'linkcrtime': 1246663897.430218}, 'mtime': 1246663897.430218} self.failUnlessEqual(file2_metadata, children[u'file2'][1]) self.failUnlessReallyEqual(file2_rocap, children[u'file2'][0].get_readonly_uri()) self.failUnlessReallyEqual(file2_rwcap, children[u'file2'][0].get_uri()) # Are the metadata for child 1 right? file1_rocap = b"URI:CHK:olxtimympo7f27jvhtgqlnbtn4:emzdnhk2um4seixozlkw3qx2nfijvdkx3ky7i7izl47yedl6e64a:3:10:10" file1_rwcap = b"URI:CHK:olxtimympo7f27jvhtgqlnbtn4:emzdnhk2um4seixozlkw3qx2nfijvdkx3ky7i7izl47yedl6e64a:3:10:10" file1_metadata = {'ctime': 1246663897.4275661, 'tahoe': {'linkmotime': 1246663897.4275661, 'linkcrtime': 1246663897.4275661}, 'mtime': 1246663897.4275661} self.failUnlessEqual(file1_metadata, children[u'file1'][1]) self.failUnlessReallyEqual(file1_rocap, children[u'file1'][0].get_readonly_uri()) self.failUnlessReallyEqual(file1_rwcap, children[u'file1'][0].get_uri()) def _make_kids(self, nm, which): caps = {"imm": b"URI:CHK:n7r3m6wmomelk4sep3kw5cvduq:os7ijw5c3maek7pg65e5254k2fzjflavtpejjyhshpsxuqzhcwwq:3:20:14861", "lit": b"URI:LIT:n5xgk", # LIT for "one" "write": b"URI:SSK:vfvcbdfbszyrsaxchgevhmmlii:euw4iw7bbnkrrwpzuburbhppuxhc3gwxv26f6imekhz7zyw2ojnq", "read": b"URI:SSK-RO:e3mdrzfwhoq42hy5ubcz6rp3o4:ybyibhnp3vvwuq2vaw2ckjmesgkklfs6ghxleztqidihjyofgw7q", "dirwrite": b"URI:DIR2:n6x24zd3seu725yluj75q5boaa:mm6yoqjhl6ueh7iereldqxue4nene4wl7rqfjfybqrehdqmqskvq", "dirread": b"URI:DIR2-RO:b7sr5qsifnicca7cbk3rhrhbvq:mm6yoqjhl6ueh7iereldqxue4nene4wl7rqfjfybqrehdqmqskvq", } kids = {} for name in which: kids[str(name)] = (nm.create_from_cap(caps[name]), {}) return kids def test_pack_unpack_unknown(self): """ Minimal testing for roundtripping unknown URIs. """ nm = NodeMaker(None, None, None, None, None, {"k": 3, "n": 10}, None, None) fn = MinimalFakeMutableFile() # UnknownNode has massively complex rules about when it's an error. # Just force it not to be an error. 
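# --- Editor's sketch (illustrative, not part of the original test file) ----
# The UnknownNode children created just below carry made-up "whatevs://"
# caps.  Elsewhere in these tests such unknown caps are surfaced with "ro."
# or "imm." prefixes in read-only contexts, and the stripping rule pinned
# down later by Dirnode2.test_unknown_strip_prefix_for_ro (the real helper
# is strip_prefix_for_ro, imported by this module) reduces to roughly:
def _sketch_strip_prefix_for_ro(ro_uri, deep_immutable):
    # "ro." is always dropped; "imm." is only dropped when the surrounding
    # context is deep-immutable, so immutability is already implied.
    if ro_uri.startswith(b"ro."):
        return ro_uri[3:]
    if deep_immutable and ro_uri.startswith(b"imm."):
        return ro_uri[4:]
    return ro_uri
assert _sketch_strip_prefix_for_ro(b"ro.foo", False) == b"foo"
assert _sketch_strip_prefix_for_ro(b"imm.foo", False) == b"imm.foo"
assert _sketch_strip_prefix_for_ro(b"imm.foo", True) == b"foo"
# ---------------------------------------------------------------------------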
unknown_rw = UnknownNode(b"whatevs://write", None) unknown_rw.error = None unknown_ro = UnknownNode(None, b"whatevs://readonly") unknown_ro.error = None kids = { "unknown_rw": (unknown_rw, {}), "unknown_ro": (unknown_ro, {}) } packed = dirnode.pack_children(kids, fn.get_writekey(), deep_immutable=False) write_uri = b"URI:SSK-RO:e3mdrzfwhoq42hy5ubcz6rp3o4:ybyibhnp3vvwuq2vaw2ckjmesgkklfs6ghxleztqidihjyofgw7q" filenode = nm.create_from_cap(write_uri) dn = dirnode.DirectoryNode(filenode, nm, None) unkids = dn._unpack_contents(packed) self.assertEqual(kids, unkids) @given(text(min_size=1, max_size=20)) def test_pack_unpack_unicode_hypothesis(self, name): """ pack -> unpack results in the same objects (with a unicode name) """ nm = NodeMaker(None, None, None, None, None, {"k": 3, "n": 10}, None, None) fn = MinimalFakeMutableFile() # FIXME TODO: we shouldn't have to do this out here, but # Hypothesis found that a name with "\x2000" does not make the # round-trip properly .. so for now we'll only give the packer # normalized names. # See also: # https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2606 # https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1076 name = unicodedata.normalize('NFC', name) kids = { name: (LiteralFileNode(uri.from_string(one_uri)), {}), } packed = dirnode.pack_children(kids, fn.get_writekey(), deep_immutable=False) write_uri = b"URI:SSK-RO:e3mdrzfwhoq42hy5ubcz6rp3o4:ybyibhnp3vvwuq2vaw2ckjmesgkklfs6ghxleztqidihjyofgw7q" filenode = nm.create_from_cap(write_uri) dn = dirnode.DirectoryNode(filenode, nm, None) unkids = dn._unpack_contents(packed) self.assertEqual(kids, unkids) def test_deep_immutable(self): nm = NodeMaker(None, None, None, None, None, {"k": 3, "n": 10}, None, None) fn = MinimalFakeMutableFile() kids = self._make_kids(nm, ["imm", "lit", "write", "read", "dirwrite", "dirread"]) packed = dirnode.pack_children(kids, fn.get_writekey(), deep_immutable=False) self.failUnlessIn(b"lit", packed) kids = self._make_kids(nm, ["imm", "lit"]) packed = dirnode.pack_children(kids, fn.get_writekey(), deep_immutable=True) self.failUnlessIn(b"lit", packed) kids = self._make_kids(nm, ["imm", "lit", "write"]) self.failUnlessRaises(dirnode.MustBeDeepImmutableError, dirnode.pack_children, kids, fn.get_writekey(), deep_immutable=True) # read-only is not enough: all children must be immutable kids = self._make_kids(nm, ["imm", "lit", "read"]) self.failUnlessRaises(dirnode.MustBeDeepImmutableError, dirnode.pack_children, kids, fn.get_writekey(), deep_immutable=True) kids = self._make_kids(nm, ["imm", "lit", "dirwrite"]) self.failUnlessRaises(dirnode.MustBeDeepImmutableError, dirnode.pack_children, kids, fn.get_writekey(), deep_immutable=True) kids = self._make_kids(nm, ["imm", "lit", "dirread"]) self.failUnlessRaises(dirnode.MustBeDeepImmutableError, dirnode.pack_children, kids, fn.get_writekey(), deep_immutable=True) @implementer(IMutableFileNode) class FakeMutableFile(object): # type: ignore # incomplete implementation counter = 0 def __init__(self, initial_contents=b""): data = self._get_initial_contents(initial_contents) self.data = data.read(data.get_size()) self.data = b"".join(self.data) counter = FakeMutableFile.counter FakeMutableFile.counter += 1 writekey = hashutil.ssk_writekey_hash(b"%d" % counter) fingerprint = hashutil.ssk_pubkey_fingerprint_hash(b"%d" % counter) self.uri = uri.WriteableSSKFileURI(writekey, fingerprint) def _get_initial_contents(self, contents): if isinstance(contents, bytes): return contents if contents is None: return b"" assert callable(contents), "%s should be 
callable, not %s" % \ (contents, type(contents)) return contents(self) def get_cap(self): return self.uri def get_uri(self): return self.uri.to_string() def get_write_uri(self): return self.uri.to_string() def download_best_version(self): return defer.succeed(self.data) def get_writekey(self): return b"writekey" def is_readonly(self): return False def is_mutable(self): return True def is_unknown(self): return False def is_allowed_in_immutable_directory(self): return False def raise_error(self): pass def modify(self, modifier): data = modifier(self.data, None, True) self.data = data return defer.succeed(None) class FakeNodeMaker(NodeMaker): def create_mutable_file(self, contents=b"", keysize=None, version=None, keypair=None): assert keypair is None, "FakeNodeMaker does not support externally supplied keypairs" return defer.succeed(FakeMutableFile(contents)) class FakeClient2(_Client): # type: ignore # tahoe-lafs/ticket/3573 def __init__(self): self.nodemaker = FakeNodeMaker(None, None, None, None, None, {"k":3,"n":10}, None, None) def create_node_from_uri(self, rwcap, rocap): return self.nodemaker.create_from_cap(rwcap, rocap) class Dirnode2(testutil.ReallyEqualMixin, testutil.ShouldFailMixin, unittest.TestCase): def setUp(self): client = FakeClient2() self.nodemaker = client.nodemaker def test_from_future(self): # Create a mutable directory that contains unknown URI types, and make sure # we tolerate them properly. d = self.nodemaker.create_new_mutable_directory() future_write_uri = u"x-tahoe-crazy://I_am_from_the_future_rw_\u263A".encode('utf-8') future_read_uri = u"x-tahoe-crazy-readonly://I_am_from_the_future_ro_\u263A".encode('utf-8') future_imm_uri = u"x-tahoe-crazy-immutable://I_am_from_the_future_imm_\u263A".encode('utf-8') future_node = UnknownNode(future_write_uri, future_read_uri) def _then(n): self._node = n return n.set_node(u"future", future_node) d.addCallback(_then) # We should be prohibited from adding an unknown URI to a directory # just in the rw_uri slot, since we don't know how to diminish the cap # to a readcap (for the ro_uri slot). d.addCallback(lambda ign: self.shouldFail(MustNotBeUnknownRWError, "copy unknown", "cannot attach unknown rw cap as child", self._node.set_uri, u"add", future_write_uri, None)) # However, we should be able to add both rw_uri and ro_uri as a pair of # unknown URIs. d.addCallback(lambda ign: self._node.set_uri(u"add-pair", future_write_uri, future_read_uri)) # and to add an URI prefixed with "ro." or "imm." when it is given in a # write slot (or URL parameter). d.addCallback(lambda ign: self._node.set_uri(u"add-ro", b"ro." + future_read_uri, None)) d.addCallback(lambda ign: self._node.set_uri(u"add-imm", b"imm." + future_imm_uri, None)) d.addCallback(lambda ign: self._node.list()) def _check(children): self.failUnlessReallyEqual(len(children), 4) (fn, metadata) = children[u"future"] self.failUnless(isinstance(fn, UnknownNode), fn) self.failUnlessReallyEqual(fn.get_uri(), future_write_uri) self.failUnlessReallyEqual(fn.get_write_uri(), future_write_uri) self.failUnlessReallyEqual(fn.get_readonly_uri(), b"ro." + future_read_uri) (fn2, metadata2) = children[u"add-pair"] self.failUnless(isinstance(fn2, UnknownNode), fn2) self.failUnlessReallyEqual(fn2.get_uri(), future_write_uri) self.failUnlessReallyEqual(fn2.get_write_uri(), future_write_uri) self.failUnlessReallyEqual(fn2.get_readonly_uri(), b"ro." 
                                       + future_read_uri)
            (fn3, metadata3) = children[u"add-ro"]
            self.failUnless(isinstance(fn3, UnknownNode), fn3)
            self.failUnlessReallyEqual(fn3.get_uri(), b"ro." + future_read_uri)
            self.failUnlessReallyEqual(fn3.get_write_uri(), None)
            self.failUnlessReallyEqual(fn3.get_readonly_uri(),
                                       b"ro." + future_read_uri)
            (fn4, metadata4) = children[u"add-imm"]
            self.failUnless(isinstance(fn4, UnknownNode), fn4)
            self.failUnlessReallyEqual(fn4.get_uri(), b"imm." + future_imm_uri)
            self.failUnlessReallyEqual(fn4.get_write_uri(), None)
            self.failUnlessReallyEqual(fn4.get_readonly_uri(),
                                       b"imm." + future_imm_uri)
            # We should also be allowed to copy the "future" UnknownNode, because
            # it contains all the information that was in the original directory
            # (readcap and writecap), so we're preserving everything.
            return self._node.set_node(u"copy", fn)
        d.addCallback(_check)
        d.addCallback(lambda ign: self._node.list())
        def _check2(children):
            self.failUnlessReallyEqual(len(children), 5)
            (fn, metadata) = children[u"copy"]
            self.failUnless(isinstance(fn, UnknownNode), fn)
            self.failUnlessReallyEqual(fn.get_uri(), future_write_uri)
            self.failUnlessReallyEqual(fn.get_write_uri(), future_write_uri)
            self.failUnlessReallyEqual(fn.get_readonly_uri(),
                                       b"ro." + future_read_uri)
        d.addCallback(_check2)
        return d

    def test_unknown_strip_prefix_for_ro(self):
        self.failUnlessReallyEqual(strip_prefix_for_ro(b"foo", False), b"foo")
        self.failUnlessReallyEqual(strip_prefix_for_ro(b"ro.foo", False), b"foo")
        self.failUnlessReallyEqual(strip_prefix_for_ro(b"imm.foo", False), b"imm.foo")
        self.failUnlessReallyEqual(strip_prefix_for_ro(b"foo", True), b"foo")
        self.failUnlessReallyEqual(strip_prefix_for_ro(b"ro.foo", True), b"foo")
        self.failUnlessReallyEqual(strip_prefix_for_ro(b"imm.foo", True), b"foo")

    def test_unknownnode(self):
        lit_uri = one_uri

        # This does not attempt to be exhaustive.
        no_no = [# Opaque node, but not an error.
                 ( 0, UnknownNode(None, None)),
                 ( 1, UnknownNode(None, None, deep_immutable=True)),
                ]
        unknown_rw = [# These are errors because we're only given a rw_uri, and we can't
                      # diminish it.
                      ( 2, UnknownNode(b"foo", None)),
                      ( 3, UnknownNode(b"foo", None, deep_immutable=True)),
                      ( 4, UnknownNode(b"ro.foo", None, deep_immutable=True)),
                      ( 5, UnknownNode(b"ro." + mut_read_uri, None, deep_immutable=True)),
                      ( 5.1, UnknownNode(b"ro." + mdmf_read_uri, None, deep_immutable=True)),
                      ( 6, UnknownNode(b"URI:SSK-RO:foo", None, deep_immutable=True)),
                      ( 7, UnknownNode(b"URI:SSK:foo", None)),
                     ]
        must_be_ro = [# These are errors because a readonly constraint is not met.
                      ( 8, UnknownNode(b"ro." + mut_write_uri, None)),
                      ( 8.1, UnknownNode(b"ro." + mdmf_write_uri, None)),
                      ( 9, UnknownNode(None, b"ro." + mut_write_uri)),
                      ( 9.1, UnknownNode(None, b"ro." + mdmf_write_uri)),
                     ]
        must_be_imm = [# These are errors because an immutable constraint is not met.
                       (10, UnknownNode(None, b"ro.URI:SSK-RO:foo", deep_immutable=True)),
                       (11, UnknownNode(None, b"imm.URI:SSK:foo")),
                       (12, UnknownNode(None, b"imm.URI:SSK-RO:foo")),
                       (13, UnknownNode(b"bar", b"ro.foo", deep_immutable=True)),
                       (14, UnknownNode(b"bar", b"imm.foo", deep_immutable=True)),
                       (15, UnknownNode(b"bar", b"imm." + lit_uri, deep_immutable=True)),
                       (16, UnknownNode(b"imm." + mut_write_uri, None)),
                       (16.1, UnknownNode(b"imm." + mdmf_write_uri, None)),
                       (17, UnknownNode(b"imm." + mut_read_uri, None)),
                       (17.1, UnknownNode(b"imm." + mdmf_read_uri, None)),
                       (18, UnknownNode(b"bar", b"imm.foo")),
                      ]
        bad_uri = [# These are errors because the URI is bad once we've stripped the prefix.
                   (19, UnknownNode(b"ro.URI:SSK-RO:foo", None)),
                   (20, UnknownNode(b"imm.URI:CHK:foo", None, deep_immutable=True)),
                   (21, UnknownNode(None, b"URI:CHK:foo")),
                   (22, UnknownNode(None, b"URI:CHK:foo", deep_immutable=True)),
                  ]
        ro_prefixed = [# These are valid, and the readcap should end up with a ro. prefix.
                       (23, UnknownNode(None, b"foo")),
                       (24, UnknownNode(None, b"ro.foo")),
                       (25, UnknownNode(None, b"ro." + lit_uri)),
                       (26, UnknownNode(b"bar", b"foo")),
                       (27, UnknownNode(b"bar", b"ro.foo")),
                       (28, UnknownNode(b"bar", b"ro." + lit_uri)),
                       (29, UnknownNode(b"ro.foo", None)),
                       (30, UnknownNode(b"ro." + lit_uri, None)),
                      ]
        imm_prefixed = [# These are valid, and the readcap should end up with an imm. prefix.
                        (31, UnknownNode(None, b"foo", deep_immutable=True)),
                        (32, UnknownNode(None, b"ro.foo", deep_immutable=True)),
                        (33, UnknownNode(None, b"imm.foo")),
                        (34, UnknownNode(None, b"imm.foo", deep_immutable=True)),
                        (35, UnknownNode(b"imm." + lit_uri, None)),
                        (36, UnknownNode(b"imm." + lit_uri, None, deep_immutable=True)),
                        (37, UnknownNode(None, b"imm." + lit_uri)),
                        (38, UnknownNode(None, b"imm." + lit_uri, deep_immutable=True)),
                       ]
        error = unknown_rw + must_be_ro + must_be_imm + bad_uri
        ok = ro_prefixed + imm_prefixed

        for (i, n) in no_no + error + ok:
            self.failUnless(n.is_unknown(), i)

        for (i, n) in no_no + error:
            self.failUnless(n.get_uri() is None, i)
            self.failUnless(n.get_write_uri() is None, i)
            self.failUnless(n.get_readonly_uri() is None, i)

        for (i, n) in no_no + ok:
            n.raise_error()

        for (i, n) in unknown_rw:
            self.failUnlessRaises(MustNotBeUnknownRWError, lambda n=n: n.raise_error())

        for (i, n) in must_be_ro:
            self.failUnlessRaises(MustBeReadonlyError, lambda n=n: n.raise_error())

        for (i, n) in must_be_imm:
            self.failUnlessRaises(MustBeDeepImmutableError, lambda n=n: n.raise_error())

        for (i, n) in bad_uri:
            self.failUnlessRaises(uri.BadURIError, lambda n=n: n.raise_error())

        for (i, n) in ok:
            self.failIf(n.get_readonly_uri() is None, i)

        for (i, n) in ro_prefixed:
            self.failUnless(n.get_readonly_uri().startswith(b"ro."), i)

        for (i, n) in imm_prefixed:
            self.failUnless(n.get_readonly_uri().startswith(b"imm."), i)


class DeepStats(testutil.ReallyEqualMixin, unittest.TestCase):
    def test_stats(self):
        ds = dirnode.DeepStats(None)
        ds.add("count-files")
        ds.add("size-immutable-files", 123)
        ds.histogram("size-files-histogram", 123)
        ds.max("largest-directory", 444)

        s = ds.get_results()
        self.failUnlessReallyEqual(s["count-files"], 1)
        self.failUnlessReallyEqual(s["size-immutable-files"], 123)
        self.failUnlessReallyEqual(s["largest-directory"], 444)
        self.failUnlessReallyEqual(s["count-literal-files"], 0)

        ds.add("count-files")
        ds.add("size-immutable-files", 321)
        ds.histogram("size-files-histogram", 321)
        ds.max("largest-directory", 2)

        s = ds.get_results()
        self.failUnlessReallyEqual(s["count-files"], 2)
        self.failUnlessReallyEqual(s["size-immutable-files"], 444)
        self.failUnlessReallyEqual(s["largest-directory"], 444)
        self.failUnlessReallyEqual(s["count-literal-files"], 0)
        self.failUnlessReallyEqual(s["size-files-histogram"],
                                   [ (101, 316, 1), (317, 1000, 1) ])

        ds = dirnode.DeepStats(None)
        for i in range(1, 1100):
            ds.histogram("size-files-histogram", i)
        ds.histogram("size-files-histogram", 4*1000*1000*1000*1000) # 4TB
        s = ds.get_results()
        self.failUnlessReallyEqual(s["size-files-histogram"],
                                   [ (1, 3, 3),
                                     (4, 10, 7),
                                     (11, 31, 21),
                                     (32, 100, 69),
                                     (101, 316, 216),
                                     (317, 1000, 684),
                                     (1001, 3162, 99),
                                     (3162277660169, 10000000000000, 1),
                                   ])


class UCWEingMutableFileNode(MutableFileNode):
    please_ucwe_after_next_upload = False

    def _upload(self, new_contents, servermap):
        d = MutableFileNode._upload(self, new_contents, servermap)
        def _ucwe(res):
            if self.please_ucwe_after_next_upload:
                self.please_ucwe_after_next_upload = False
                raise UncoordinatedWriteError()
            return res
        d.addCallback(_ucwe)
        return d


class UCWEingNodeMaker(NodeMaker):
    def _create_mutable(self, cap):
        n = UCWEingMutableFileNode(self.storage_broker, self.secret_holder,
                                   self.default_encoding_parameters,
                                   self.history)
        return n.init_from_cap(cap)


class Deleter(GridTestMixin, testutil.ReallyEqualMixin, unittest.TestCase):
    def test_retry(self):
        # ticket #550, a dirnode.delete which experiences an
        # UncoordinatedWriteError will fail with an incorrect "you're
        # deleting something which isn't there" NoSuchChildError exception.

        # to trigger this, we start by creating a directory with a single
        # file in it. Then we create a special dirnode that uses a modified
        # MutableFileNode which will raise UncoordinatedWriteError once on
        # demand. We then call dirnode.delete, which ought to retry and
        # succeed.
        self.basedir = self.mktemp()
        self.set_up_grid(oneshare=True)
        c0 = self.g.clients[0]
        d = c0.create_dirnode()
        small = upload.Data(b"Small enough for a LIT", None)
        def _created_dir(dn):
            self.root = dn
            self.root_uri = dn.get_uri()
            return dn.add_file(u"file", small)
        d.addCallback(_created_dir)
        def _do_delete(ignored):
            nm = UCWEingNodeMaker(c0.storage_broker, c0._secret_holder,
                                  c0.get_history(),
                                  c0.getServiceNamed("uploader"),
                                  c0.terminator,
                                  c0.get_encoding_parameters(),
                                  c0.mutable_file_default,
                                  c0._key_generator)
            n = nm.create_from_cap(self.root_uri)
            assert n._node.please_ucwe_after_next_upload == False
            n._node.please_ucwe_after_next_upload = True
            # This should succeed, not raise an exception
            return n.delete(u"file")
        d.addCallback(_do_delete)
        return d


class Adder(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin):

    def test_overwrite(self):
        # note: This functionality could be tested without actually creating
        # several RSA keys. It would be faster without the GridTestMixin: use
        # dn.set_node(nodemaker.create_from_cap(make_chk_file_uri())) instead
        # of dn.add_file, and use a special NodeMaker that creates fake
        # mutable files.
        self.basedir = "dirnode/Adder/test_overwrite"
        self.set_up_grid(oneshare=True)
        c = self.g.clients[0]
        fileuri = make_chk_file_uri(1234)
        filenode = c.nodemaker.create_from_cap(fileuri)
        d = c.create_dirnode()

        def _create_directory_tree(root_node):
            # Build
            # root/file1
            # root/file2
            # root/dir1
            d = root_node.add_file(u'file1', upload.Data(b"Important Things",
                                                         None))
            d.addCallback(lambda res:
                root_node.add_file(u'file2', upload.Data(b"Sekrit Codes", None)))
            d.addCallback(lambda res:
                root_node.create_subdirectory(u"dir1"))
            d.addCallback(lambda res: root_node)
            return d

        d.addCallback(_create_directory_tree)

        def _test_adder(root_node):
            d = root_node.set_node(u'file1', filenode)
            # We've overwritten file1. Let's try it with a directory
            d.addCallback(lambda res:
                root_node.create_subdirectory(u'dir2'))
            d.addCallback(lambda res:
                root_node.set_node(u'dir2', filenode))
            # We try overwriting a file with a child while also specifying
            # overwrite=False. We should receive an ExistingChildError
            # when we do this.
            d.addCallback(lambda res:
                self.shouldFail(ExistingChildError, "set_node",
                                "child 'file1' already exists",
                                root_node.set_node, u"file1",
                                filenode, overwrite=False))
            # If we try with a directory, we should see the same thing
            d.addCallback(lambda res:
                self.shouldFail(ExistingChildError, "set_node",
                                "child 'dir1' already exists",
                                root_node.set_node, u'dir1', filenode,
                                overwrite=False))
            d.addCallback(lambda res:
                root_node.set_node(u'file1', filenode,
                                   overwrite=dirnode.ONLY_FILES))
            d.addCallback(lambda res:
                self.shouldFail(ExistingChildError, "set_node",
                                "child 'dir1' already exists",
                                root_node.set_node, u'dir1', filenode,
                                overwrite=dirnode.ONLY_FILES))
            return d

        d.addCallback(_test_adder)
        return d


class DeterministicDirnode(testutil.ReallyEqualMixin, testutil.ShouldFailMixin, unittest.TestCase):
    def setUp(self):
        # Copied from allmydata.test.mutable.test_filenode
        super(DeterministicDirnode, self).setUp()
        self._storage = FakeStorage()
        self._peers = list(
            make_peer(self._storage, n)
            for n in range(10)
        )
        self.nodemaker = make_nodemaker_with_peers(self._peers)

    async def test_create_with_random_keypair(self):
        """
        Create a dirnode using a random RSA keypair.

        The writekey and fingerprint of the enclosed mutable filecap
        should match those derived from the given keypair.
        """
        privkey, pubkey = create_signing_keypair(2048)
        writekey, _, fingerprint = derive_mutable_keys((pubkey, privkey))

        node = await self.nodemaker.create_new_mutable_directory(
            keypair=(pubkey, privkey)
        )
        self.failUnless(isinstance(node, dirnode.DirectoryNode))

        dircap = uri.from_string(node.get_uri())
        self.failUnless(isinstance(dircap, uri.DirectoryURI))

        filecap = dircap.get_filenode_cap()
        self.failUnless(isinstance(filecap, uri.WriteableSSKFileURI))

        self.failUnlessReallyEqual(filecap.writekey, writekey)
        self.failUnlessReallyEqual(filecap.fingerprint, fingerprint)

    async def test_create_with_known_keypair(self):
        """
        Create a dirnode using a known RSA keypair.

        The writekey and fingerprint of the enclosed mutable filecap
        should match those derived from the given keypair. Because
        these values are derived deterministically, given the same
        keypair, the resulting filecap should also always be the same.
        """
        # Generated with `openssl genrsa -out openssl-rsa-2048-2.txt 2048`
        pempath = FilePath(__file__).sibling("data").child("openssl-rsa-2048-2.txt")
        privkey = load_pem_private_key(pempath.getContent(), password=None)
        pubkey = privkey.public_key()
        writekey, _, fingerprint = derive_mutable_keys((pubkey, privkey))

        node = await self.nodemaker.create_new_mutable_directory(
            keypair=(pubkey, privkey)
        )
        self.failUnless(isinstance(node, dirnode.DirectoryNode))

        dircap = uri.from_string(node.get_uri())
        self.failUnless(isinstance(dircap, uri.DirectoryURI))

        filecap = dircap.get_filenode_cap()
        self.failUnless(isinstance(filecap, uri.WriteableSSKFileURI))

        self.failUnlessReallyEqual(filecap.writekey, writekey)
        self.failUnlessReallyEqual(filecap.fingerprint, fingerprint)
        self.failUnlessReallyEqual(
            # Despite being named "to_string", this actually returns bytes..
            dircap.to_string(),
            b'URI:DIR2:n4opqgewgcn4mddu4oiippaxru:ukpe4z6xdlujdpguoabergyih3bj7iaafukdqzwthy2ytdd5bs2a'
        )
tahoe_lafs-1.20.0/src/allmydata/test/test_download.py0000644000000000000000000041365413615410400017572 0ustar00"""
Ported to Python 3.
"""

from future.utils import bchr

# system-level upload+download roundtrip test, but using shares created from
# a previous run. This asserts that the current code is capable of decoding
# shares from a previous version.
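# --- Illustrative sketch (not part of the original test module) --------------
# The comment above explains that the share blobs pasted further down were
# generated by create_share() in an older release and are decoded here with
# base32.a2b() into a {clientnum: {shnum: bytes}} mapping. As a rough,
# hypothetical illustration of how such a mapping could be dumped to disk for
# inspection, one might write each blob to its own file. The layout used here
# ("client<N>/<shnum>.share") and the helper name are assumptions for this
# sketch only; they are not the layout or API used by the Tahoe-LAFS storage
# backend or by the tests below.
def _dump_stored_shares_sketch(shares, destdir):
    """Write each pasted share blob to destdir/client<N>/<shnum>.share (sketch)."""
    import os  # stdlib only, so the sketch stays self-contained
    for clientnum, shares_for_client in shares.items():
        clientdir = os.path.join(destdir, "client%d" % clientnum)
        os.makedirs(clientdir, exist_ok=True)
        for shnum, data in shares_for_client.items():
            with open(os.path.join(clientdir, "%d.share" % shnum), "wb") as f:
                f.write(data)
# ------------------------------------------------------------------------------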
from typing import Any import os from twisted.trial import unittest from twisted.internet import defer, reactor from allmydata import uri from allmydata.storage.server import storage_index_to_dir from allmydata.util import base32, fileutil, spans, log, hashutil from allmydata.util.consumer import download_to_data, MemoryConsumer from allmydata.immutable import upload, layout from allmydata.test.no_network import GridTestMixin, NoNetworkServer from allmydata.test.common import ShouldFailMixin from allmydata.interfaces import NotEnoughSharesError, NoSharesError, \ DownloadStopped from allmydata.immutable.downloader.common import BadSegmentNumberError, \ BadCiphertextHashError, COMPLETE, OVERDUE, DEAD from allmydata.immutable.downloader.status import DownloadStatus from allmydata.immutable.downloader.fetcher import SegmentFetcher from allmydata.codec import CRSDecoder from foolscap.eventual import eventually, fireEventually, flushEventualQueue plaintext = b"This is a moderate-sized file.\n" * 10 mutable_plaintext = b"This is a moderate-sized mutable file.\n" * 10 # this chunk was generated by create_share(), written to disk, then pasted # into this file. These shares were created by 1.2.0-r3247, a version that's # probably fairly close to 1.3.0 . #--------- BEGIN stored_shares.py -------------- immutable_uri = b"URI:CHK:g4i6qkk7mlj4vkl5ncg6dwo73i:qcas2ebousfk3q5rkl2ncayeku52kpyse76v5yeel2t2eaa4f6ha:3:10:310" immutable_shares = { 0: { # client[0] 0: base32.a2b(b"aaaaaaiaaacyeaaaaaaqaaaaaeaaaadiaaaaa2aaaaaciaaaacgaaaaavqaaaagmaaaab3aaaaazmksehmgmlmmeqkbxbljh5qnfq36b7h5ukgqccmy3665khphcxihkce7jukeuegdxtn26p353ork6qihitbshwucpopzvdnpkflg6vbvko7ohcmxjywpdkvjmuzq6hysxfl74mamn224nrsyl7czmvtwtss6kkzljridkffeaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaactwxn7tsxj2nh6skwbghycguqfj7xrpegeporex5ebctynbgbl7y5y3kjcfritduzdk5rvwqs4lwzvb7fgvljgozbbtamhoriuzaeruaaqt2fbbxr5yv4vqeabkjqow6sd73dfqab3qban3htx6rn2y6mujdwaacbpvbyim4ewanv2vku44tunk7vdjkty2wkfm3jg67pqmm2newyib4aafazigyt6kxmirnlio5sdvbkvh43rwpctm6coigl64chn6z7w45rcaaccvmfgplu4kz5erphnx3xhzclypawi2j5zsvewmn4s2wbba4k2ktaab45y3kjcfritduzdk5rvwqs4lwzvb7fgvljgozbbtamhoriuzaeruaaaae4gg33emvrv63tbnvstumz2mnzhglddn5sgky27obqxeylnom5dqortgezc2mzngeycyy3spfyhi5dfpb2f62dbonudumzshkl7fjw5sp7x3yw4sdhze6qf7zgsjocpqtwl2gj5o6vufvixto3u2lddoj4xa5dumv4hix3sn5xxix3imfzwqortgi5fhno37hfotu2p5evmcmpqenjakt7pc6imi65cjp2icfhq2cmcx7rmnzswkzdfmrpxg2dbojsxgorrhizsy3tvnvpxgzlhnvsw45dthiytumjmonswo3lfnz2f643jpjstumz2gmyteldtnbqxezk7ojxw65c7nbqxg2b2gmzdubzmius26hljzu4j7gq5hdshwueqcfjc2bmveiyqbdxgyejetzovfrzws6tfhiztumzrgawhiyljnrpwg33emvrv64dbojqw24z2ha5dgmjsfuzs2mjqfr2g65dbnrpxg2dbojsxgorshiytalaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabj5uiln36za2n4oyier7k5e4sx6newmmflfqhj7xffy32p5iohlyf33bdx5dafkfwr7rxwxjcsg3ljflkaae537llwnnykgf36h52dojfplbwi"), 5: base32.a2b(b"aaaaaaiaaacyeaaaaaaqaaaaaeaaaadiaaaaa2aaaaaciaaaacgaaaaavqaaaagmaaaab3aaaaazmsdsvwbnfx2rnh7dusqniqomsdeetuafps6cawyb4pzxpkzal7w5ufaknxfnqw2qywv4c3a2zlumb2x2rx5osbxd3kqmebjndqf7zihbtagqczgwrka5rnywtsaeyijyh26okua2u7loep2nzo5etirjrxmp3yxpb4pheusaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaactwxn7tsxj2nh6skwbghycguqfj7xrpegeporex5ebctynbgbl7zs3zcg7igd2xoa4eu3lffqginpmoxrshqe6n3hzpocihgeu4vvymaadjz54nelgyi47767pkbsjwdjgsv7uyd5ntrztw6juavj7sd7wx7aaacx7wxlycyjniwxvby4ar546ncb4d3jnbhssnq4n4l4xeajurmn5diabgxwi6i5d2ysny3vavrm3a5lsuvng5mhbzk7axesyeddzw6uzmnluaakglpei35aypk5ydqstnmuwazbv5r26gi6atzxm7f5yja4ystswxbqaakbsqnrh4voyrc2wq53ehkcvkpzxdm6fgz4e4qmx5yeo35t7nz3ceaaaae4gg33emvrv63tbnvstumz2mnzhglddn5sgky27obqxeylnom5dqortgezc2mzngeycyy3spfyhi5dfpb2f62dbonudumzshkl7fjw5sp7x3yw4sdhze6qf7zgsjocpqtwl2gj5o6vufvixto3u2lddoj4xa5dumv4hix3sn5xxix3imfzwqortgi5fhno37hfotu2p5evmcmpqenjakt7pc6imi65cjp2icfhq2cmcx7rmnzswkzdfmrpxg2dbojsxgorrhizsy3tvnvpxgzlhnvsw45dthiytumjmonswo3lfnz2f643jpjstumz2gmyteldtnbqxezk7ojxw65c7nbqxg2b2gmzdubzmius26hljzu4j7gq5hdshwueqcfjc2bmveiyqbdxgyejetzovfrzws6tfhiztumzrgawhiyljnrpwg33emvrv64dbojqw24z2ha5dgmjsfuzs2mjqfr2g65dbnrpxg2dbojsxgorshiytalaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabj5uiln36za2n4oyier7k5e4sx6newmmflfqhj7xffy32p5iohlyf33bdx5dafkfwr7rxwxjcsg3ljflkaae537llwnnykgf36h52dojfplbwi"), }, 1: { # client[1] 2: 
base32.a2b(b"aaaaaaiaaacyeaaaaaaqaaaaaeaaaadiaaaaa2aaaaaciaaaacgaaaaavqaaaagmaaaab3aaaaazmj7um4zfgqo35m62ln6has6xz43klzjphj5eg46mb5x2jzgr6x6zb4voveo5uef53xbjbktr5rlupomy7x5b34amqeeg4r6obt6kpo2x4s3m3cwoo54oijyqfms3n3fethykhtglc47r4ci7ugqgz5d5fap3xzyhm4ehaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaactwxn7tsxj2nh6skwbghycguqfj7xrpegeporex5ebctynbgbl7zqkzg32wa74epeppqwneujs6tjptlm4qw75hoafobsoif3ok5odkaarmcwjw6vqh7bdzd34ftjfcmxu2l423hefx7j3qblqmtsbo3sxlq2qaewyffwgzojfi4uj2praj5azehnr4fhan5kdyewhtfncrqzoe42ijeaaikvquz5otrlhusf45w7o47ejpb4czdjhxgkuszrxslkyeedrljkmaabigkbwe7sv3celk2dxmq5ikvj7g4ntyu3hqtsbs7xar3pwp5xhmiqaa6k7uub7uqlamlqi2oduautemch242scu7cfor6kedxs6mm3uwjsmaaaae4gg33emvrv63tbnvstumz2mnzhglddn5sgky27obqxeylnom5dqortgezc2mzngeycyy3spfyhi5dfpb2f62dbonudumzshkl7fjw5sp7x3yw4sdhze6qf7zgsjocpqtwl2gj5o6vufvixto3u2lddoj4xa5dumv4hix3sn5xxix3imfzwqortgi5fhno37hfotu2p5evmcmpqenjakt7pc6imi65cjp2icfhq2cmcx7rmnzswkzdfmrpxg2dbojsxgorrhizsy3tvnvpxgzlhnvsw45dthiytumjmonswo3lfnz2f643jpjstumz2gmyteldtnbqxezk7ojxw65c7nbqxg2b2gmzdubzmius26hljzu4j7gq5hdshwueqcfjc2bmveiyqbdxgyejetzovfrzws6tfhiztumzrgawhiyljnrpwg33emvrv64dbojqw24z2ha5dgmjsfuzs2mjqfr2g65dbnrpxg2dbojsxgorshiytalaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabdp37hh2k4ys4d7qusb5e3dakjntythtcwcwfok7e52pu64zn4wrwbtlkzxzntwuwemi6e6mek5n4i7h3bw7nkat2zmqieftinxgzl2jfplbwi"), 7: 
base32.a2b(b"aaaaaaiaaacyeaaaaaaqaaaaaeaaaadiaaaaa2aaaaaciaaaacgaaaaavqaaaagmaaaab3aaaaaznhsh2frhzxbutelvddtbuf3tfilhcj2zi3cxjyzy7pg7ewamazcblv76mvey54fxmch64chqfi24jmondc4uzitby3wjeui4nfp7kv6ufo67exptkvwk7cnbouvjiapyqzrps4r6ise4jhlr7mtp2tlizb5hyaqm3fhsvrmqaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaactwxn7tsxj2nh6skwbghycguqfj7xrpegeporex5ebctynbgbl72g6h2oewtfcwgupjbjnh4k5k6d3k2fpi2q6nyidh3yo5ui6cslreaajms7f3pcsywhjbgrybzp64jzlsyjqbu7h4hvdlwf77ar6l63imdeqaaudfa3cpzk5rcfvnb3wioufku7togz4kntyjzazp3qi5x3h63tweiaagtt3y2iwnqrz77566udetmgsnfl7jqh23hdthn4tibkt7eh7np6aaakvpbzjdki64qaigkdj2bven3uigxbpurpwtrkjs4b6habv2ls7zqaac2g6h2oewtfcwgupjbjnh4k5k6d3k2fpi2q6nyidh3yo5ui6cslreaaaae4gg33emvrv63tbnvstumz2mnzhglddn5sgky27obqxeylnom5dqortgezc2mzngeycyy3spfyhi5dfpb2f62dbonudumzshkl7fjw5sp7x3yw4sdhze6qf7zgsjocpqtwl2gj5o6vufvixto3u2lddoj4xa5dumv4hix3sn5xxix3imfzwqortgi5fhno37hfotu2p5evmcmpqenjakt7pc6imi65cjp2icfhq2cmcx7rmnzswkzdfmrpxg2dbojsxgorrhizsy3tvnvpxgzlhnvsw45dthiytumjmonswo3lfnz2f643jpjstumz2gmyteldtnbqxezk7ojxw65c7nbqxg2b2gmzdubzmius26hljzu4j7gq5hdshwueqcfjc2bmveiyqbdxgyejetzovfrzws6tfhiztumzrgawhiyljnrpwg33emvrv64dbojqw24z2ha5dgmjsfuzs2mjqfr2g65dbnrpxg2dbojsxgorshiytalaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabdp37hh2k4ys4d7qusb5e3dakjntythtcwcwfok7e52pu64zn4wrwbtlkzxzntwuwemi6e6mek5n4i7h3bw7nkat2zmqieftinxgzl2jfplbwi"), }, 2: { # client[2] 1: 
base32.a2b(b"aaaaaaiaaacyeaaaaaaqaaaaaeaaaadiaaaaa2aaaaaciaaaacgaaaaavqaaaagmaaaab3aaaaazmkrwrt6figauxkgqyk3nggp5eeoeq5htt7tke4gfqj2u5roieslao4fldcwlq4btzk4brhkaerqiih6mhudotttrb6xzmvnqgg33fjcqeuw6teb3gml2pmhsezisa5svnzlvqnbaz6kzdmhisbwgu6ocexf2ge2rvc67gneqaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaactwxn7tsxj2nh6skwbghycguqfj7xrpegeporex5ebctynbgbl72piueg6hxcxswaqafjgb232ip7mmwaahoaebxm6o72fxldzsreoyaaif6uhbbtqsybwxkvkttsorvl6unfkpdkzivtne3356brtjus3bahqaee6riin4pofpfmbaaksmdvxuq76yzmaao4aidoz457ulowhtfci5qaafazigyt6kxmirnlio5sdvbkvh43rwpctm6coigl64chn6z7w45rcaaccvmfgplu4kz5erphnx3xhzclypawi2j5zsvewmn4s2wbba4k2ktaab45y3kjcfritduzdk5rvwqs4lwzvb7fgvljgozbbtamhoriuzaeruaaaae4gg33emvrv63tbnvstumz2mnzhglddn5sgky27obqxeylnom5dqortgezc2mzngeycyy3spfyhi5dfpb2f62dbonudumzshkl7fjw5sp7x3yw4sdhze6qf7zgsjocpqtwl2gj5o6vufvixto3u2lddoj4xa5dumv4hix3sn5xxix3imfzwqortgi5fhno37hfotu2p5evmcmpqenjakt7pc6imi65cjp2icfhq2cmcx7rmnzswkzdfmrpxg2dbojsxgorrhizsy3tvnvpxgzlhnvsw45dthiytumjmonswo3lfnz2f643jpjstumz2gmyteldtnbqxezk7ojxw65c7nbqxg2b2gmzdubzmius26hljzu4j7gq5hdshwueqcfjc2bmveiyqbdxgyejetzovfrzws6tfhiztumzrgawhiyljnrpwg33emvrv64dbojqw24z2ha5dgmjsfuzs2mjqfr2g65dbnrpxg2dbojsxgorshiytalaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabaugotrr7enihxy2b2nwodhxabihaf3ewc2hmcdjsqx5hi4h3rn7gnvpt3lzzo5qgbnlp4dybwr7dn7vu5hsiyo5pedlqcasb7csiuojfplbwi"), 6: 
base32.a2b(b"aaaaaaiaaacyeaaaaaaqaaaaaeaaaadiaaaaa2aaaaaciaaaacgaaaaavqaaaagmaaaab3aaaaazm34cgyp37ou5ohrofmk6bf5gcppxeb2njwmiwasn3uh4ykeocvq4vydsw36ksh63fcil3o257zupffrruiuqlwjvbdcdjiuqrojiromunzxxc34io7zlfafprzlvmztph4qsp67ozxmwvivqwtvu6ckr7pffsikgi2supviqaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaactwxn7tsxj2nh6skwbghycguqfj7xrpegeporex5ebctynbgbl7zlyoki2shxeacbsq2oqnjdo5cbvyl5el5u4ksmxapryanos4x6maaajms7f3pcsywhjbgrybzp64jzlsyjqbu7h4hvdlwf77ar6l63imdeqaaudfa3cpzk5rcfvnb3wioufku7togz4kntyjzazp3qi5x3h63tweiaagtt3y2iwnqrz77566udetmgsnfl7jqh23hdthn4tibkt7eh7np6aaakvpbzjdki64qaigkdj2bven3uigxbpurpwtrkjs4b6habv2ls7zqaac2g6h2oewtfcwgupjbjnh4k5k6d3k2fpi2q6nyidh3yo5ui6cslreaaaae4gg33emvrv63tbnvstumz2mnzhglddn5sgky27obqxeylnom5dqortgezc2mzngeycyy3spfyhi5dfpb2f62dbonudumzshkl7fjw5sp7x3yw4sdhze6qf7zgsjocpqtwl2gj5o6vufvixto3u2lddoj4xa5dumv4hix3sn5xxix3imfzwqortgi5fhno37hfotu2p5evmcmpqenjakt7pc6imi65cjp2icfhq2cmcx7rmnzswkzdfmrpxg2dbojsxgorrhizsy3tvnvpxgzlhnvsw45dthiytumjmonswo3lfnz2f643jpjstumz2gmyteldtnbqxezk7ojxw65c7nbqxg2b2gmzdubzmius26hljzu4j7gq5hdshwueqcfjc2bmveiyqbdxgyejetzovfrzws6tfhiztumzrgawhiyljnrpwg33emvrv64dbojqw24z2ha5dgmjsfuzs2mjqfr2g65dbnrpxg2dbojsxgorshiytalaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabaugotrr7enihxy2b2nwodhxabihaf3ewc2hmcdjsqx5hi4h3rn7gnvpt3lzzo5qgbnlp4dybwr7dn7vu5hsiyo5pedlqcasb7csiuojfplbwi"), }, 3: { # client[3] 4: 
base32.a2b(b"aaaaaaiaaacyeaaaaaaqaaaaaeaaaadiaaaaa2aaaaaciaaaacgaaaaavqaaaagmaaaab3aaaaaznjqn7ehmj6f4p3fjyliuvwnfothumsfhs7ienw4uln6joaxopqlmcy5daa4njrkgj7nqm6tpnmz2dci2b356pljv4zjj5ayzfihi4g26qdei7kjtegjuv4d3k3t4orpufnft3edbondkpj5etjczwhyulukzuy5socyivdfqaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaactwxn7tsxj2nh6skwbghycguqfj7xrpegeporex5ebctynbgbl7zpmr4r2hvre3rxkblczwb2xfjk2n2yodsv6bojfqightn5jsy2xiaatl3epeor5mjg4n2qkywnqovzkkwtowdq4vpqlsjmcbr43pkmwgv2aacx7wxlycyjniwxvby4ar546ncb4d3jnbhssnq4n4l4xeajurmn5diaagtt3y2iwnqrz77566udetmgsnfl7jqh23hdthn4tibkt7eh7np6aaakglpei35aypk5ydqstnmuwazbv5r26gi6atzxm7f5yja4ystswxbqaakbsqnrh4voyrc2wq53ehkcvkpzxdm6fgz4e4qmx5yeo35t7nz3ceaaaae4gg33emvrv63tbnvstumz2mnzhglddn5sgky27obqxeylnom5dqortgezc2mzngeycyy3spfyhi5dfpb2f62dbonudumzshkl7fjw5sp7x3yw4sdhze6qf7zgsjocpqtwl2gj5o6vufvixto3u2lddoj4xa5dumv4hix3sn5xxix3imfzwqortgi5fhno37hfotu2p5evmcmpqenjakt7pc6imi65cjp2icfhq2cmcx7rmnzswkzdfmrpxg2dbojsxgorrhizsy3tvnvpxgzlhnvsw45dthiytumjmonswo3lfnz2f643jpjstumz2gmyteldtnbqxezk7ojxw65c7nbqxg2b2gmzdubzmius26hljzu4j7gq5hdshwueqcfjc2bmveiyqbdxgyejetzovfrzws6tfhiztumzrgawhiyljnrpwg33emvrv64dbojqw24z2ha5dgmjsfuzs2mjqfr2g65dbnrpxg2dbojsxgorshiytalaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaacifoqlus3puiqkekp6g6fdecjcx2bak27angodamzoxugovlhtcj5xbly7teqwmf73fqk3clyfvs6hdauq5qnqahlxlmp2vrmnneedgjfplbwi"), 9: 
base32.a2b(b"aaaaaaiaaacyeaaaaaaqaaaaaeaaaadiaaaaa2aaaaaciaaaacgaaaaavqaaaagmaaaab3aaaaazn2tz3qt62bgsdnvksvdegsylb2kbltouheryflpho7hugme7svk7so2v7hmcgc43tcyugybuqzgifvkllikfiiezvml7ilolb7ivwvrv4d4t2gbywa44ibqwogmjtffta4b2sfwqebfg7pptergeqm5wo3tndtf7p3vftabqaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaactwxn7tsxj2nh6skwbghycguqfj7xrpegeporex5ebctynbgbl7y3m26swfhsb66ze4cmyhohaksid7fyljgkhag32ibc7vx2yj4j5saayg3gxuvrj4qpxwjhatgb3rycusa7zoc2jsrybw6saix5n6wcpcpmqaamxjsc6bwv4w4or2oylltmsbfbobvmenj3sa6lnq6iy4tugsnv72eaaybvqu3gmlomi3dnf2tum3hoseavpesyia2i2wqgwbmbtrgmotu6oaadirzs2idl54toffh4a2hehvg2e3zoed4dr6pcdpuqpnz2knte7gqqac6kfatp33ianoqvg6mdd4vaxa27lo6vpugbcvanhskaqq2kewn6kwaaaae4gg33emvrv63tbnvstumz2mnzhglddn5sgky27obqxeylnom5dqortgezc2mzngeycyy3spfyhi5dfpb2f62dbonudumzshkl7fjw5sp7x3yw4sdhze6qf7zgsjocpqtwl2gj5o6vufvixto3u2lddoj4xa5dumv4hix3sn5xxix3imfzwqortgi5fhno37hfotu2p5evmcmpqenjakt7pc6imi65cjp2icfhq2cmcx7rmnzswkzdfmrpxg2dbojsxgorrhizsy3tvnvpxgzlhnvsw45dthiytumjmonswo3lfnz2f643jpjstumz2gmyteldtnbqxezk7ojxw65c7nbqxg2b2gmzdubzmius26hljzu4j7gq5hdshwueqcfjc2bmveiyqbdxgyejetzovfrzws6tfhiztumzrgawhiyljnrpwg33emvrv64dbojqw24z2ha5dgmjsfuzs2mjqfr2g65dbnrpxg2dbojsxgorshiytalaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaacifoqlus3puiqkekp6g6fdecjcx2bak27angodamzoxugovlhtcj5xbly7teqwmf73fqk3clyfvs6hdauq5qnqahlxlmp2vrmnneedgjfplbwi"), }, 4: { # client[4] 3: 
base32.a2b(b"aaaaaaiaaacyeaaaaaaqaaaaaeaaaadiaaaaa2aaaaaciaaaacgaaaaavqaaaagmaaaab3aaaaazmbduh5nwvcvpgrihhnjxacz2jvzu3prrdqewo3vmxkhu5yd3fa3eil56fyh5l7ojimghwbf2o6ri7cmppr34qflr5o4w6s5fekxhdt3qvlgsw5yp5wrmjjffhph5czd5kzoo7yyg5x3wgxxzdvwtuom2c5olao62ep77b7wqaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaactwxn7tsxj2nh6skwbghycguqfj7xrpegeporex5ebctynbgbl73mcs3dmxesuoke5hyqe6qmsdwy6ctqg6vb4cldzswriymxconeesaarmcwjw6vqh7bdzd34ftjfcmxu2l423hefx7j3qblqmtsbo3sxlq2qaaudfa3cpzk5rcfvnb3wioufku7togz4kntyjzazp3qi5x3h63tweiaaikvquz5otrlhusf45w7o47ejpb4czdjhxgkuszrxslkyeedrljkmaajnqklmns4skrzitu7cat2bsio3dykoa32uhqjmpgk2fdbs4jzuqsiaa6k7uub7uqlamlqi2oduautemch242scu7cfor6kedxs6mm3uwjsmaaaae4gg33emvrv63tbnvstumz2mnzhglddn5sgky27obqxeylnom5dqortgezc2mzngeycyy3spfyhi5dfpb2f62dbonudumzshkl7fjw5sp7x3yw4sdhze6qf7zgsjocpqtwl2gj5o6vufvixto3u2lddoj4xa5dumv4hix3sn5xxix3imfzwqortgi5fhno37hfotu2p5evmcmpqenjakt7pc6imi65cjp2icfhq2cmcx7rmnzswkzdfmrpxg2dbojsxgorrhizsy3tvnvpxgzlhnvsw45dthiytumjmonswo3lfnz2f643jpjstumz2gmyteldtnbqxezk7ojxw65c7nbqxg2b2gmzdubzmius26hljzu4j7gq5hdshwueqcfjc2bmveiyqbdxgyejetzovfrzws6tfhiztumzrgawhiyljnrpwg33emvrv64dbojqw24z2ha5dgmjsfuzs2mjqfr2g65dbnrpxg2dbojsxgorshiytalaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaadusyxmwhtnfdeewwgxd25fwixycfdcy46ifqv4dhga23fko6dbl4ywo2d27n3zh3wd6zumhupvmtgspqrh6t7wbsghruzqd3imbo2tojfplbwi"), 8: 
base32.a2b(b"aaaaaaiaaacyeaaaaaaqaaaaaeaaaadiaaaaa2aaaaaciaaaacgaaaaavqaaaagmaaaab3aaaaaznjzqcxwyhgwlcpzvfb2berhoyw47h72gkzofwgksryqd4r6xlyougvyg4p3wkz7u37zllskeswuuh4w2rylbxecomnmqfv7n5ex3thjzq7ykr7gjkvq3kmrlhmxu3wnsr4ipsdn546btavjzc6yppoii2mxgnnk4qbxqrltaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaactwxn7tsxj2nh6skwbghycguqfj7xrpegeporex5ebctynbgbl72kfatp33ianoqvg6mdd4vaxa27lo6vpugbcvanhskaqq2kewn6kwaayg3gxuvrj4qpxwjhatgb3rycusa7zoc2jsrybw6saix5n6wcpcpmqaamxjsc6bwv4w4or2oylltmsbfbobvmenj3sa6lnq6iy4tugsnv72eaaybvqu3gmlomi3dnf2tum3hoseavpesyia2i2wqgwbmbtrgmotu6oaadirzs2idl54toffh4a2hehvg2e3zoed4dr6pcdpuqpnz2knte7gqqac6kfatp33ianoqvg6mdd4vaxa27lo6vpugbcvanhskaqq2kewn6kwaaaae4gg33emvrv63tbnvstumz2mnzhglddn5sgky27obqxeylnom5dqortgezc2mzngeycyy3spfyhi5dfpb2f62dbonudumzshkl7fjw5sp7x3yw4sdhze6qf7zgsjocpqtwl2gj5o6vufvixto3u2lddoj4xa5dumv4hix3sn5xxix3imfzwqortgi5fhno37hfotu2p5evmcmpqenjakt7pc6imi65cjp2icfhq2cmcx7rmnzswkzdfmrpxg2dbojsxgorrhizsy3tvnvpxgzlhnvsw45dthiytumjmonswo3lfnz2f643jpjstumz2gmyteldtnbqxezk7ojxw65c7nbqxg2b2gmzdubzmius26hljzu4j7gq5hdshwueqcfjc2bmveiyqbdxgyejetzovfrzws6tfhiztumzrgawhiyljnrpwg33emvrv64dbojqw24z2ha5dgmjsfuzs2mjqfr2g65dbnrpxg2dbojsxgorshiytalaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaadusyxmwhtnfdeewwgxd25fwixycfdcy46ifqv4dhga23fko6dbl4ywo2d27n3zh3wd6zumhupvmtgspqrh6t7wbsghruzqd3imbo2tojfplbwi"), }, } mutable_uri = b"URI:SSK:vfvcbdfbszyrsaxchgevhmmlii:euw4iw7bbnkrrwpzuburbhppuxhc3gwxv26f6imekhz7zyw2ojnq" mutable_shares = { 0: { # client[0] 2: 
base32.a2b(b"krqwq33febwxk5dbmjwgkiddn5xhiyljnzsxeidwgefhkckeaohgreckgxome2uhcps464pzydv5wsywald7wthurw2dp6qxtkeb5vtswoeshuyno24v5oble7xb4j6ij7wwqriaaaaaaaaabb5aaaaaaaaaacsoaaaaaakjl2ynu3wrm2mwdv3syv4r34b5mklbtjuv5i5bzcuiwgfnl4wtpombwn7l7ugdvv2xut7hwbttcjfsacuhc7ipf43gvrgrt5vj7hau52uenoywreckgxome2uhcps464pzydv5wsywaldqaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaarybcjqw7lbehfrbu7pwx26vvv3mbjprnqup7q7wxmdsoyiqaqzgbtv6cplrrgpzbnptsoqz7imnauamfaaaaaaaaaaamgaaaaaaaaaaaymaaaaghqaaacr4aaaayxaaaagnyaaaaaaaaaao4qaaaaaaaaacd2gcbacibqbudaskugjcdpodibaeaqkaadqiaq2abqqiaqqaucaeaqbvkghg3s5ivqiq4uiajdzagl7iqhz2uxwj6aqk47oscyqxyljmxkapldxhxigum2ejxflnps3yrxwwehop2zairilrouid5s6wxgnoqg2vq7rx77icxfnx7mq65niuknra3tezhpndwo7pdxtvtci645p4umyhdh5gp3kbdpypphvouaql662r6ufigp4wwm6emrsavlontn4wttg6lv7bcmq6ojw5utpvz3agoenovrkndncjzxog7sp2w7l6jkmzgfxd2asxos5khkjbxbuhgkd6j5yqlzsmk3kq67engtlgyd7hxk7nedw73bq2bs4353wtler23poucntgve22acfxdbyj2f6q2saj2agu2ptfk364d3zayddffxkcah4olnkczjonselwwrvdcu3vch3licaeirogosgsui3y4ovcyzleurbiunp7fsfk2fgmiek7b3jogvrhj4snvkpqjfh2w2lqnkvqvuoojmgrn6sll354mpomajbtlsv6fuguzgsxsm3l2ehxdxfdv6kcoyogaa5raetooahunf4ltvfdb4d7mwgpj4pg7tmcw55ku5vgx7tjqkdkuyq2uvhavkbsbujsrcfzve5ekuhftxm7nmtomibrblbwyxcr5mcy6qqwx66lrhejmgewrig74nzpriualhw4g22qaw423qeezqn6irea3vlgc3foz4egf6irincownoq7utv643vmtoueebigwrl6nehbos3ipsdx44tmucpvyui2jzgfulb5mrrcjuycmuzfigu6tf25lbysqn7n7smhqniddgctjt573rtd6o63wiaactacm7dw7giohzcgwe4okbijnmzlacetmfjjbasrd6whexjmwkaiaabdm4cpjolak4m47telnokjaxwodj7ont7n7vffnmhkzp3lyshkh3qaaohafr65kctby6wa34bjpnviviiwmwq5mft3yho4tmslaarpcg6biaajlxuwwafut5a6dsr7lq5fkmiik7icppic5ffjjmqaud4y746q2rzd42k42oitzukawdl2fupkoqcztfu7qf2flp55xh4lm6rzpdbb7gtnx4kaffym36rboalf2tbmatt46ra6igvjnvwmig6ivf6gqrhcietf373xrbm3bpeecz7luv7kv76i7pwa5xtubga37vnlu6hspejpsenxiptd23ipri7u5w7lz67mdjfrpahtp5j46obg4ct7c5lelfskzqw5hq7x7kd7pbcgq3gjbv53amzxjelwgxpf6ni74zb6aixhjjllivkthks6df52kvobtcnscytmjrrfbekvwmhtbcke2cgcyaj2cra7xmnd4bw2xe2qki5kycopo45ekfyxwzsmxuyxvjzqklnqjwm3j3gwcm75ftnrsvbj33w6eyr4dbz2tewum7vfsyfbb3ojw5ujtn22jxpr4nkmkqup6z7ukpp4cdxwyge2psc5suaxaltmp23gbawoo3qjeo44hgwgtkge2oowox3fpxwxkckaqgrxilfeyxmjp4cwf2rhpkbwtveqkukchv3u5rfkykwfj5zhleu3obsif6ldfclaef32wjpe5d6ddh2grdx2yt6tuhw53t6zuuumlw6t6i3e2ev7nh36e4shwbn3ew2bbahn6imgb5sismfttf5cdlr5kw6wvjoaqiaiz2onxecfx735dvon7epthnklq67tnqj4fvcwechbvmnkpiwd5fd36dirpshc7i7zj3rcr5y3kzps5nfnfnik27gdgemfn27iblcjh5bpkio6sr375bmxbh6fshbo7cvjzsdsirsafnbjzgl6ycqczwbacj5sxwgrzl6qbdhfbzev5fzutznzbasejqdjm3qxsdcuqvqau3kih2anr2itgmr44wmwtsk6bd42m2j436ptslaugmbbvtbzsukeqytkse7niovlilyfssn3ipzkqtclmetqrxxn7h56xn2ju7pjnuamd6ijfawn2jpjsrmnawaozeniosvhgovftoj24dl77ytdkxdl7ogappnlgkqsjyy43urtnj6tqf2psfptkbzyx4nu3rzgsqqi5ybx3pu6cvt6de67xutdz566wrkp2ymy5n7tqchmw77ss532noqcbfxv6quum6jmeed3exasdapvid6bilwzm5dcnutkcxktmsdryqopw5ntws3tnbd7um27clmxkgl2uinwzvv4tmo4axbj5zmgfd6sy2fiw6efdwjcyj4awdx3peuxcyh3ccee63w2nqaltierdrevl3f3hnjsrdrl4aosd23szhhaimhg2mjuocottcdsraoczh3waoyxx2skunaphe6w5gutu2z7cag3cx4pgsspfmspphuunzx357x6l36hj3tdys727rhawfwc4wc4ytgts4nrxlxl3xxzffunlhjhzj5guxljwxfrxwjfsg5c67pg3js7gvfqmpson6rjgiuwbsklranwhauq74lbesavftdzf7y3x5zwbi4uu6q2vqimbkttm7k6ycttsgknej2ylkwdxgtut7wecpepnb5
27pblj3vuzldjt3whsmstax536plulalxtxmvj6vvg4phofyaidhxhhkl4dfb6oabp3fi55jt77pw3jl55pwbsimjpdanuenll2xxctr6swaimuaqk4wvqa6rbgow3onr74v4alkuukc2tlmorvsjpgaazpun6pbfyorazzarhc2r7fjt55pmosowrqcpdwl2q34hcco2f3icmpktchxdvtpmitufnplqaifbtlktkpo7b22244n6dkmxtgcnxtixsit57uhh7rc5rqezjz7pfd7ojhrui5bcdzb7bo2nbo6o24lpwbg4bmqgbqpbwclq6kglgxefryxlkqydillki3545vcrelfw6reszml6emuyjscx377on2qpq26j5jrh5xmbwmpcyq6sewanlbmwwk2vqhq5zunbcyd6h5z3ms3bgfn7lflvev5vwmjnv5nzbgrmpamy453zuvy6xc6jp7tqgpmrlxup7suptejbacm6rdurdhcaori6i25wylgaikfov4dfgeswxdeerogy2m5tbzsdlr7pfhchd4wnokuipfwjzejxiruj5cljm66hvn47j3eseys3nsi6xdh566jgap5s5e7ytdkkhh5lsuv47oose4luozz427dzk577jccjg3n7b4myd565edmsywol3hgh2i54lcya6saaaaaaa"), 7: base32.a2b(b"krqwq33febwxk5dbmjwgkiddn5xhiyljnzsxeidwgefhkckeaohgreckgxome2uhcps464pzydv5wsywald7wthurw2dp6qxtkeb5vtswoeshuyno24v5oble7xb4j6ij7wwqriaaaaaaaaabb5aaaaaaaaaacsoaaaaaakjl2ynu3wrm2mwdv3syv4r34b5mklbtjuv5i5bzcuiwgfnl4wtpombwn7l7ugdvv2xut7hwbttcjfsacuhc7ipf43gvrgrt5vj7hau52uenoywreckgxome2uhcps464pzydv5wsywaldqaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaarybcjqw7lbehfrbu7pwx26vvv3mbjprnqup7q7wxmdsoyiqaqzgbtv6cplrrgpzbnptsoqz7imnauamfaaaaaaaaaaamgaaaaaaaaaaaymaaaaghqaaacr4aaaayxaaaagnyaaaaaaaaaao4qaaaaaaaaacd2gcbacibqbudaskugjcdpodibaeaqkaadqiaq2abqqiaqqaucaeaqbvkghg3s5ivqiq4uiajdzagl7iqhz2uxwj6aqk47oscyqxyljmxkapldxhxigum2ejxflnps3yrxwwehop2zairilrouid5s6wxgnoqg2vq7rx77icxfnx7mq65niuknra3tezhpndwo7pdxtvtci645p4umyhdh5gp3kbdpypphvouaql662r6ufigp4wwm6emrsavlontn4wttg6lv7bcmq6ojw5utpvz3agoenovrkndncjzxog7sp2w7l6jkmzgfxd2asxos5khkjbxbuhgkd6j5yqlzsmk3kq67engtlgyd7hxk7nedw73bq2bs4353wtler23poucntgve22acfxdbyj2f6q2saj2agu2ptfk364d3zayddffxkcah4olnkczjonselwwrvdcu3vch3licaeirogosgsui3y4ovcyzleurbiunp7fsfk2fgmiek7b3jogvrhj4snvkpqjfh2w2lqnkvqvuoojmgrn6sll354mpomajbtlsv6fuguzgsxsm3l2ehxdxfdv6kcoyogaa5raetooahunf4ltvfdb4d7mwgpj4pg7tmcw55ku5vgx7tjqkdkuyq2uvhavkbsbujsrcfzve5ekuhftxm7nmtomibrblbwyxcr5mcy6qqwx66lrhejmgewrig74nzpriualhw4g22qaw423qeezqn6irea3vlgc3foz4egf6irincownoq7utv643vmtoueebigwrl6nehbos3ipsdx44tmucpvyui2jzgfulb5mrrcjuycmuzfigu6tf25lbysqn7n7smhqniddgctjt573rtd6o63wiaactacm7dw7giohzcgwe4okbijnmzlacetmfjjbasrd6whexjmwkaiaaazuum3xriq54h5v6afcrrl3kkbd46oizwulc5fbslmblxfc3ldyyqaaszc7rkciv6rhwt5gbgnl5u54ihnqrfyuh7s54r2444mrhcwgizieaak4ap2xhvuz664fw3kayv7z5vawqs7skj6frzp3ihmk7js3tr7cwpnbfwoefuyn6bqkj5kssx3rvvffqgd3mhb7pbtegk6qfvsopvzmsiftabaykw3qitiqcv2wwfvdud5lkbjigatrf4ndeejsij5ab3eyaqqgxfiyxtv674qwltgynickeznu5el6uhs2k75hq2rsxhco2kmxw4didbdjodmjf2nrne63du76fd6laa7ng7zq4i7bx2xtohfrgwlxls6h7ibfsbybdz46sow3tn4vao3ulciz75kfbb62jrz3omvnihr2jwthks6df52kvobtcnscytmjrrfbekvwmhtbcke2cgcyaj2cra7xmnd4bw2xe2qki5kycopo45ekfyxwzsmxuyxvjzqklnqjwm3j3gwcm75ftnrsvbj33w6eyr4dbz2tewum7vfsyfbb3ojw5ujtn22jxpr4nkmkqup6z7ukpp4cdxwyge2psc5suaxaltmp23gbawoo3qjeo44hgwgtkge2oowox3fpxwxkckaqgrxilfeyxmjp4cwf2rhpkbwtveqkukchv3u5rfkykwfj5zhleu3obsif6ldfclaef32wjpe5d6ddh2grdx2yt6tuhw53t6zuuumlw6t6i3e2ev7nh36e4shwbn3ew2bbahn6imgb5sismfttf5cdlr5kw6wvjoaqiaiz2onxecfx735dvon7epthnklq67tnqj4fvcwechbvmnkpiwd5fd36dirpshc7i7zj3rcr5y3kzps5nfnfnik27gdgemfn27iblcjh5bpkio6sr375bmxbh6fshbo7cvjzsdsirsafnbjzgl6ycqczwbacj5sxwgrzl6qbdhfbzev5fzutznzba
sejqdjm3qxsdcuqvqau3kih2anr2itgmr44wmwtsk6bd42m2j436ptslaugmbbvtbzsukeqytkse7niovlilyfssn3ipzkqtclmetqrxxn7h56xn2ju7pjnuamd6ijfawn2jpjsrmnawaozeniosvhgovftoj24dl77ytdkxdl7ogappnlgkqsjyy43urtnj6tqf2psfptkbzyx4nu3rzgsqqi5ybx3pu6cvt6de67xutdz566wrkp2ymy5n7tqchmw77ss532noqcbfxv6quum6jmeed3exasdapvid6bilwzm5dcnutkcxktmsdryqopw5ntws3tnbd7um27clmxkgl2uinwzvv4tmo4axbj5zmgfd6sy2fiw6efdwjcyj4awdx3peuxcyh3ccee63w2nqaltierdrevl3f3hnjsrdrl4aosd23szhhaimhg2mjuocottcdsraoczh3waoyxx2skunaphe6w5gutu2z7cag3cx4pgsspfmspphuunzx357x6l36hj3tdys727rhawfwc4wc4ytgts4nrxlxl3xxzffunlhjhzj5guxljwxfrxwjfsg5c67pg3js7gvfqmpson6rjgiuwbsklranwhauq74lbesavftdzf7y3x5zwbi4uu6q2vqimbkttm7k6ycttsgknej2ylkwdxgtut7wecpepnb527pblj3vuzldjt3whsmstax536plulalxtxmvj6vvg4phofyaidhxhhkl4dfb6oabp3fi55jt77pw3jl55pwbsimjpdanuenll2xxctr6swaimuaqk4wvqa6rbgow3onr74v4alkuukc2tlmorvsjpgaazpun6pbfyorazzarhc2r7fjt55pmosowrqcpdwl2q34hcco2f3icmpktchxdvtpmitufnplqaifbtlktkpo7b22244n6dkmxtgcnxtixsit57uhh7rc5rqezjz7pfd7ojhrui5bcdzb7bo2nbo6o24lpwbg4bmqgbqpbwclq6kglgxefryxlkqydillki3545vcrelfw6reszml6emuyjscx377on2qpq26j5jrh5xmbwmpcyq6sewanlbmwwk2vqhq5zunbcyd6h5z3ms3bgfn7lflvev5vwmjnv5nzbgrmpamy453zuvy6xc6jp7tqgpmrlxup7suptejbacm6rdurdhcaori6i25wylgaikfov4dfgeswxdeerogy2m5tbzsdlr7pfhchd4wnokuipfwjzejxiruj5cljm66hvn47j3eseys3nsi6xdh566jgap5s5e7ytdkkhh5lsuv47oose4luozz427dzk577jccjg3n7b4myd565edmsywol3hgh2i54lcya6saaaaaaa"), }, 1: { # client[1] 3: base32.a2b(b"krqwq33febwxk5dbmjwgkiddn5xhiyljnzsxeidwgefhkckeaohm5tnwcmfsfmep4exoamss5lqyleq2ehahoduym5vgk37pmxx2xekzrtlzfvhapzb2fe3quv6tv3atr3g6ykqaaaaaaaaabb5aaaaaaaaaacsoaaaaaakjl2ynvx5mk74p2on26ax4rlp5jcoto5jkz3ndmgbmurhez4a5rbuyr55acbwlgbndlebsdyvlt4ttog767zqpoq3n2a4pra5va2o5zvbttlh45tnwcmfsfmep4exoamss5lqyleq2ehaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaarybcjqw7lbehfrbu7pwx26vvv3mbjprnqup7q7wxmdsoyiqaqzgbtv6cplrrgpzbnptsoqz7imnauamfaaaaaaaaaaamgaaaaaaaaaaaymaaaaghqaaacr4aaaayxaaaagnyaaaaaaaaaao4qaaaaaaaaacd2gcbacibqbudaskugjcdpodibaeaqkaadqiaq2abqqiaqqaucaeaqbvkghg3s5ivqiq4uiajdzagl7iqhz2uxwj6aqk47oscyqxyljmxkapldxhxigum2ejxflnps3yrxwwehop2zairilrouid5s6wxgnoqg2vq7rx77icxfnx7mq65niuknra3tezhpndwo7pdxtvtci645p4umyhdh5gp3kbdpypphvouaql662r6ufigp4wwm6emrsavlontn4wttg6lv7bcmq6ojw5utpvz3agoenovrkndncjzxog7sp2w7l6jkmzgfxd2asxos5khkjbxbuhgkd6j5yqlzsmk3kq67engtlgyd7hxk7nedw73bq2bs4353wtler23poucntgve22acfxdbyj2f6q2saj2agu2ptfk364d3zayddffxkcah4olnkczjonselwwrvdcu3vch3licaeirogosgsui3y4ovcyzleurbiunp7fsfk2fgmiek7b3jogvrhj4snvkpqjfh2w2lqnkvqvuoojmgrn6sll354mpomajbtlsv6fuguzgsxsm3l2ehxdxfdv6kcoyogaa5raetooahunf4ltvfdb4d7mwgpj4pg7tmcw55ku5vgx7tjqkdkuyq2uvhavkbsbujsrcfzve5ekuhftxm7nmtomibrblbwyxcr5mcy6qqwx66lrhejmgewrig74nzpriualhw4g22qaw423qeezqn6irea3vlgc3foz4egf6irincownoq7utv643vmtoueebigwrl6nehbos3ipsdx44tmucpvyui2jzgfulb5mrrcjuycmuzfigu6tf25lbysqn7n7smhqniddgctjt573rtd6o63wiaactacm7dw7giohzcgwe4okbijnmzlacetmfjjbasrd6whexjmwkaiaabdm4cpjolak4m47telnokjaxwodj7ont7n7vffnmhkzp3lyshkh3qaaohafr65kctby6wa34bjpnviviiwmwq5mft3yho4tmslaarpcg6biaaixzuvzu4rhtiubmgxuli6u5aftglj7alukw733opywz5ds6gcd6nf32llac2j6qpbzi7vyosvgeefpubhxubossuuwiakb6mp6pini4rja473klkmi52lzfwofja7bb6pixgcxkwdaerc2irfpnrqwh5o2remu3iv3dtib75ku63cb
6xzj4h53nmsguanjpganh3ow5yzovjcsezsj2cunyvlpva63zx5sudxe2zrtcu5zoty2tjzzlhodaz6rxe62ehbiktd4pmaodaz6ajsrohw7tdga2dpaftzbhadsolylgwgtbymenwthks6df52kvobtcnscytmjrrfbekvwmhtbcke2cgcyaj2cra7xmnd4bw2xe2qki5kycopo45ekfyxwzsmxuyxvjzqklnqjwm3j3gwcm75ftnrsvbj33w6eyr4dbz2tewum7vfsyfbb3ojw5ujtn22jxpr4nkmkqup6z7ukpp4cdxwyge2psc5suaxaltmp23gbawoo3qjeo44hgwgtkge2oowox3fpxwxkckaqgrxilfeyxmjp4cwf2rhpkbwtveqkukchv3u5rfkykwfj5zhleu3obsif6ldfclaef32wjpe5d6ddh2grdx2yt6tuhw53t6zuuumlw6t6i3e2ev7nh36e4shwbn3ew2bbahn6imgb5sismfttf5cdlr5kw6wvjoaqiaiz2onxecfx735dvon7epthnklq67tnqj4fvcwechbvmnkpiwd5fd36dirpshc7i7zj3rcr5y3kzps5nfnfnik27gdgemfn27iblcjh5bpkio6sr375bmxbh6fshbo7cvjzsdsirsafnbjzgl6ycqczwbacj5sxwgrzl6qbdhfbzev5fzutznzbasejqdjm3qxsdcuqvqau3kih2anr2itgmr44wmwtsk6bd42m2j436ptslaugmbbvtbzsukeqytkse7niovlilyfssn3ipzkqtclmetqrxxn7h56xn2ju7pjnuamd6ijfawn2jpjsrmnawaozeniosvhgovftoj24dl77ytdkxdl7ogappnlgkqsjyy43urtnj6tqf2psfptkbzyx4nu3rzgsqqi5ybx3pu6cvt6de67xutdz566wrkp2ymy5n7tqchmw77ss532noqcbfxv6quum6jmeed3exasdapvid6bilwzm5dcnutkcxktmsdryqopw5ntws3tnbd7um27clmxkgl2uinwzvv4tmo4axbj5zmgfd6sy2fiw6efdwjcyj4awdx3peuxcyh3ccee63w2nqaltierdrevl3f3hnjsrdrl4aosd23szhhaimhg2mjuocottcdsraoczh3waoyxx2skunaphe6w5gutu2z7cag3cx4pgsspfmspphuunzx357x6l36hj3tdys727rhawfwc4wc4ytgts4nrxlxl3xxzffunlhjhzj5guxljwxfrxwjfsg5c67pg3js7gvfqmpson6rjgiuwbsklranwhauq74lbesavftdzf7y3x5zwbi4uu6q2vqimbkttm7k6ycttsgknej2ylkwdxgtut7wecpepnb527pblj3vuzldjt3whsmstax536plulalxtxmvj6vvg4phofyaidhxhhkl4dfb6oabp3fi55jt77pw3jl55pwbsimjpdanuenll2xxctr6swaimuaqk4wvqa6rbgow3onr74v4alkuukc2tlmorvsjpgaazpun6pbfyorazzarhc2r7fjt55pmosowrqcpdwl2q34hcco2f3icmpktchxdvtpmitufnplqaifbtlktkpo7b22244n6dkmxtgcnxtixsit57uhh7rc5rqezjz7pfd7ojhrui5bcdzb7bo2nbo6o24lpwbg4bmqgbqpbwclq6kglgxefryxlkqydillki3545vcrelfw6reszml6emuyjscx377on2qpq26j5jrh5xmbwmpcyq6sewanlbmwwk2vqhq5zunbcyd6h5z3ms3bgfn7lflvev5vwmjnv5nzbgrmpamy453zuvy6xc6jp7tqgpmrlxup7suptejbacm6rdurdhcaori6i25wylgaikfov4dfgeswxdeerogy2m5tbzsdlr7pfhchd4wnokuipfwjzejxiruj5cljm66hvn47j3eseys3nsi6xdh566jgap5s5e7ytdkkhh5lsuv47oose4luozz427dzk577jccjg3n7b4myd565edmsywol3hgh2i54lcya6saaaaaaa"), 8: 
base32.a2b(b"krqwq33febwxk5dbmjwgkiddn5xhiyljnzsxeidwgefhkckeaohm5tnwcmfsfmep4exoamss5lqyleq2ehahoduym5vgk37pmxx2xekzrtlzfvhapzb2fe3quv6tv3atr3g6ykqaaaaaaaaabb5aaaaaaaaaacsoaaaaaakjl2ynvx5mk74p2on26ax4rlp5jcoto5jkz3ndmgbmurhez4a5rbuyr55acbwlgbndlebsdyvlt4ttog767zqpoq3n2a4pra5va2o5zvbttlh45tnwcmfsfmep4exoamss5lqyleq2ehaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaarybcjqw7lbehfrbu7pwx26vvv3mbjprnqup7q7wxmdsoyiqaqzgbtv6cplrrgpzbnptsoqz7imnauamfaaaaaaaaaaamgaaaaaaaaaaaymaaaaghqaaacr4aaaayxaaaagnyaaaaaaaaaao4qaaaaaaaaacd2gcbacibqbudaskugjcdpodibaeaqkaadqiaq2abqqiaqqaucaeaqbvkghg3s5ivqiq4uiajdzagl7iqhz2uxwj6aqk47oscyqxyljmxkapldxhxigum2ejxflnps3yrxwwehop2zairilrouid5s6wxgnoqg2vq7rx77icxfnx7mq65niuknra3tezhpndwo7pdxtvtci645p4umyhdh5gp3kbdpypphvouaql662r6ufigp4wwm6emrsavlontn4wttg6lv7bcmq6ojw5utpvz3agoenovrkndncjzxog7sp2w7l6jkmzgfxd2asxos5khkjbxbuhgkd6j5yqlzsmk3kq67engtlgyd7hxk7nedw73bq2bs4353wtler23poucntgve22acfxdbyj2f6q2saj2agu2ptfk364d3zayddffxkcah4olnkczjonselwwrvdcu3vch3licaeirogosgsui3y4ovcyzleurbiunp7fsfk2fgmiek7b3jogvrhj4snvkpqjfh2w2lqnkvqvuoojmgrn6sll354mpomajbtlsv6fuguzgsxsm3l2ehxdxfdv6kcoyogaa5raetooahunf4ltvfdb4d7mwgpj4pg7tmcw55ku5vgx7tjqkdkuyq2uvhavkbsbujsrcfzve5ekuhftxm7nmtomibrblbwyxcr5mcy6qqwx66lrhejmgewrig74nzpriualhw4g22qaw423qeezqn6irea3vlgc3foz4egf6irincownoq7utv643vmtoueebigwrl6nehbos3ipsdx44tmucpvyui2jzgfulb5mrrcjuycmuzfigu6tf25lbysqn7n7smhqniddgctjt573rtd6o63wiaabduzspufh6gomrp7sycuerhgl7ah3x3mpc6watmzlp6y23afmlxcaabui4znebv66jxcst6andsd2tncn4xcb6by7hrbx2ihw45fgzsptiiaaybvqu3gmlomi3dnf2tum3hoseavpesyia2i2wqgwbmbtrgmotu6oaamprqe6ozjrouoeltzhezhntop7wb6bbnnr3ak6x3ihvsjlz77gffkdet4sc63bxykwaikdyxwoehbrggxdu6qcwquzsnaltcgn52nyy4ypqbthfg4txtnznap6dktqtgtmtu7icooojppbwyi5c22uehbveptbuhbi7q3d4wuvsrptnd6wrhxwtlkxe4kurp4fey52p2v6urgephzxmaqfhm7pq3wxbi2uj5ourg65xnhbo4lrp7nzrdmk3svespmmitccvtwom6wtqefpp73j67zybiu4wrjjqt7vhip4ipuaezkmdy7feothks6df52kvobtcnscytmjrrfbekvwmhtbcke2cgcyaj2cra7xmnd4bw2xe2qki5kycopo45ekfyxwzsmxuyxvjzqklnqjwm3j3gwcm75ftnrsvbj33w6eyr4dbz2tewum7vfsyfbb3ojw5ujtn22jxpr4nkmkqup6z7ukpp4cdxwyge2psc5suaxaltmp23gbawoo3qjeo44hgwgtkge2oowox3fpxwxkckaqgrxilfeyxmjp4cwf2rhpkbwtveqkukchv3u5rfkykwfj5zhleu3obsif6ldfclaef32wjpe5d6ddh2grdx2yt6tuhw53t6zuuumlw6t6i3e2ev7nh36e4shwbn3ew2bbahn6imgb5sismfttf5cdlr5kw6wvjoaqiaiz2onxecfx735dvon7epthnklq67tnqj4fvcwechbvmnkpiwd5fd36dirpshc7i7zj3rcr5y3kzps5nfnfnik27gdgemfn27iblcjh5bpkio6sr375bmxbh6fshbo7cvjzsdsirsafnbjzgl6ycqczwbacj5sxwgrzl6qbdhfbzev5fzutznzbasejqdjm3qxsdcuqvqau3kih2anr2itgmr44wmwtsk6bd42m2j436ptslaugmbbvtbzsukeqytkse7niovlilyfssn3ipzkqtclmetqrxxn7h56xn2ju7pjnuamd6ijfawn2jpjsrmnawaozeniosvhgovftoj24dl77ytdkxdl7ogappnlgkqsjyy43urtnj6tqf2psfptkbzyx4nu3rzgsqqi5ybx3pu6cvt6de67xutdz566wrkp2ymy5n7tqchmw77ss532noqcbfxv6quum6jmeed3exasdapvid6bilwzm5dcnutkcxktmsdryqopw5ntws3tnbd7um27clmxkgl2uinwzvv4tmo4axbj5zmgfd6sy2fiw6efdwjcyj4awdx3peuxcyh3ccee63w2nqaltierdrevl3f3hnjsrdrl4aosd23szhhaimhg2mjuocottcdsraoczh3waoyxx2skunaphe6w5gutu2z7cag3cx4pgsspfmspphuunzx357x6l36hj3tdys727rhawfwc4wc4ytgts4nrxlxl3xxzffunlhjhzj5guxljwxfrxwjfsg5c67pg3js7gvfqmpson6rjgiuwbsklranwhauq74lbesavftdzf7y3x5zwbi4uu6q2vqimbkttm7k6ycttsgknej2ylkwdxgtut7wecpepnb5
27pblj3vuzldjt3whsmstax536plulalxtxmvj6vvg4phofyaidhxhhkl4dfb6oabp3fi55jt77pw3jl55pwbsimjpdanuenll2xxctr6swaimuaqk4wvqa6rbgow3onr74v4alkuukc2tlmorvsjpgaazpun6pbfyorazzarhc2r7fjt55pmosowrqcpdwl2q34hcco2f3icmpktchxdvtpmitufnplqaifbtlktkpo7b22244n6dkmxtgcnxtixsit57uhh7rc5rqezjz7pfd7ojhrui5bcdzb7bo2nbo6o24lpwbg4bmqgbqpbwclq6kglgxefryxlkqydillki3545vcrelfw6reszml6emuyjscx377on2qpq26j5jrh5xmbwmpcyq6sewanlbmwwk2vqhq5zunbcyd6h5z3ms3bgfn7lflvev5vwmjnv5nzbgrmpamy453zuvy6xc6jp7tqgpmrlxup7suptejbacm6rdurdhcaori6i25wylgaikfov4dfgeswxdeerogy2m5tbzsdlr7pfhchd4wnokuipfwjzejxiruj5cljm66hvn47j3eseys3nsi6xdh566jgap5s5e7ytdkkhh5lsuv47oose4luozz427dzk577jccjg3n7b4myd565edmsywol3hgh2i54lcya6saaaaaaa"), }, 2: { # client[2] 4: base32.a2b(b"krqwq33febwxk5dbmjwgkiddn5xhiyljnzsxeidwgefhkckeaohelfyqrvy7pzjh3tqx73xsfkpi3so4qjghlywdkwuioyjvbtgekiulaes4myuxydi2sudi2fkg2q5nkjrt3zaaaaaaaaaabb5aaaaaaaaaacsoaaaaaakjl2ynujj2kh34jfiungka3deihevw7p3mzhj7uobc3qnbfxqp3xfazrsicvtz3enqkn4xxlu5xvxjj2rtlv6j3w3kmpzn2jbrnuoafq2aacoulfyqrvy7pzjh3tqx73xsfkpi3so4qjgaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaarybcjqw7lbehfrbu7pwx26vvv3mbjprnqup7q7wxmdsoyiqaqzgbtv6cplrrgpzbnptsoqz7imnauamfaaaaaaaaaaamgaaaaaaaaaaaymaaaaghqaaacr4aaaayxaaaagnyaaaaaaaaaao4qaaaaaaaaacd2gcbacibqbudaskugjcdpodibaeaqkaadqiaq2abqqiaqqaucaeaqbvkghg3s5ivqiq4uiajdzagl7iqhz2uxwj6aqk47oscyqxyljmxkapldxhxigum2ejxflnps3yrxwwehop2zairilrouid5s6wxgnoqg2vq7rx77icxfnx7mq65niuknra3tezhpndwo7pdxtvtci645p4umyhdh5gp3kbdpypphvouaql662r6ufigp4wwm6emrsavlontn4wttg6lv7bcmq6ojw5utpvz3agoenovrkndncjzxog7sp2w7l6jkmzgfxd2asxos5khkjbxbuhgkd6j5yqlzsmk3kq67engtlgyd7hxk7nedw73bq2bs4353wtler23poucntgve22acfxdbyj2f6q2saj2agu2ptfk364d3zayddffxkcah4olnkczjonselwwrvdcu3vch3licaeirogosgsui3y4ovcyzleurbiunp7fsfk2fgmiek7b3jogvrhj4snvkpqjfh2w2lqnkvqvuoojmgrn6sll354mpomajbtlsv6fuguzgsxsm3l2ehxdxfdv6kcoyogaa5raetooahunf4ltvfdb4d7mwgpj4pg7tmcw55ku5vgx7tjqkdkuyq2uvhavkbsbujsrcfzve5ekuhftxm7nmtomibrblbwyxcr5mcy6qqwx66lrhejmgewrig74nzpriualhw4g22qaw423qeezqn6irea3vlgc3foz4egf6irincownoq7utv643vmtoueebigwrl6nehbos3ipsdx44tmucpvyui2jzgfulb5mrrcjuycmuzfigu6tf25lbysqn7n7smhqniddgctjt573rtd6o63wiaactacm7dw7giohzcgwe4okbijnmzlacetmfjjbasrd6whexjmwkaiaaazuum3xriq54h5v6afcrrl3kkbd46oizwulc5fbslmblxfc3ldyyqaavmjy6g336aewy42vw5rusytyi7vzs6y22c5jhxyt5w6gthcbjp4zaakhlvz26psskxjisz27qlpzw4annhegunhnvlyr35ijotdizegjf4lgx3o4dt3d6d4bjqexz2eu3dprjlmuvlkbfcpmkq2ceydywqqcqdhmdl2nm5ku6z6gnss2bsbn7ycab2ggktr3bjlzaeo5pb4meolrckviwiddsikieo4wyatlxtybmzkoh3fb2vxc34xb47ty2cyi55xjan6m4bbie7muzrzmjmzviwlotk6icove7ydpag6dlrjwu4svgs3y2ln5r463dmflqs3p4aa7dldhjb5kfpxq63tgquunkucsfvlkaiiisgthks6df52kvobtcnscytmjrrfbekvwmhtbcke2cgcyaj2cra7xmnd4bw2xe2qki5kycopo45ekfyxwzsmxuyxvjzqklnqjwm3j3gwcm75ftnrsvbj33w6eyr4dbz2tewum7vfsyfbb3ojw5ujtn22jxpr4nkmkqup6z7ukpp4cdxwyge2psc5suaxaltmp23gbawoo3qjeo44hgwgtkge2oowox3fpxwxkckaqgrxilfeyxmjp4cwf2rhpkbwtveqkukchv3u5rfkykwfj5zhleu3obsif6ldfclaef32wjpe5d6ddh2grdx2yt6tuhw53t6zuuumlw6t6i3e2ev7nh36e4shwbn3ew2bbahn6imgb5sismfttf5cdlr5kw6wvjoaqiaiz2onxecfx735dvon7epthnklq67tnqj4fvcwechbvmnkpiwd5fd36dirpshc7i7zj3rcr5y3kzps5nfnfnik27gdgemfn27iblcjh5bpkio6sr375bmxbh6fshbo7cvjzsdsirsafnbjzgl6ycqczwbacj5sxwgrzl
6qbdhfbzev5fzutznzbasejqdjm3qxsdcuqvqau3kih2anr2itgmr44wmwtsk6bd42m2j436ptslaugmbbvtbzsukeqytkse7niovlilyfssn3ipzkqtclmetqrxxn7h56xn2ju7pjnuamd6ijfawn2jpjsrmnawaozeniosvhgovftoj24dl77ytdkxdl7ogappnlgkqsjyy43urtnj6tqf2psfptkbzyx4nu3rzgsqqi5ybx3pu6cvt6de67xutdz566wrkp2ymy5n7tqchmw77ss532noqcbfxv6quum6jmeed3exasdapvid6bilwzm5dcnutkcxktmsdryqopw5ntws3tnbd7um27clmxkgl2uinwzvv4tmo4axbj5zmgfd6sy2fiw6efdwjcyj4awdx3peuxcyh3ccee63w2nqaltierdrevl3f3hnjsrdrl4aosd23szhhaimhg2mjuocottcdsraoczh3waoyxx2skunaphe6w5gutu2z7cag3cx4pgsspfmspphuunzx357x6l36hj3tdys727rhawfwc4wc4ytgts4nrxlxl3xxzffunlhjhzj5guxljwxfrxwjfsg5c67pg3js7gvfqmpson6rjgiuwbsklranwhauq74lbesavftdzf7y3x5zwbi4uu6q2vqimbkttm7k6ycttsgknej2ylkwdxgtut7wecpepnb527pblj3vuzldjt3whsmstax536plulalxtxmvj6vvg4phofyaidhxhhkl4dfb6oabp3fi55jt77pw3jl55pwbsimjpdanuenll2xxctr6swaimuaqk4wvqa6rbgow3onr74v4alkuukc2tlmorvsjpgaazpun6pbfyorazzarhc2r7fjt55pmosowrqcpdwl2q34hcco2f3icmpktchxdvtpmitufnplqaifbtlktkpo7b22244n6dkmxtgcnxtixsit57uhh7rc5rqezjz7pfd7ojhrui5bcdzb7bo2nbo6o24lpwbg4bmqgbqpbwclq6kglgxefryxlkqydillki3545vcrelfw6reszml6emuyjscx377on2qpq26j5jrh5xmbwmpcyq6sewanlbmwwk2vqhq5zunbcyd6h5z3ms3bgfn7lflvev5vwmjnv5nzbgrmpamy453zuvy6xc6jp7tqgpmrlxup7suptejbacm6rdurdhcaori6i25wylgaikfov4dfgeswxdeerogy2m5tbzsdlr7pfhchd4wnokuipfwjzejxiruj5cljm66hvn47j3eseys3nsi6xdh566jgap5s5e7ytdkkhh5lsuv47oose4luozz427dzk577jccjg3n7b4myd565edmsywol3hgh2i54lcya6saaaaaaa"), 9: base32.a2b(b"krqwq33febwxk5dbmjwgkiddn5xhiyljnzsxeidwgefhkckeaohelfyqrvy7pzjh3tqx73xsfkpi3so4qjghlywdkwuioyjvbtgekiulaes4myuxydi2sudi2fkg2q5nkjrt3zaaaaaaaaaabb5aaaaaaaaaacsoaaaaaakjl2ynujj2kh34jfiungka3deihevw7p3mzhj7uobc3qnbfxqp3xfazrsicvtz3enqkn4xxlu5xvxjj2rtlv6j3w3kmpzn2jbrnuoafq2aacoulfyqrvy7pzjh3tqx73xsfkpi3so4qjgaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaarybcjqw7lbehfrbu7pwx26vvv3mbjprnqup7q7wxmdsoyiqaqzgbtv6cplrrgpzbnptsoqz7imnauamfaaaaaaaaaaamgaaaaaaaaaaaymaaaaghqaaacr4aaaayxaaaagnyaaaaaaaaaao4qaaaaaaaaacd2gcbacibqbudaskugjcdpodibaeaqkaadqiaq2abqqiaqqaucaeaqbvkghg3s5ivqiq4uiajdzagl7iqhz2uxwj6aqk47oscyqxyljmxkapldxhxigum2ejxflnps3yrxwwehop2zairilrouid5s6wxgnoqg2vq7rx77icxfnx7mq65niuknra3tezhpndwo7pdxtvtci645p4umyhdh5gp3kbdpypphvouaql662r6ufigp4wwm6emrsavlontn4wttg6lv7bcmq6ojw5utpvz3agoenovrkndncjzxog7sp2w7l6jkmzgfxd2asxos5khkjbxbuhgkd6j5yqlzsmk3kq67engtlgyd7hxk7nedw73bq2bs4353wtler23poucntgve22acfxdbyj2f6q2saj2agu2ptfk364d3zayddffxkcah4olnkczjonselwwrvdcu3vch3licaeirogosgsui3y4ovcyzleurbiunp7fsfk2fgmiek7b3jogvrhj4snvkpqjfh2w2lqnkvqvuoojmgrn6sll354mpomajbtlsv6fuguzgsxsm3l2ehxdxfdv6kcoyogaa5raetooahunf4ltvfdb4d7mwgpj4pg7tmcw55ku5vgx7tjqkdkuyq2uvhavkbsbujsrcfzve5ekuhftxm7nmtomibrblbwyxcr5mcy6qqwx66lrhejmgewrig74nzpriualhw4g22qaw423qeezqn6irea3vlgc3foz4egf6irincownoq7utv643vmtoueebigwrl6nehbos3ipsdx44tmucpvyui2jzgfulb5mrrcjuycmuzfigu6tf25lbysqn7n7smhqniddgctjt573rtd6o63wiaabduzspufh6gomrp7sycuerhgl7ah3x3mpc6watmzlp6y23afmlxcaabui4znebv66jxcst6andsd2tncn4xcb6by7hrbx2ihw45fgzsptiiaaybvqu3gmlomi3dnf2tum3hoseavpesyia2i2wqgwbmbtrgmotu6oaalugjhzef5wdpqvmaquhrpm4iodcmnohj5afnbjte2axgem33u3rr7yycphmuyxkhcfz4tsmtwzxh73a7aqwwy5qfpl5ud2zev477tcsviylwmlv6fgp54rk4iwputjkcgegczq6uynbvebu67jf6f2foocphznw7jrdsvphppguypj
wmkkhugm6yjnrjka2ycvxsyh5xohn3fvbbhl4tvhedbaix3zlwxeayabnldp3oqnkjger7yrxh44wuv3adb76jh3nl6h45t4ixj77himst5plmpdtexyoozpxzjmedge5leynxhziothks6df52kvobtcnscytmjrrfbekvwmhtbcke2cgcyaj2cra7xmnd4bw2xe2qki5kycopo45ekfyxwzsmxuyxvjzqklnqjwm3j3gwcm75ftnrsvbj33w6eyr4dbz2tewum7vfsyfbb3ojw5ujtn22jxpr4nkmkqup6z7ukpp4cdxwyge2psc5suaxaltmp23gbawoo3qjeo44hgwgtkge2oowox3fpxwxkckaqgrxilfeyxmjp4cwf2rhpkbwtveqkukchv3u5rfkykwfj5zhleu3obsif6ldfclaef32wjpe5d6ddh2grdx2yt6tuhw53t6zuuumlw6t6i3e2ev7nh36e4shwbn3ew2bbahn6imgb5sismfttf5cdlr5kw6wvjoaqiaiz2onxecfx735dvon7epthnklq67tnqj4fvcwechbvmnkpiwd5fd36dirpshc7i7zj3rcr5y3kzps5nfnfnik27gdgemfn27iblcjh5bpkio6sr375bmxbh6fshbo7cvjzsdsirsafnbjzgl6ycqczwbacj5sxwgrzl6qbdhfbzev5fzutznzbasejqdjm3qxsdcuqvqau3kih2anr2itgmr44wmwtsk6bd42m2j436ptslaugmbbvtbzsukeqytkse7niovlilyfssn3ipzkqtclmetqrxxn7h56xn2ju7pjnuamd6ijfawn2jpjsrmnawaozeniosvhgovftoj24dl77ytdkxdl7ogappnlgkqsjyy43urtnj6tqf2psfptkbzyx4nu3rzgsqqi5ybx3pu6cvt6de67xutdz566wrkp2ymy5n7tqchmw77ss532noqcbfxv6quum6jmeed3exasdapvid6bilwzm5dcnutkcxktmsdryqopw5ntws3tnbd7um27clmxkgl2uinwzvv4tmo4axbj5zmgfd6sy2fiw6efdwjcyj4awdx3peuxcyh3ccee63w2nqaltierdrevl3f3hnjsrdrl4aosd23szhhaimhg2mjuocottcdsraoczh3waoyxx2skunaphe6w5gutu2z7cag3cx4pgsspfmspphuunzx357x6l36hj3tdys727rhawfwc4wc4ytgts4nrxlxl3xxzffunlhjhzj5guxljwxfrxwjfsg5c67pg3js7gvfqmpson6rjgiuwbsklranwhauq74lbesavftdzf7y3x5zwbi4uu6q2vqimbkttm7k6ycttsgknej2ylkwdxgtut7wecpepnb527pblj3vuzldjt3whsmstax536plulalxtxmvj6vvg4phofyaidhxhhkl4dfb6oabp3fi55jt77pw3jl55pwbsimjpdanuenll2xxctr6swaimuaqk4wvqa6rbgow3onr74v4alkuukc2tlmorvsjpgaazpun6pbfyorazzarhc2r7fjt55pmosowrqcpdwl2q34hcco2f3icmpktchxdvtpmitufnplqaifbtlktkpo7b22244n6dkmxtgcnxtixsit57uhh7rc5rqezjz7pfd7ojhrui5bcdzb7bo2nbo6o24lpwbg4bmqgbqpbwclq6kglgxefryxlkqydillki3545vcrelfw6reszml6emuyjscx377on2qpq26j5jrh5xmbwmpcyq6sewanlbmwwk2vqhq5zunbcyd6h5z3ms3bgfn7lflvev5vwmjnv5nzbgrmpamy453zuvy6xc6jp7tqgpmrlxup7suptejbacm6rdurdhcaori6i25wylgaikfov4dfgeswxdeerogy2m5tbzsdlr7pfhchd4wnokuipfwjzejxiruj5cljm66hvn47j3eseys3nsi6xdh566jgap5s5e7ytdkkhh5lsuv47oose4luozz427dzk577jccjg3n7b4myd565edmsywol3hgh2i54lcya6saaaaaaa"), }, 3: { # client[3] 1: 
base32.a2b(b"krqwq33febwxk5dbmjwgkiddn5xhiyljnzsxeidwgefhkckeaohar2c5jzdcrekne6myzpxl2z65d6ufdjuuyhabg2j57ecmy23jyflcp7djzupj4tfr345bkg7cmwxmpmn3h4iaaaaaaaaabb5aaaaaaaaaacsoaaaaaakjl2ynu3sjzjwrfjn4cwfspkueq47j6ej2uodmjsjexyray7dn6ut4nnuftdhhgxo3t3a5eoipsdy5evdihyeigny3c4adtpveplcwt76m7naar2c5jzdcrekne6myzpxl2z65d6ufdjuqaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaarybcjqw7lbehfrbu7pwx26vvv3mbjprnqup7q7wxmdsoyiqaqzgbtv6cplrrgpzbnptsoqz7imnauamfaaaaaaaaaaamgaaaaaaaaaaaymaaaaghqaaacr4aaaayxaaaagnyaaaaaaaaaao4qaaaaaaaaacd2gcbacibqbudaskugjcdpodibaeaqkaadqiaq2abqqiaqqaucaeaqbvkghg3s5ivqiq4uiajdzagl7iqhz2uxwj6aqk47oscyqxyljmxkapldxhxigum2ejxflnps3yrxwwehop2zairilrouid5s6wxgnoqg2vq7rx77icxfnx7mq65niuknra3tezhpndwo7pdxtvtci645p4umyhdh5gp3kbdpypphvouaql662r6ufigp4wwm6emrsavlontn4wttg6lv7bcmq6ojw5utpvz3agoenovrkndncjzxog7sp2w7l6jkmzgfxd2asxos5khkjbxbuhgkd6j5yqlzsmk3kq67engtlgyd7hxk7nedw73bq2bs4353wtler23poucntgve22acfxdbyj2f6q2saj2agu2ptfk364d3zayddffxkcah4olnkczjonselwwrvdcu3vch3licaeirogosgsui3y4ovcyzleurbiunp7fsfk2fgmiek7b3jogvrhj4snvkpqjfh2w2lqnkvqvuoojmgrn6sll354mpomajbtlsv6fuguzgsxsm3l2ehxdxfdv6kcoyogaa5raetooahunf4ltvfdb4d7mwgpj4pg7tmcw55ku5vgx7tjqkdkuyq2uvhavkbsbujsrcfzve5ekuhftxm7nmtomibrblbwyxcr5mcy6qqwx66lrhejmgewrig74nzpriualhw4g22qaw423qeezqn6irea3vlgc3foz4egf6irincownoq7utv643vmtoueebigwrl6nehbos3ipsdx44tmucpvyui2jzgfulb5mrrcjuycmuzfigu6tf25lbysqn7n7smhqniddgctjt573rtd6o63wiaactacm7dw7giohzcgwe4okbijnmzlacetmfjjbasrd6whexjmwkaiaabdm4cpjolak4m47telnokjaxwodj7ont7n7vffnmhkzp3lyshkh3qaarzybn64ru5rss7tmi4ttv26q66ebdvvrtyd3s5t7dmqku3uoefroaahxhmt46bsa3cpmjfwjyw3zijhhbqh3j2dbc42jaqj6wvmxoz7pecirykndmb6dylde5utzkpucky5pk3x4u6dphkq2ycmfuyvpg5lsudusosyofwfnokbe7qmld2xwaxah3qkywarndsfvp3rybq2y7q42silj5cnlbdxnabv2zhhix3h5o5kz2ttqzm34clnbo527obrxvqlxz3sofwcmz2kqs4c3ypj6o4ny4hkh6qu7ljs7xiygzmoojhnaxc6wjbnvnsu2socztfaegy6ft22tgtdudtok4z755vgj3etwmje73af2f2thks6df52kvobtcnscytmjrrfbekvwmhtbcke2cgcyaj2cra7xmnd4bw2xe2qki5kycopo45ekfyxwzsmxuyxvjzqklnqjwm3j3gwcm75ftnrsvbj33w6eyr4dbz2tewum7vfsyfbb3ojw5ujtn22jxpr4nkmkqup6z7ukpp4cdxwyge2psc5suaxaltmp23gbawoo3qjeo44hgwgtkge2oowox3fpxwxkckaqgrxilfeyxmjp4cwf2rhpkbwtveqkukchv3u5rfkykwfj5zhleu3obsif6ldfclaef32wjpe5d6ddh2grdx2yt6tuhw53t6zuuumlw6t6i3e2ev7nh36e4shwbn3ew2bbahn6imgb5sismfttf5cdlr5kw6wvjoaqiaiz2onxecfx735dvon7epthnklq67tnqj4fvcwechbvmnkpiwd5fd36dirpshc7i7zj3rcr5y3kzps5nfnfnik27gdgemfn27iblcjh5bpkio6sr375bmxbh6fshbo7cvjzsdsirsafnbjzgl6ycqczwbacj5sxwgrzl6qbdhfbzev5fzutznzbasejqdjm3qxsdcuqvqau3kih2anr2itgmr44wmwtsk6bd42m2j436ptslaugmbbvtbzsukeqytkse7niovlilyfssn3ipzkqtclmetqrxxn7h56xn2ju7pjnuamd6ijfawn2jpjsrmnawaozeniosvhgovftoj24dl77ytdkxdl7ogappnlgkqsjyy43urtnj6tqf2psfptkbzyx4nu3rzgsqqi5ybx3pu6cvt6de67xutdz566wrkp2ymy5n7tqchmw77ss532noqcbfxv6quum6jmeed3exasdapvid6bilwzm5dcnutkcxktmsdryqopw5ntws3tnbd7um27clmxkgl2uinwzvv4tmo4axbj5zmgfd6sy2fiw6efdwjcyj4awdx3peuxcyh3ccee63w2nqaltierdrevl3f3hnjsrdrl4aosd23szhhaimhg2mjuocottcdsraoczh3waoyxx2skunaphe6w5gutu2z7cag3cx4pgsspfmspphuunzx357x6l36hj3tdys727rhawfwc4wc4ytgts4nrxlxl3xxzffunlhjhzj5guxljwxfrxwjfsg5c67pg3js7gvfqmpson6rjgiuwbsklranwhauq74lbesavftdzf7y3x5zwbi4uu6q2vqimbkttm7k6ycttsgknej2ylkwdxgtut7wecpepnb5
27pblj3vuzldjt3whsmstax536plulalxtxmvj6vvg4phofyaidhxhhkl4dfb6oabp3fi55jt77pw3jl55pwbsimjpdanuenll2xxctr6swaimuaqk4wvqa6rbgow3onr74v4alkuukc2tlmorvsjpgaazpun6pbfyorazzarhc2r7fjt55pmosowrqcpdwl2q34hcco2f3icmpktchxdvtpmitufnplqaifbtlktkpo7b22244n6dkmxtgcnxtixsit57uhh7rc5rqezjz7pfd7ojhrui5bcdzb7bo2nbo6o24lpwbg4bmqgbqpbwclq6kglgxefryxlkqydillki3545vcrelfw6reszml6emuyjscx377on2qpq26j5jrh5xmbwmpcyq6sewanlbmwwk2vqhq5zunbcyd6h5z3ms3bgfn7lflvev5vwmjnv5nzbgrmpamy453zuvy6xc6jp7tqgpmrlxup7suptejbacm6rdurdhcaori6i25wylgaikfov4dfgeswxdeerogy2m5tbzsdlr7pfhchd4wnokuipfwjzejxiruj5cljm66hvn47j3eseys3nsi6xdh566jgap5s5e7ytdkkhh5lsuv47oose4luozz427dzk577jccjg3n7b4myd565edmsywol3hgh2i54lcya6saaaaaaa"), 6: base32.a2b(b"krqwq33febwxk5dbmjwgkiddn5xhiyljnzsxeidwgefhkckeaohar2c5jzdcrekne6myzpxl2z65d6ufdjuuyhabg2j57ecmy23jyflcp7djzupj4tfr345bkg7cmwxmpmn3h4iaaaaaaaaabb5aaaaaaaaaacsoaaaaaakjl2ynu3sjzjwrfjn4cwfspkueq47j6ej2uodmjsjexyray7dn6ut4nnuftdhhgxo3t3a5eoipsdy5evdihyeigny3c4adtpveplcwt76m7naar2c5jzdcrekne6myzpxl2z65d6ufdjuqaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaarybcjqw7lbehfrbu7pwx26vvv3mbjprnqup7q7wxmdsoyiqaqzgbtv6cplrrgpzbnptsoqz7imnauamfaaaaaaaaaaamgaaaaaaaaaaaymaaaaghqaaacr4aaaayxaaaagnyaaaaaaaaaao4qaaaaaaaaacd2gcbacibqbudaskugjcdpodibaeaqkaadqiaq2abqqiaqqaucaeaqbvkghg3s5ivqiq4uiajdzagl7iqhz2uxwj6aqk47oscyqxyljmxkapldxhxigum2ejxflnps3yrxwwehop2zairilrouid5s6wxgnoqg2vq7rx77icxfnx7mq65niuknra3tezhpndwo7pdxtvtci645p4umyhdh5gp3kbdpypphvouaql662r6ufigp4wwm6emrsavlontn4wttg6lv7bcmq6ojw5utpvz3agoenovrkndncjzxog7sp2w7l6jkmzgfxd2asxos5khkjbxbuhgkd6j5yqlzsmk3kq67engtlgyd7hxk7nedw73bq2bs4353wtler23poucntgve22acfxdbyj2f6q2saj2agu2ptfk364d3zayddffxkcah4olnkczjonselwwrvdcu3vch3licaeirogosgsui3y4ovcyzleurbiunp7fsfk2fgmiek7b3jogvrhj4snvkpqjfh2w2lqnkvqvuoojmgrn6sll354mpomajbtlsv6fuguzgsxsm3l2ehxdxfdv6kcoyogaa5raetooahunf4ltvfdb4d7mwgpj4pg7tmcw55ku5vgx7tjqkdkuyq2uvhavkbsbujsrcfzve5ekuhftxm7nmtomibrblbwyxcr5mcy6qqwx66lrhejmgewrig74nzpriualhw4g22qaw423qeezqn6irea3vlgc3foz4egf6irincownoq7utv643vmtoueebigwrl6nehbos3ipsdx44tmucpvyui2jzgfulb5mrrcjuycmuzfigu6tf25lbysqn7n7smhqniddgctjt573rtd6o63wiaactacm7dw7giohzcgwe4okbijnmzlacetmfjjbasrd6whexjmwkaiaaazuum3xriq54h5v6afcrrl3kkbd46oizwulc5fbslmblxfc3ldyyqaaszc7rkciv6rhwt5gbgnl5u54ihnqrfyuh7s54r2444mrhcwgizieaalkclm4iljq34daut2vffpxdlkklamhwyod66dgimv5alle47lszewah5lt22m7poc3nvamk7462qlijpzfe7cy4x5udwfpuznzy7rlhx7ev5hmvxi5m3nctyofw2axz6a4fttdxoefezaqu7wur2rtcmxx5wxmpdkfflvzvawzr2oecq7yriklbc2nfyk4ezeulmdaktctlwcoz26jt3yx5gg2ez6jnhblc5swn7qbl6t3ebm2fmworvtrpxyqhegsly6xtpbh2yfdu6ww52ypka6cc4crgov33cdnbxyekdmjck2h55ni4othks6df52kvobtcnscytmjrrfbekvwmhtbcke2cgcyaj2cra7xmnd4bw2xe2qki5kycopo45ekfyxwzsmxuyxvjzqklnqjwm3j3gwcm75ftnrsvbj33w6eyr4dbz2tewum7vfsyfbb3ojw5ujtn22jxpr4nkmkqup6z7ukpp4cdxwyge2psc5suaxaltmp23gbawoo3qjeo44hgwgtkge2oowox3fpxwxkckaqgrxilfeyxmjp4cwf2rhpkbwtveqkukchv3u5rfkykwfj5zhleu3obsif6ldfclaef32wjpe5d6ddh2grdx2yt6tuhw53t6zuuumlw6t6i3e2ev7nh36e4shwbn3ew2bbahn6imgb5sismfttf5cdlr5kw6wvjoaqiaiz2onxecfx735dvon7epthnklq67tnqj4fvcwechbvmnkpiwd5fd36dirpshc7i7zj3rcr5y3kzps5nfnfnik27gdgemfn27iblcjh5bpkio6sr375bmxbh6fshbo7cvjzsdsirsafnbjzgl6ycqczwbacj5sxwgrzl6qbdhfbzev5fzutznzba
sejqdjm3qxsdcuqvqau3kih2anr2itgmr44wmwtsk6bd42m2j436ptslaugmbbvtbzsukeqytkse7niovlilyfssn3ipzkqtclmetqrxxn7h56xn2ju7pjnuamd6ijfawn2jpjsrmnawaozeniosvhgovftoj24dl77ytdkxdl7ogappnlgkqsjyy43urtnj6tqf2psfptkbzyx4nu3rzgsqqi5ybx3pu6cvt6de67xutdz566wrkp2ymy5n7tqchmw77ss532noqcbfxv6quum6jmeed3exasdapvid6bilwzm5dcnutkcxktmsdryqopw5ntws3tnbd7um27clmxkgl2uinwzvv4tmo4axbj5zmgfd6sy2fiw6efdwjcyj4awdx3peuxcyh3ccee63w2nqaltierdrevl3f3hnjsrdrl4aosd23szhhaimhg2mjuocottcdsraoczh3waoyxx2skunaphe6w5gutu2z7cag3cx4pgsspfmspphuunzx357x6l36hj3tdys727rhawfwc4wc4ytgts4nrxlxl3xxzffunlhjhzj5guxljwxfrxwjfsg5c67pg3js7gvfqmpson6rjgiuwbsklranwhauq74lbesavftdzf7y3x5zwbi4uu6q2vqimbkttm7k6ycttsgknej2ylkwdxgtut7wecpepnb527pblj3vuzldjt3whsmstax536plulalxtxmvj6vvg4phofyaidhxhhkl4dfb6oabp3fi55jt77pw3jl55pwbsimjpdanuenll2xxctr6swaimuaqk4wvqa6rbgow3onr74v4alkuukc2tlmorvsjpgaazpun6pbfyorazzarhc2r7fjt55pmosowrqcpdwl2q34hcco2f3icmpktchxdvtpmitufnplqaifbtlktkpo7b22244n6dkmxtgcnxtixsit57uhh7rc5rqezjz7pfd7ojhrui5bcdzb7bo2nbo6o24lpwbg4bmqgbqpbwclq6kglgxefryxlkqydillki3545vcrelfw6reszml6emuyjscx377on2qpq26j5jrh5xmbwmpcyq6sewanlbmwwk2vqhq5zunbcyd6h5z3ms3bgfn7lflvev5vwmjnv5nzbgrmpamy453zuvy6xc6jp7tqgpmrlxup7suptejbacm6rdurdhcaori6i25wylgaikfov4dfgeswxdeerogy2m5tbzsdlr7pfhchd4wnokuipfwjzejxiruj5cljm66hvn47j3eseys3nsi6xdh566jgap5s5e7ytdkkhh5lsuv47oose4luozz427dzk577jccjg3n7b4myd565edmsywol3hgh2i54lcya6saaaaaaa"), }, 4: { # client[4] 0: base32.a2b(b"krqwq33febwxk5dbmjwgkiddn5xhiyljnzsxeidwgefhkckeaohanguihdeqshi3vbil354mnoip7yzj3rpsvjbydjlngiqocl2s6dja4dqjzuaghaekxoithualnjp6artv6laaaaaaaaaabb5aaaaaaaaaacsoaaaaaakjl2ynvzguwqjavmynllmjm66qaqz4uh4dinujrxcaafvp5vvzrgueu3fxwkppvopapdw3p4hjezva23vxif5rzgacysmyo7tr4tjd44nnqpsanguihdeqshi3vbil354mnoip7yzj3rpqaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaarybcjqw7lbehfrbu7pwx26vvv3mbjprnqup7q7wxmdsoyiqaqzgbtv6cplrrgpzbnptsoqz7imnauamfaaaaaaaaaaamgaaaaaaaaaaaymaaaaghqaaacr4aaaayxaaaagnyaaaaaaaaaao4qaaaaaaaaacd2gcbacibqbudaskugjcdpodibaeaqkaadqiaq2abqqiaqqaucaeaqbvkghg3s5ivqiq4uiajdzagl7iqhz2uxwj6aqk47oscyqxyljmxkapldxhxigum2ejxflnps3yrxwwehop2zairilrouid5s6wxgnoqg2vq7rx77icxfnx7mq65niuknra3tezhpndwo7pdxtvtci645p4umyhdh5gp3kbdpypphvouaql662r6ufigp4wwm6emrsavlontn4wttg6lv7bcmq6ojw5utpvz3agoenovrkndncjzxog7sp2w7l6jkmzgfxd2asxos5khkjbxbuhgkd6j5yqlzsmk3kq67engtlgyd7hxk7nedw73bq2bs4353wtler23poucntgve22acfxdbyj2f6q2saj2agu2ptfk364d3zayddffxkcah4olnkczjonselwwrvdcu3vch3licaeirogosgsui3y4ovcyzleurbiunp7fsfk2fgmiek7b3jogvrhj4snvkpqjfh2w2lqnkvqvuoojmgrn6sll354mpomajbtlsv6fuguzgsxsm3l2ehxdxfdv6kcoyogaa5raetooahunf4ltvfdb4d7mwgpj4pg7tmcw55ku5vgx7tjqkdkuyq2uvhavkbsbujsrcfzve5ekuhftxm7nmtomibrblbwyxcr5mcy6qqwx66lrhejmgewrig74nzpriualhw4g22qaw423qeezqn6irea3vlgc3foz4egf6irincownoq7utv643vmtoueebigwrl6nehbos3ipsdx44tmucpvyui2jzgfulb5mrrcjuycmuzfigu6tf25lbysqn7n7smhqniddgctjt573rtd6o63wiaactacm7dw7giohzcgwe4okbijnmzlacetmfjjbasrd6whexjmwkaiaabdm4cpjolak4m47telnokjaxwodj7ont7n7vffnmhkzp3lyshkh3qaarzybn64ru5rss7tmi4ttv26q66ebdvvrtyd3s5t7dmqku3uoefroaaibdqu2gyd4hqwgj3jhsu7ievr26vxpzj4g6ovbvqeyljrk6n2xfidtwj6pazanrhwes3e4ln4uettqyd5u5bqroneqie7lkwlxm7xsbg4zhnlc2fybonhlpcatwlgdvk3jpn7sge4qnod2ufxgxc7rphbnunb52xrgmdgpojqhyfaj
xealxwdddlhhbttphrgv5zrub5mggbcec3honrtuuv3epex3s5yvkt2zmsaxfeu34psjwjltm4ys5qa72ryrmgjtmtu3i34jfmachhmgul2j2sddwydgvtpqnatglb3ejlhukxp3isthks6df52kvobtcnscytmjrrfbekvwmhtbcke2cgcyaj2cra7xmnd4bw2xe2qki5kycopo45ekfyxwzsmxuyxvjzqklnqjwm3j3gwcm75ftnrsvbj33w6eyr4dbz2tewum7vfsyfbb3ojw5ujtn22jxpr4nkmkqup6z7ukpp4cdxwyge2psc5suaxaltmp23gbawoo3qjeo44hgwgtkge2oowox3fpxwxkckaqgrxilfeyxmjp4cwf2rhpkbwtveqkukchv3u5rfkykwfj5zhleu3obsif6ldfclaef32wjpe5d6ddh2grdx2yt6tuhw53t6zuuumlw6t6i3e2ev7nh36e4shwbn3ew2bbahn6imgb5sismfttf5cdlr5kw6wvjoaqiaiz2onxecfx735dvon7epthnklq67tnqj4fvcwechbvmnkpiwd5fd36dirpshc7i7zj3rcr5y3kzps5nfnfnik27gdgemfn27iblcjh5bpkio6sr375bmxbh6fshbo7cvjzsdsirsafnbjzgl6ycqczwbacj5sxwgrzl6qbdhfbzev5fzutznzbasejqdjm3qxsdcuqvqau3kih2anr2itgmr44wmwtsk6bd42m2j436ptslaugmbbvtbzsukeqytkse7niovlilyfssn3ipzkqtclmetqrxxn7h56xn2ju7pjnuamd6ijfawn2jpjsrmnawaozeniosvhgovftoj24dl77ytdkxdl7ogappnlgkqsjyy43urtnj6tqf2psfptkbzyx4nu3rzgsqqi5ybx3pu6cvt6de67xutdz566wrkp2ymy5n7tqchmw77ss532noqcbfxv6quum6jmeed3exasdapvid6bilwzm5dcnutkcxktmsdryqopw5ntws3tnbd7um27clmxkgl2uinwzvv4tmo4axbj5zmgfd6sy2fiw6efdwjcyj4awdx3peuxcyh3ccee63w2nqaltierdrevl3f3hnjsrdrl4aosd23szhhaimhg2mjuocottcdsraoczh3waoyxx2skunaphe6w5gutu2z7cag3cx4pgsspfmspphuunzx357x6l36hj3tdys727rhawfwc4wc4ytgts4nrxlxl3xxzffunlhjhzj5guxljwxfrxwjfsg5c67pg3js7gvfqmpson6rjgiuwbsklranwhauq74lbesavftdzf7y3x5zwbi4uu6q2vqimbkttm7k6ycttsgknej2ylkwdxgtut7wecpepnb527pblj3vuzldjt3whsmstax536plulalxtxmvj6vvg4phofyaidhxhhkl4dfb6oabp3fi55jt77pw3jl55pwbsimjpdanuenll2xxctr6swaimuaqk4wvqa6rbgow3onr74v4alkuukc2tlmorvsjpgaazpun6pbfyorazzarhc2r7fjt55pmosowrqcpdwl2q34hcco2f3icmpktchxdvtpmitufnplqaifbtlktkpo7b22244n6dkmxtgcnxtixsit57uhh7rc5rqezjz7pfd7ojhrui5bcdzb7bo2nbo6o24lpwbg4bmqgbqpbwclq6kglgxefryxlkqydillki3545vcrelfw6reszml6emuyjscx377on2qpq26j5jrh5xmbwmpcyq6sewanlbmwwk2vqhq5zunbcyd6h5z3ms3bgfn7lflvev5vwmjnv5nzbgrmpamy453zuvy6xc6jp7tqgpmrlxup7suptejbacm6rdurdhcaori6i25wylgaikfov4dfgeswxdeerogy2m5tbzsdlr7pfhchd4wnokuipfwjzejxiruj5cljm66hvn47j3eseys3nsi6xdh566jgap5s5e7ytdkkhh5lsuv47oose4luozz427dzk577jccjg3n7b4myd565edmsywol3hgh2i54lcya6saaaaaaa"), 5: 
base32.a2b(b"krqwq33febwxk5dbmjwgkiddn5xhiyljnzsxeidwgefhkckeaohanguihdeqshi3vbil354mnoip7yzj3rpsvjbydjlngiqocl2s6dja4dqjzuaghaekxoithualnjp6artv6laaaaaaaaaabb5aaaaaaaaaacsoaaaaaakjl2ynvzguwqjavmynllmjm66qaqz4uh4dinujrxcaafvp5vvzrgueu3fxwkppvopapdw3p4hjezva23vxif5rzgacysmyo7tr4tjd44nnqpsanguihdeqshi3vbil354mnoip7yzj3rpqaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaarybcjqw7lbehfrbu7pwx26vvv3mbjprnqup7q7wxmdsoyiqaqzgbtv6cplrrgpzbnptsoqz7imnauamfaaaaaaaaaaamgaaaaaaaaaaaymaaaaghqaaacr4aaaayxaaaagnyaaaaaaaaaao4qaaaaaaaaacd2gcbacibqbudaskugjcdpodibaeaqkaadqiaq2abqqiaqqaucaeaqbvkghg3s5ivqiq4uiajdzagl7iqhz2uxwj6aqk47oscyqxyljmxkapldxhxigum2ejxflnps3yrxwwehop2zairilrouid5s6wxgnoqg2vq7rx77icxfnx7mq65niuknra3tezhpndwo7pdxtvtci645p4umyhdh5gp3kbdpypphvouaql662r6ufigp4wwm6emrsavlontn4wttg6lv7bcmq6ojw5utpvz3agoenovrkndncjzxog7sp2w7l6jkmzgfxd2asxos5khkjbxbuhgkd6j5yqlzsmk3kq67engtlgyd7hxk7nedw73bq2bs4353wtler23poucntgve22acfxdbyj2f6q2saj2agu2ptfk364d3zayddffxkcah4olnkczjonselwwrvdcu3vch3licaeirogosgsui3y4ovcyzleurbiunp7fsfk2fgmiek7b3jogvrhj4snvkpqjfh2w2lqnkvqvuoojmgrn6sll354mpomajbtlsv6fuguzgsxsm3l2ehxdxfdv6kcoyogaa5raetooahunf4ltvfdb4d7mwgpj4pg7tmcw55ku5vgx7tjqkdkuyq2uvhavkbsbujsrcfzve5ekuhftxm7nmtomibrblbwyxcr5mcy6qqwx66lrhejmgewrig74nzpriualhw4g22qaw423qeezqn6irea3vlgc3foz4egf6irincownoq7utv643vmtoueebigwrl6nehbos3ipsdx44tmucpvyui2jzgfulb5mrrcjuycmuzfigu6tf25lbysqn7n7smhqniddgctjt573rtd6o63wiaactacm7dw7giohzcgwe4okbijnmzlacetmfjjbasrd6whexjmwkaiaaazuum3xriq54h5v6afcrrl3kkbd46oizwulc5fbslmblxfc3ldyyqaavmjy6g336aewy42vw5rusytyi7vzs6y22c5jhxyt5w6gthcbjp4zaajwnpw5yhhwh4hyctajptujjwg7cswzjkwucke6yvbuejqhrnbafadv245phzjfluujm5pyfx43oagwtsdkgtw2v4i56uexjrumsdes6go7556an26wmzpbskyrsx4qbzqcedilovhlkrlnhvsfr4mjwkw62mkf4kde7jgesu4ztbzc7xmuobydnxk5hdyyly6n7socvrsqw6z56v6osxr2vgxpz6jor7ciyclkungeaayume5hdrm6cbnvwgua4gc2fcpixfdbkiijnmlicribyoinnpu6zdce4mdfqyl4qzup3kkk5qju2wthks6df52kvobtcnscytmjrrfbekvwmhtbcke2cgcyaj2cra7xmnd4bw2xe2qki5kycopo45ekfyxwzsmxuyxvjzqklnqjwm3j3gwcm75ftnrsvbj33w6eyr4dbz2tewum7vfsyfbb3ojw5ujtn22jxpr4nkmkqup6z7ukpp4cdxwyge2psc5suaxaltmp23gbawoo3qjeo44hgwgtkge2oowox3fpxwxkckaqgrxilfeyxmjp4cwf2rhpkbwtveqkukchv3u5rfkykwfj5zhleu3obsif6ldfclaef32wjpe5d6ddh2grdx2yt6tuhw53t6zuuumlw6t6i3e2ev7nh36e4shwbn3ew2bbahn6imgb5sismfttf5cdlr5kw6wvjoaqiaiz2onxecfx735dvon7epthnklq67tnqj4fvcwechbvmnkpiwd5fd36dirpshc7i7zj3rcr5y3kzps5nfnfnik27gdgemfn27iblcjh5bpkio6sr375bmxbh6fshbo7cvjzsdsirsafnbjzgl6ycqczwbacj5sxwgrzl6qbdhfbzev5fzutznzbasejqdjm3qxsdcuqvqau3kih2anr2itgmr44wmwtsk6bd42m2j436ptslaugmbbvtbzsukeqytkse7niovlilyfssn3ipzkqtclmetqrxxn7h56xn2ju7pjnuamd6ijfawn2jpjsrmnawaozeniosvhgovftoj24dl77ytdkxdl7ogappnlgkqsjyy43urtnj6tqf2psfptkbzyx4nu3rzgsqqi5ybx3pu6cvt6de67xutdz566wrkp2ymy5n7tqchmw77ss532noqcbfxv6quum6jmeed3exasdapvid6bilwzm5dcnutkcxktmsdryqopw5ntws3tnbd7um27clmxkgl2uinwzvv4tmo4axbj5zmgfd6sy2fiw6efdwjcyj4awdx3peuxcyh3ccee63w2nqaltierdrevl3f3hnjsrdrl4aosd23szhhaimhg2mjuocottcdsraoczh3waoyxx2skunaphe6w5gutu2z7cag3cx4pgsspfmspphuunzx357x6l36hj3tdys727rhawfwc4wc4ytgts4nrxlxl3xxzffunlhjhzj5guxljwxfrxwjfsg5c67pg3js7gvfqmpson6rjgiuwbsklranwhauq74lbesavftdzf7y3x5zwbi4uu6q2vqimbkttm7k6ycttsgknej2ylkwdxgtut7wecpepnb5
27pblj3vuzldjt3whsmstax536plulalxtxmvj6vvg4phofyaidhxhhkl4dfb6oabp3fi55jt77pw3jl55pwbsimjpdanuenll2xxctr6swaimuaqk4wvqa6rbgow3onr74v4alkuukc2tlmorvsjpgaazpun6pbfyorazzarhc2r7fjt55pmosowrqcpdwl2q34hcco2f3icmpktchxdvtpmitufnplqaifbtlktkpo7b22244n6dkmxtgcnxtixsit57uhh7rc5rqezjz7pfd7ojhrui5bcdzb7bo2nbo6o24lpwbg4bmqgbqpbwclq6kglgxefryxlkqydillki3545vcrelfw6reszml6emuyjscx377on2qpq26j5jrh5xmbwmpcyq6sewanlbmwwk2vqhq5zunbcyd6h5z3ms3bgfn7lflvev5vwmjnv5nzbgrmpamy453zuvy6xc6jp7tqgpmrlxup7suptejbacm6rdurdhcaori6i25wylgaikfov4dfgeswxdeerogy2m5tbzsdlr7pfhchd4wnokuipfwjzejxiruj5cljm66hvn47j3eseys3nsi6xdh566jgap5s5e7ytdkkhh5lsuv47oose4luozz427dzk577jccjg3n7b4myd565edmsywol3hgh2i54lcya6saaaaaaa"), }, } #--------- END stored_shares.py ---------------- class _Base(GridTestMixin, ShouldFailMixin): def create_shares(self, ignored=None): u = upload.Data(plaintext, None) d = self.c0.upload(u) f = open("stored_shares.py", "w") def _created_immutable(ur): # write the generated shares and URI to a file, which can then be # incorporated into this one next time. f.write('immutable_uri = b"%s"\n' % ur.get_uri()) f.write('immutable_shares = {\n') si = uri.from_string(ur.get_uri()).get_storage_index() si_dir = storage_index_to_dir(si) for (i,ss,ssdir) in self.iterate_servers(): sharedir = os.path.join(ssdir, "shares", si_dir) shares = {} for fn in os.listdir(sharedir): shnum = int(fn) sharedata = open(os.path.join(sharedir, fn), "rb").read() shares[shnum] = sharedata fileutil.rm_dir(sharedir) if shares: f.write(' %d: { # client[%d]\n' % (i, i)) for shnum in sorted(shares.keys()): f.write(' %d: base32.a2b(b"%s"),\n' % (shnum, base32.b2a(shares[shnum]))) f.write(' },\n') f.write('}\n') f.write('\n') d.addCallback(_created_immutable) d.addCallback(lambda ignored: self.c0.create_mutable_file(mutable_plaintext)) def _created_mutable(n): f.write('mutable_uri = b"%s"\n' % n.get_uri()) f.write('mutable_shares = {\n') si = uri.from_string(n.get_uri()).get_storage_index() si_dir = storage_index_to_dir(si) for (i,ss,ssdir) in self.iterate_servers(): sharedir = os.path.join(ssdir, "shares", si_dir) shares = {} for fn in os.listdir(sharedir): shnum = int(fn) sharedata = open(os.path.join(sharedir, fn), "rb").read() shares[shnum] = sharedata fileutil.rm_dir(sharedir) if shares: f.write(' %d: { # client[%d]\n' % (i, i)) for shnum in sorted(shares.keys()): f.write(' %d: base32.a2b(b"%s"),\n' % (shnum, base32.b2a(shares[shnum]))) f.write(' },\n') f.write('}\n') f.close() d.addCallback(_created_mutable) def _done(ignored): f.close() d.addCallback(_done) return d def load_shares(self, ignored=None): # this uses the data generated by create_shares() to populate the # storage servers with pre-generated shares si = uri.from_string(immutable_uri).get_storage_index() si_dir = storage_index_to_dir(si) for i in immutable_shares: shares = immutable_shares[i] for shnum in shares: dn = os.path.join(self.get_serverdir(i), "shares", si_dir) fileutil.make_dirs(dn) fn = os.path.join(dn, str(shnum)) f = open(fn, "wb") f.write(shares[shnum]) f.close() si = uri.from_string(mutable_uri).get_storage_index() si_dir = storage_index_to_dir(si) for i in mutable_shares: shares = mutable_shares[i] for shnum in shares: dn = os.path.join(self.get_serverdir(i), "shares", si_dir) fileutil.make_dirs(dn) fn = os.path.join(dn, str(shnum)) f = open(fn, "wb") f.write(shares[shnum]) f.close() def download_immutable(self, ignored=None): n = self.c0.create_node_from_uri(immutable_uri) d = download_to_data(n) def _got_data(data): self.failUnlessEqual(data, plaintext) 
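# A minimal standalone sketch (not part of the original test suite) of the
# write-back step performed by load_shares() above: pre-generated share bytes
# are laid out as shares/<storage-index-dir>/<share-number> under each
# server's directory.  "serverdir_for" is a hypothetical stand-in for
# GridTestMixin.get_serverdir(); the original uses fileutil.make_dirs().
import os

def write_shares_back(shares_by_client, si_dir, serverdir_for):
    """shares_by_client maps client index -> {shnum: share bytes}."""
    for client_index, shares in shares_by_client.items():
        share_dir = os.path.join(serverdir_for(client_index), "shares", si_dir)
        os.makedirs(share_dir, exist_ok=True)
        for shnum, data in shares.items():
            with open(os.path.join(share_dir, str(shnum)), "wb") as fh:
                fh.write(data)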
d.addCallback(_got_data) # make sure we can use the same node twice d.addCallback(lambda ign: download_to_data(n)) d.addCallback(_got_data) return d def download_mutable(self, ignored=None): n = self.c0.create_node_from_uri(mutable_uri) d = n.download_best_version() def _got_data(data): self.failUnlessEqual(data, mutable_plaintext) d.addCallback(_got_data) return d class DownloadTest(_Base, unittest.TestCase): def test_download(self): self.basedir = self.mktemp() self.set_up_grid() self.c0 = self.g.clients[0] # do this to create the shares #return self.create_shares() self.load_shares() d = self.download_immutable() d.addCallback(self.download_mutable) return d def test_download_failover(self): self.basedir = self.mktemp() self.set_up_grid() self.c0 = self.g.clients[0] self.load_shares() si = uri.from_string(immutable_uri).get_storage_index() si_dir = storage_index_to_dir(si) n = self.c0.create_node_from_uri(immutable_uri) d = download_to_data(n) def _got_data(data): self.failUnlessEqual(data, plaintext) d.addCallback(_got_data) def _clobber_some_shares(ign): # find the three shares that were used, and delete them. Then # download again, forcing the downloader to fail over to other # shares for s in n._cnode._node._shares: for clientnum in immutable_shares: for shnum in immutable_shares[clientnum]: if s._shnum == shnum: fn = os.path.join(self.get_serverdir(clientnum), "shares", si_dir, str(shnum)) os.unlink(fn) d.addCallback(_clobber_some_shares) d.addCallback(lambda ign: download_to_data(n)) d.addCallback(_got_data) def _clobber_most_shares(ign): # delete all but one of the shares that are still alive live_shares = [s for s in n._cnode._node._shares if s.is_alive()] save_me = live_shares[0]._shnum for clientnum in immutable_shares: for shnum in immutable_shares[clientnum]: if shnum == save_me: continue fn = os.path.join(self.get_serverdir(clientnum), "shares", si_dir, str(shnum)) if os.path.exists(fn): os.unlink(fn) # now the download should fail with NotEnoughSharesError return self.shouldFail(NotEnoughSharesError, "1shares", None, download_to_data, n) d.addCallback(_clobber_most_shares) def _clobber_all_shares(ign): # delete the last remaining share for clientnum in immutable_shares: for shnum in immutable_shares[clientnum]: fn = os.path.join(self.get_serverdir(clientnum), "shares", si_dir, str(shnum)) if os.path.exists(fn): os.unlink(fn) # now a new download should fail with NoSharesError. We want a # new ImmutableFileNode so it will forget about the old shares. # If we merely called create_node_from_uri() without first # dereferencing the original node, the NodeMaker's _node_cache # would give us back the old one. n = None n = self.c0.create_node_from_uri(immutable_uri) return self.shouldFail(NoSharesError, "0shares", None, download_to_data, n) d.addCallback(_clobber_all_shares) return d def test_lost_servers(self): # while downloading a file (after seg[0], before seg[1]), lose the # three servers that we were using. The download should switch over # to other servers. self.basedir = self.mktemp() self.set_up_grid() self.c0 = self.g.clients[0] # upload a file with multiple segments, so we can catch the download # in the middle. 
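# Back-of-the-envelope sketch (standalone, not used by the tests) of the
# segment-count arithmetic behind comments like "# 5 segs": the ~310-byte
# plaintext is split by u.max_segment_size using ceiling division.  The real
# encoder may round the segment size further for erasure coding, so treat
# this only as the arithmetic the comments assume.
import math

def expected_segment_count(plaintext_len, max_segment_size):
    return math.ceil(plaintext_len / max_segment_size)

assert expected_segment_count(310, 70) == 5    # "# 5 segs"
assert expected_segment_count(310, 120) == 3   # "# 3 segs"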
u = upload.Data(plaintext, None) u.max_segment_size = 70 # 5 segs d = self.c0.upload(u) def _uploaded(ur): self.uri = ur.get_uri() self.n = self.c0.create_node_from_uri(self.uri) return download_to_data(self.n) d.addCallback(_uploaded) def _got_data(data): self.failUnlessEqual(data, plaintext) d.addCallback(_got_data) def _kill_some_shares(): # find the shares that were used and delete them shares = self.n._cnode._node._shares self.killed_share_nums = sorted([s._shnum for s in shares]) # break the RIBucketReader references # (we don't break the RIStorageServer references, because that # isn't needed to test the current downloader implementation) for s in shares: s._rref.broken = True def _download_again(ign): # download again, deleting some shares after the first write # to the consumer c = StallingConsumer(_kill_some_shares) return self.n.read(c) d.addCallback(_download_again) def _check_failover(c): self.failUnlessEqual(b"".join(c.chunks), plaintext) shares = self.n._cnode._node._shares shnums = sorted([s._shnum for s in shares]) self.failIfEqual(shnums, self.killed_share_nums) d.addCallback(_check_failover) return d def test_long_offset(self): # bug #1154: mplayer doing a seek-to-end results in an offset of type # 'long', rather than 'int', and apparently __len__ is required to # return an int. Rewrote Spans/DataSpans to provide s.len() instead # of len(s) . self.basedir = self.mktemp() self.set_up_grid() self.c0 = self.g.clients[0] self.load_shares() n = self.c0.create_node_from_uri(immutable_uri) c = MemoryConsumer() d = n.read(c, int(0), int(10)) d.addCallback(lambda c: len(b"".join(c.chunks))) d.addCallback(lambda size: self.failUnlessEqual(size, 10)) return d def test_badguess(self): self.basedir = self.mktemp() self.set_up_grid() self.c0 = self.g.clients[0] self.load_shares() n = self.c0.create_node_from_uri(immutable_uri) # Cause the downloader to guess a segsize that's too low, so it will # ask for a segment number that's too high (beyond the end of the # real list, causing BadSegmentNumberError), to exercise # Segmentation._retry_bad_segment n._cnode._maybe_create_download_node() n._cnode._node._build_guessed_tables(90) con1 = MemoryConsumer() # plaintext size of 310 bytes, wrong-segsize of 90 bytes, will make # us think that file[180:200] is in the third segment (segnum=2), but # really there's only one segment d = n.read(con1, 180, 20) def _done(res): self.failUnlessEqual(b"".join(con1.chunks), plaintext[180:200]) d.addCallback(_done) return d def test_simultaneous_badguess(self): self.basedir = self.mktemp() self.set_up_grid() self.c0 = self.g.clients[0] # upload a file with multiple segments, and a non-default segsize, to # exercise the offset-guessing code. Because we don't tell the # downloader about the unusual segsize, it will guess wrong, and have # to do extra roundtrips to get the correct data. 
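# Standalone sketch (not part of the test suite) of the naive offset-to-segment
# mapping that test_badguess above relies on: with a guessed segsize of 90,
# byte offset 180 appears to live in segnum 2, although the real 310-byte file
# has only one segment -- which is what drives the downloader into
# BadSegmentNumberError and Segmentation._retry_bad_segment.  This is only the
# mapping implied by the comment, not the downloader's actual table logic.

def guessed_segnum(offset, guessed_segsize):
    return offset // guessed_segsize

assert guessed_segnum(180, 90) == 2     # "the third segment (segnum=2)"
assert guessed_segnum(180, 310) == 0    # with the true segsize, only seg 0 exists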
u = upload.Data(plaintext, None) u.max_segment_size = 70 # 5 segs, 8-wide hashtree con1 = MemoryConsumer() con2 = MemoryConsumer() d = self.c0.upload(u) def _uploaded(ur): n = self.c0.create_node_from_uri(ur.get_uri()) d1 = n.read(con1, 70, 20) d2 = n.read(con2, 140, 20) return defer.gatherResults([d1,d2]) d.addCallback(_uploaded) def _done(res): self.failUnlessEqual(b"".join(con1.chunks), plaintext[70:90]) self.failUnlessEqual(b"".join(con2.chunks), plaintext[140:160]) d.addCallback(_done) return d def test_simultaneous_goodguess(self): self.basedir = self.mktemp() self.set_up_grid() self.c0 = self.g.clients[0] # upload a file with multiple segments, and a non-default segsize, to # exercise the offset-guessing code. This time we *do* tell the # downloader about the unusual segsize, so it can guess right. u = upload.Data(plaintext, None) u.max_segment_size = 70 # 5 segs, 8-wide hashtree con1 = MemoryConsumer() con2 = MemoryConsumer() d = self.c0.upload(u) def _uploaded(ur): n = self.c0.create_node_from_uri(ur.get_uri()) n._cnode._maybe_create_download_node() n._cnode._node._build_guessed_tables(u.max_segment_size) d1 = n.read(con1, 70, 20) d2 = n.read(con2, 140, 20) return defer.gatherResults([d1,d2]) d.addCallback(_uploaded) def _done(res): self.failUnlessEqual(b"".join(con1.chunks), plaintext[70:90]) self.failUnlessEqual(b"".join(con2.chunks), plaintext[140:160]) d.addCallback(_done) return d def test_sequential_goodguess(self): self.basedir = self.mktemp() self.set_up_grid() self.c0 = self.g.clients[0] data = (plaintext*100)[:30000] # multiple of k # upload a file with multiple segments, and a non-default segsize, to # exercise the offset-guessing code. This time we *do* tell the # downloader about the unusual segsize, so it can guess right. u = upload.Data(data, None) u.max_segment_size = 6000 # 5 segs, 8-wide hashtree con1 = MemoryConsumer() con2 = MemoryConsumer() d = self.c0.upload(u) def _uploaded(ur): n = self.c0.create_node_from_uri(ur.get_uri()) n._cnode._maybe_create_download_node() n._cnode._node._build_guessed_tables(u.max_segment_size) d = n.read(con1, 12000, 20) def _read1(ign): self.failUnlessEqual(b"".join(con1.chunks), data[12000:12020]) return n.read(con2, 24000, 20) d.addCallback(_read1) def _read2(ign): self.failUnlessEqual(b"".join(con2.chunks), data[24000:24020]) d.addCallback(_read2) return d d.addCallback(_uploaded) return d def test_simultaneous_get_blocks(self): self.basedir = self.mktemp() self.set_up_grid() self.c0 = self.g.clients[0] self.load_shares() stay_empty = [] n = self.c0.create_node_from_uri(immutable_uri) d = download_to_data(n) def _use_shares(ign): shares = list(n._cnode._node._shares) s0 = shares[0] # make sure .cancel works too o0 = s0.get_block(0) o0.subscribe(lambda **kwargs: stay_empty.append(kwargs)) o1 = s0.get_block(0) o2 = s0.get_block(0) o0.cancel() o3 = s0.get_block(1) # state=BADSEGNUM d1 = defer.Deferred() d2 = defer.Deferred() d3 = defer.Deferred() o1.subscribe(lambda **kwargs: d1.callback(kwargs)) o2.subscribe(lambda **kwargs: d2.callback(kwargs)) o3.subscribe(lambda **kwargs: d3.callback(kwargs)) return defer.gatherResults([d1,d2,d3]) d.addCallback(_use_shares) def _done(res): r1,r2,r3 = res self.failUnlessEqual(r1["state"], "COMPLETE") self.failUnlessEqual(r2["state"], "COMPLETE") self.failUnlessEqual(r3["state"], "BADSEGNUM") self.failUnless("block" in r1) self.failUnless("block" in r2) self.failIf(stay_empty) d.addCallback(_done) return d def test_simul_1fail_1cancel(self): # This exercises an mplayer behavior in ticket #1154. 
I believe that # mplayer made two simultaneous webapi GET requests: first one for an # index region at the end of the (mp3/video) file, then one for the # first block of the file (the order doesn't really matter). All GETs # failed (NoSharesError) because of the type(__len__)==long bug. Each # GET submitted a DownloadNode.get_segment() request, which was # queued by the DN (DN._segment_requests), so the second one was # blocked waiting on the first one. When the first one failed, # DN.fetch_failed() was invoked, which errbacks the first GET, but # left the other one hanging (the lost-progress bug mentioned in # #1154 comment 10) # # Then mplayer sees that the index region GET failed, so it cancels # the first-block GET (by closing the HTTP request), triggering # stopProducer. The second GET was waiting in the Deferred (between # n.get_segment() and self._request_retired), so its # _cancel_segment_request was active, so was invoked. However, # DN._active_segment was None since it was not working on any segment # at that time, hence the error in #1154. self.basedir = self.mktemp() self.set_up_grid() self.c0 = self.g.clients[0] # upload a file with multiple segments, so we can catch the download # in the middle. Tell the downloader, so it can guess correctly. u = upload.Data(plaintext, None) u.max_segment_size = 70 # 5 segs d = self.c0.upload(u) def _uploaded(ur): # corrupt all the shares so the download will fail def _corruptor(s, debug=False): which = 48 # first byte of block0 return s[:which] + bchr(ord(s[which:which+1])^0x01) + s[which+1:] self.corrupt_all_shares(ur.get_uri(), _corruptor) n = self.c0.create_node_from_uri(ur.get_uri()) n._cnode._maybe_create_download_node() n._cnode._node._build_guessed_tables(u.max_segment_size) con1 = MemoryConsumer() con2 = MemoryConsumer() d = n.read(con1, int(0), int(20)) d2 = n.read(con2, int(140), int(20)) # con2 will be cancelled, so d2 should fail with DownloadStopped def _con2_should_not_succeed(res): self.fail("the second read should not have succeeded") def _con2_failed(f): self.failUnless(f.check(DownloadStopped)) d2.addCallbacks(_con2_should_not_succeed, _con2_failed) def _con1_should_not_succeed(res): self.fail("the first read should not have succeeded") def _con1_failed(f): self.failUnless(f.check(NoSharesError)) con2.producer.stopProducing() return d2 d.addCallbacks(_con1_should_not_succeed, _con1_failed) return d d.addCallback(_uploaded) return d def test_simultaneous_onefails(self): self.basedir = self.mktemp() self.set_up_grid() self.c0 = self.g.clients[0] # upload a file with multiple segments, so we can catch the download # in the middle. Tell the downloader, so it can guess correctly. u = upload.Data(plaintext, None) u.max_segment_size = 70 # 5 segs d = self.c0.upload(u) def _uploaded(ur): # corrupt all the shares so the download will fail def _corruptor(s, debug=False): which = 48 # first byte of block0 return s[:which] + bchr(ord(s[which:which+1])^0x01) + s[which+1:] self.corrupt_all_shares(ur.get_uri(), _corruptor) n = self.c0.create_node_from_uri(ur.get_uri()) n._cnode._maybe_create_download_node() n._cnode._node._build_guessed_tables(u.max_segment_size) con1 = MemoryConsumer() con2 = MemoryConsumer() d = n.read(con1, int(0), int(20)) d2 = n.read(con2, int(140), int(20)) # con2 should wait for con1 to fail and then con2 should succeed. # In particular, we should not lose progress. If this test fails, # it will fail with a timeout error. 
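# Standalone sketch (not used by the tests) of the corruption primitive that
# the tests above and below keep rebuilding inline with slicing and bchr():
# flip the least significant bit of a single byte of a share at a chosen
# offset (offset 48 is described in these tests as the first byte of block 0).
# A bytearray is the more direct Python 3 spelling of the same operation.

def flip_lsb(share, which):
    """Return a copy of the share bytes with bit 0 of byte `which` inverted."""
    mutated = bytearray(share)
    mutated[which] ^= 0x01
    return bytes(mutated)

assert flip_lsb(b"\x00\x01\x02", 1) == b"\x00\x00\x02"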
def _con2_should_succeed(res): # this should succeed because we only corrupted the first # segment of each share. The segment that holds [140:160] is # fine, as are the hash chains and UEB. self.failUnlessEqual(b"".join(con2.chunks), plaintext[140:160]) d2.addCallback(_con2_should_succeed) def _con1_should_not_succeed(res): self.fail("the first read should not have succeeded") def _con1_failed(f): self.failUnless(f.check(NoSharesError)) # we *don't* cancel the second one here: this exercises a # lost-progress bug from #1154. We just wait for it to # succeed. return d2 d.addCallbacks(_con1_should_not_succeed, _con1_failed) return d d.addCallback(_uploaded) return d def test_download_no_overrun(self): self.basedir = self.mktemp() self.set_up_grid() self.c0 = self.g.clients[0] self.load_shares() # tweak the client's copies of server-version data, so it believes # that they're old and can't handle reads that overrun the length of # the share. This exercises a different code path. for s in self.c0.storage_broker.get_connected_servers(): v = s.get_version() v1 = v[b"http://allmydata.org/tahoe/protocols/storage/v1"] v1[b"tolerates-immutable-read-overrun"] = False n = self.c0.create_node_from_uri(immutable_uri) d = download_to_data(n) def _got_data(data): self.failUnlessEqual(data, plaintext) d.addCallback(_got_data) return d def test_download_segment(self): self.basedir = self.mktemp() self.set_up_grid() self.c0 = self.g.clients[0] self.load_shares() n = self.c0.create_node_from_uri(immutable_uri) cn = n._cnode (d,c) = cn.get_segment(0) def _got_segment(offset_and_data_and_decodetime): (offset, data, decodetime) = offset_and_data_and_decodetime self.failUnlessEqual(offset, 0) self.failUnlessEqual(len(data), len(plaintext)) d.addCallback(_got_segment) return d def test_download_segment_cancel(self): self.basedir = self.mktemp() self.set_up_grid() self.c0 = self.g.clients[0] self.load_shares() n = self.c0.create_node_from_uri(immutable_uri) cn = n._cnode (d,c) = cn.get_segment(0) fired = [] d.addCallback(fired.append) c.cancel() d = fireEventually() d.addCallback(flushEventualQueue) def _check(ign): self.failUnlessEqual(fired, []) d.addCallback(_check) return d def test_download_bad_segment(self): self.basedir = self.mktemp() self.set_up_grid() self.c0 = self.g.clients[0] self.load_shares() n = self.c0.create_node_from_uri(immutable_uri) cn = n._cnode def _try_download(): (d,c) = cn.get_segment(1) return d d = self.shouldFail(BadSegmentNumberError, "badseg", "segnum=1, numsegs=1", _try_download) return d def test_download_segment_terminate(self): self.basedir = self.mktemp() self.set_up_grid() self.c0 = self.g.clients[0] self.load_shares() n = self.c0.create_node_from_uri(immutable_uri) cn = n._cnode (d,c) = cn.get_segment(0) fired = [] d.addCallback(fired.append) self.c0.terminator.disownServiceParent() d = fireEventually() d.addCallback(flushEventualQueue) def _check(ign): self.failUnlessEqual(fired, []) d.addCallback(_check) return d def test_pause(self): self.basedir = self.mktemp() self.set_up_grid() self.c0 = self.g.clients[0] self.load_shares() n = self.c0.create_node_from_uri(immutable_uri) c = PausingConsumer() d = n.read(c) def _downloaded(mc): newdata = b"".join(mc.chunks) self.failUnlessEqual(newdata, plaintext) d.addCallback(_downloaded) return d def test_pause_then_stop(self): self.basedir = self.mktemp() self.set_up_grid() self.c0 = self.g.clients[0] self.load_shares() n = self.c0.create_node_from_uri(immutable_uri) c = PausingAndStoppingConsumer() d = 
self.shouldFail(DownloadStopped, "test_pause_then_stop", "our Consumer called stopProducing()", n.read, c) return d def test_stop(self): # use a download target that stops after the first segment (#473) self.basedir = self.mktemp() self.set_up_grid() self.c0 = self.g.clients[0] self.load_shares() n = self.c0.create_node_from_uri(immutable_uri) c = StoppingConsumer() d = self.shouldFail(DownloadStopped, "test_stop", "our Consumer called stopProducing()", n.read, c) return d def test_stop_immediately(self): # and a target that stops right after registerProducer (maybe #1154) self.basedir = self.mktemp() self.set_up_grid() self.c0 = self.g.clients[0] self.load_shares() n = self.c0.create_node_from_uri(immutable_uri) c = ImmediatelyStoppingConsumer() # stops after registerProducer d = self.shouldFail(DownloadStopped, "test_stop_immediately", "our Consumer called stopProducing()", n.read, c) return d def test_stop_immediately2(self): # and a target that stops right after registerProducer (maybe #1154) self.basedir = self.mktemp() self.set_up_grid() self.c0 = self.g.clients[0] self.load_shares() n = self.c0.create_node_from_uri(immutable_uri) c = MemoryConsumer() d0 = n.read(c) c.producer.stopProducing() d = self.shouldFail(DownloadStopped, "test_stop_immediately", "our Consumer called stopProducing()", lambda: d0) return d def test_download_segment_bad_ciphertext_hash(self): # The crypttext_hash_tree asserts the integrity of the decoded # ciphertext, and exists to detect two sorts of problems. The first # is a bug in zfec decode. The second is the "two-sided t-shirt" # attack (found by Christian Grothoff), in which a malicious uploader # creates two sets of shares (one for file A, second for file B), # uploads a combination of them (shares 0-4 of A, 5-9 of B), and then # builds an otherwise normal UEB around those shares: their goal is # to give their victim a filecap which sometimes downloads the good A # contents, and sometimes the bad B contents, depending upon which # servers/shares they can get to. Having a hash of the ciphertext # forces them to commit to exactly one version. (Christian's prize # for finding this problem was a t-shirt with two sides: the shares # of file A on the front, B on the back). # creating a set of shares with this property is too hard, although # it'd be nice to do so and confirm our fix. (it requires a lot of # tampering with the uploader). So instead, we just damage the # decoder. The tail decoder is rebuilt each time, so we need to use a # file with multiple segments. self.basedir = self.mktemp() self.set_up_grid() self.c0 = self.g.clients[0] u = upload.Data(plaintext, None) u.max_segment_size = 60 # 6 segs d = self.c0.upload(u) def _uploaded(ur): n = self.c0.create_node_from_uri(ur.get_uri()) n._cnode._maybe_create_download_node() n._cnode._node._build_guessed_tables(u.max_segment_size) d = download_to_data(n) def _break_codec(data): # the codec isn't created until the UEB is retrieved node = n._cnode._node vcap = node._verifycap k, N = vcap.needed_shares, vcap.total_shares bad_codec = BrokenDecoder() bad_codec.set_params(node.segment_size, k, N) node._codec = bad_codec d.addCallback(_break_codec) # now try to download it again. The broken codec will provide # ciphertext that fails the hash test. 
d.addCallback(lambda ign: self.shouldFail(BadCiphertextHashError, "badhash", "hash failure in " "ciphertext_hash_tree: segnum=0", download_to_data, n)) return d d.addCallback(_uploaded) return d def OFFtest_download_segment_XXX(self): self.basedir = self.mktemp() self.set_up_grid() self.c0 = self.g.clients[0] # upload a file with multiple segments, and a non-default segsize, to # exercise the offset-guessing code. This time we *do* tell the # downloader about the unusual segsize, so it can guess right. u = upload.Data(plaintext, None) u.max_segment_size = 70 # 5 segs, 8-wide hashtree con1 = MemoryConsumer() con2 = MemoryConsumer() d = self.c0.upload(u) def _uploaded(ur): n = self.c0.create_node_from_uri(ur.get_uri()) n._cnode._maybe_create_download_node() n._cnode._node._build_guessed_tables(u.max_segment_size) d1 = n.read(con1, 70, 20) #d2 = n.read(con2, 140, 20) d2 = defer.succeed(None) return defer.gatherResults([d1,d2]) d.addCallback(_uploaded) def _done(res): self.failUnlessEqual(b"".join(con1.chunks), plaintext[70:90]) self.failUnlessEqual(b"".join(con2.chunks), plaintext[140:160]) #d.addCallback(_done) return d def test_duplicate_shares(self): self.basedir = self.mktemp() self.set_up_grid() self.c0 = self.g.clients[0] self.load_shares() # make sure everybody has a copy of sh0. The second server contacted # will report two shares, and the ShareFinder will handle the # duplicate by attaching both to the same CommonShare instance. si = uri.from_string(immutable_uri).get_storage_index() si_dir = storage_index_to_dir(si) sh0_file = [sharefile for (shnum, serverid, sharefile) in self.find_uri_shares(immutable_uri) if shnum == 0][0] sh0_data = open(sh0_file, "rb").read() for clientnum in immutable_shares: if 0 in immutable_shares[clientnum]: continue cdir = self.get_serverdir(clientnum) target = os.path.join(cdir, "shares", si_dir, "0") outf = open(target, "wb") outf.write(sh0_data) outf.close() d = self.download_immutable() return d def test_verifycap(self): self.basedir = self.mktemp() self.set_up_grid() self.c0 = self.g.clients[0] self.load_shares() n = self.c0.create_node_from_uri(immutable_uri) vcap = n.get_verify_cap().to_string() vn = self.c0.create_node_from_uri(vcap) d = download_to_data(vn) def _got_ciphertext(ciphertext): self.failUnlessEqual(len(ciphertext), len(plaintext)) self.failIfEqual(ciphertext, plaintext) d.addCallback(_got_ciphertext) return d class BrokenDecoder(CRSDecoder): def decode(self, shares, shareids): d = CRSDecoder.decode(self, shares, shareids) def _decoded(buffers): def _corruptor(s, which): return s[:which] + bchr(ord(s[which:which+1])^0x01) + s[which+1:] buffers[0] = _corruptor(buffers[0], 0) # flip lsb of first byte return buffers d.addCallback(_decoded) return d class PausingConsumer(MemoryConsumer): def __init__(self): MemoryConsumer.__init__(self) self.size = 0 self.writes = 0 def write(self, data): self.size += len(data) self.writes += 1 if self.writes <= 2: # we happen to use 4 segments, and want to avoid pausing on the # last one (since then the _unpause timer will still be running) self.producer.pauseProducing() reactor.callLater(0.1, self._unpause) return MemoryConsumer.write(self, data) def _unpause(self): self.producer.resumeProducing() class PausingAndStoppingConsumer(PausingConsumer): debug_stopped = False def write(self, data): if self.debug_stopped: raise Exception("I'm stopped, don't write to me") self.producer.pauseProducing() eventually(self._stop) def _stop(self): self.debug_stopped = True self.producer.stopProducing() class 
StoppingConsumer(PausingConsumer): def write(self, data): self.producer.stopProducing() class ImmediatelyStoppingConsumer(MemoryConsumer): def registerProducer(self, p, streaming): MemoryConsumer.registerProducer(self, p, streaming) self.producer.stopProducing() class StallingConsumer(MemoryConsumer): def __init__(self, halfway_cb): MemoryConsumer.__init__(self) self.halfway_cb = halfway_cb self.writes = 0 def write(self, data): self.writes += 1 if self.writes == 1: self.halfway_cb() return MemoryConsumer.write(self, data) class Corruption(_Base, unittest.TestCase): def _corrupt_flip(self, ign, imm_uri, which): log.msg("corrupt %d" % which) def _corruptor(s, debug=False): return s[:which] + bchr(ord(s[which:which+1])^0x01) + s[which+1:] self.corrupt_shares_numbered(imm_uri, [2], _corruptor) def _corrupt_set(self, ign, imm_uri, which, newvalue): # type: (Any, bytes, int, int) -> None """ Replace a single byte share file number 2 for the given capability with a new byte. :param imm_uri: Corrupt share number 2 belonging to this capability. :param which: The byte position to replace. :param newvalue: The new byte value to set in the share. """ log.msg("corrupt %d" % which) def _corruptor(s, debug=False): return s[:which] + bchr(newvalue) + s[which+1:] self.corrupt_shares_numbered(imm_uri, [2], _corruptor) def test_each_byte(self): """ Test share selection behavior of the downloader in the face of certain kinds of data corruption. 1. upload a small share to the no-network grid 2. read all of the resulting share files out of the no-network storage servers 3. for each of a. each byte of the share file version field b. each byte of the immutable share version field c. each byte of the immutable share data offset field d. the most significant byte of the block_shares offset field e. one of the bytes of one of the merkle trees f. one of the bytes of the share hashes list i. flip the least significant bit in all of the the share files ii. perform the download/check/restore process 4. add 2 ** 24 to the share file version number 5. perform the download/check/restore process 6. add 2 ** 24 to the share version number 7. perform the download/check/restore process The download/check/restore process is: 1. attempt to download the data 2. assert that the recovered plaintext is correct 3. assert that only the "correct" share numbers were used to reconstruct the plaintext 4. restore all of the share files to their pristine condition """ # Setting catalog_detection=True performs an exhaustive test of the # Downloader's response to corruption in the lsb of each byte of the # 2070-byte share, with two goals: make sure we tolerate all forms of # corruption (i.e. don't hang or return bad data), and make a list of # which bytes can be corrupted without influencing the download # (since we don't need every byte of the share). That takes 50s to # run on my laptop and doesn't have any actual asserts, so we don't # normally do that. self.catalog_detection = False self.basedir = "download/Corruption/each_byte" self.set_up_grid() self.c0 = self.g.clients[0] # to exercise the block-hash-tree code properly, we need to have # multiple segments. We don't tell the downloader about the different # segsize, so it guesses wrong and must do extra roundtrips. 
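# Standalone sketch (not used by the tests) of the pairing in comments like
# "# 3 segs, 4-wide hashtree": a binary Merkle tree over N segment leaves is
# padded up to the next power of two, so 3 segments get a 4-leaf tree and the
# 5-segment uploads earlier get an 8-leaf tree.  The power-of-two padding rule
# is the conventional one; see allmydata.hashtree for the exact layout used.

def hashtree_width(num_segments):
    width = 1
    while width < num_segments:
        width *= 2
    return width

assert hashtree_width(3) == 4   # "# 3 segs, 4-wide hashtree"
assert hashtree_width(5) == 8   # "# 5 segs, 8-wide hashtree"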
u = upload.Data(plaintext, None) u.max_segment_size = 120 # 3 segs, 4-wide hashtree if self.catalog_detection: undetected = spans.Spans() def _download(ign, imm_uri, which, expected): n = self.c0.create_node_from_uri(imm_uri) n._cnode._maybe_create_download_node() # for this test to work, we need to have a new Node each time. # Make sure the NodeMaker's weakcache hasn't interfered. assert not n._cnode._node._shares d = download_to_data(n) def _got_data(data): self.failUnlessEqual(data, plaintext) shnums = sorted([s._shnum for s in n._cnode._node._shares]) no_sh2 = bool(2 not in shnums) sh2 = [s for s in n._cnode._node._shares if s._shnum == 2] sh2_had_corruption = False if sh2 and sh2[0].had_corruption: sh2_had_corruption = True num_needed = len(n._cnode._node._shares) if self.catalog_detection: detected = no_sh2 or sh2_had_corruption or (num_needed!=3) if not detected: undetected.add(which, 1) if expected == "no-sh2": self.failIfIn(2, shnums) elif expected == "2bad-need-3": self.failIf(no_sh2) self.failUnless(sh2[0].had_corruption) self.failUnlessEqual(num_needed, 3) elif expected == "need-4th": # XXX check with warner; what relevance does this # have for the "need-4th" stuff? #self.failIf(no_sh2) #self.failUnless(sh2[0].had_corruption) self.failIfEqual(num_needed, 3) d.addCallback(_got_data) return d d = self.c0.upload(u) def _uploaded(ur): imm_uri = ur.get_uri() self.shares = self.copy_shares(imm_uri) d = defer.succeed(None) # 'victims' is a list of corruption tests to run. Each one flips # the low-order bit of the specified offset in the share file (so # offset=0 is the MSB of the container version, offset=15 is the # LSB of the share version, offset=24 is the MSB of the # data-block-offset, and offset=48 is the first byte of the first # data-block). Each one also specifies what sort of corruption # we're expecting to see. no_sh2_victims = [0,1,2,3] # container version need3_victims = [ ] # none currently in this category # when the offsets are corrupted, the Share will be unable to # retrieve the data it wants (because it thinks that data lives # off in the weeds somewhere), and Share treats DataUnavailable # as abandon-this-share, so in general we'll be forced to look # for a 4th share. need_4th_victims = [12,13,14,15, # offset[data] 24,25,26,27, # offset[block_hashes] ] need_4th_victims.append(36) # block data # when corrupting hash trees, we must corrupt a value that isn't # directly set from somewhere else. Since we download data from # seg2, corrupt something on its hash chain, like [2] (the # right-hand child of the root) need_4th_victims.append(600+2*32) # block_hashes[2] # Share.loop is pretty conservative: it abandons the share at the # first sign of corruption. It doesn't strictly need to be this # way: if the UEB were corrupt, we could still get good block # data from that share, as long as there was a good copy of the # UEB elsewhere. If this behavior is relaxed, then corruption in # the following fields (which are present in multiple shares) # should fall into the "need3_victims" case instead of the # "need_4th_victims" case. need_4th_victims.append(824) # share_hashes corrupt_me = ([(i,"no-sh2") for i in no_sh2_victims] + [(i, "2bad-need-3") for i in need3_victims] + [(i, "need-4th") for i in need_4th_victims]) if self.catalog_detection: share_len = len(list(self.shares.values())[0]) corrupt_me = [(i, "") for i in range(share_len)] # This is a work around for ticket #2024. 
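# Note on the exhaustive sweep above (illustrative, not part of the test): when
# catalog_detection is enabled, every byte offset of the 2070-byte share is a
# corruption target, and offsets whose corruption goes unnoticed are collected
# in a Spans object (undetected.add(which, 1)) and printed by _show_results.
# The slice on the next line keeps offsets 0-7 and 12 onward, dropping 8-11,
# which the comment above attributes to a workaround for ticket #2024.  A
# simplified stand-in for that bookkeeping, using a sorted list instead of
# allmydata.util.spans.Spans:

def catalog_undetected(corruption_results):
    """corruption_results: iterable of (offset, was_detected) pairs."""
    return sorted(offset for offset, was_detected in corruption_results
                  if not was_detected)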
corrupt_me = corrupt_me[0:8]+corrupt_me[12:] for i,expected in corrupt_me: # All these tests result in a successful download. What we're # measuring is how many shares the downloader had to use. d.addCallback(self._corrupt_flip, imm_uri, i) d.addCallback(_download, imm_uri, i, expected) d.addCallback(lambda ign: self.restore_all_shares(self.shares)) d.addCallback(fireEventually) corrupt_values = [ # Make the container version for share number 2 look # unsupported. If you add support for immutable share file # version number much past 16 million then you will have to # update this test. Also maybe you have other problems. (1, 255, "no-sh2"), # Make the immutable share number 2 (not the container, the # thing inside the container) look unsupported. Ditto the # above about version numbers in the ballpark of 16 million. (13, 255, "need-4th"), ] for i,newvalue,expected in corrupt_values: d.addCallback(self._corrupt_set, imm_uri, i, newvalue) d.addCallback(_download, imm_uri, i, expected) d.addCallback(lambda ign: self.restore_all_shares(self.shares)) d.addCallback(fireEventually) return d d.addCallback(_uploaded) def _show_results(ign): share_len = len(list(self.shares.values())[0]) print() print("of [0:%d], corruption ignored in %s" % (share_len, undetected.dump())) if self.catalog_detection: d.addCallback(_show_results) # of [0:2070], corruption ignored in len=1133: # [4-11],[16-23],[28-31],[152-439],[600-663],[1309-2069] # [4-11]: container sizes # [16-23]: share block/data sizes # [152-375]: plaintext hash tree # [376-408]: crypttext_hash_tree[0] (root) # [408-439]: crypttext_hash_tree[1] (computed) # [600-631]: block hash tree[0] (root) # [632-663]: block hash tree[1] (computed) # [1309-]: reserved+unused UEB space return d def test_failure(self): # this test corrupts all shares in the same way, and asserts that the # download fails. self.basedir = "download/Corruption/failure" self.set_up_grid() self.c0 = self.g.clients[0] # to exercise the block-hash-tree code properly, we need to have # multiple segments. We don't tell the downloader about the different # segsize, so it guesses wrong and must do extra roundtrips. u = upload.Data(plaintext, None) u.max_segment_size = 120 # 3 segs, 4-wide hashtree d = self.c0.upload(u) def _uploaded(ur): imm_uri = ur.get_uri() self.shares = self.copy_shares(imm_uri) corrupt_me = [(48, "block data", "Last failure: None"), (600+2*32, "block_hashes[2]", "BadHashError"), (376+2*32, "crypttext_hash_tree[2]", "BadHashError"), (824, "share_hashes", "BadHashError"), ] def _download(imm_uri): n = self.c0.create_node_from_uri(imm_uri) n._cnode._maybe_create_download_node() # for this test to work, we need to have a new Node each time. # Make sure the NodeMaker's weakcache hasn't interfered. assert not n._cnode._node._shares return download_to_data(n) d = defer.succeed(None) for i,which,substring in corrupt_me: # All these tests result in a failed download. d.addCallback(self._corrupt_flip_all, imm_uri, i) d.addCallback(lambda ign, which=which, substring=substring: self.shouldFail(NoSharesError, which, substring, _download, imm_uri)) d.addCallback(lambda ign: self.restore_all_shares(self.shares)) d.addCallback(fireEventually) return d d.addCallback(_uploaded) return d def _corrupt_flip_all(self, ign: Any, imm_uri: bytes, which: int) -> None: """ Flip the least significant bit at a given byte position in all share files for the given capability. 
""" def _corruptor(s, debug=False): # type: (bytes, bool) -> bytes before_corruption = s[:which] after_corruption = s[which+1:] original_byte = s[which:which+1] corrupt_byte = bchr(ord(original_byte) ^ 0x01) return b"".join([before_corruption, corrupt_byte, after_corruption]) self.corrupt_all_shares(imm_uri, _corruptor) class DownloadV2(_Base, unittest.TestCase): # tests which exercise v2-share code. They first upload a file with # FORCE_V2 set. def setUp(self): d = defer.maybeDeferred(_Base.setUp, self) def _set_force_v2(ign): self.old_force_v2 = layout.FORCE_V2 layout.FORCE_V2 = True d.addCallback(_set_force_v2) return d def tearDown(self): layout.FORCE_V2 = self.old_force_v2 return _Base.tearDown(self) def test_download(self): self.basedir = self.mktemp() self.set_up_grid() self.c0 = self.g.clients[0] # upload a file u = upload.Data(plaintext, None) d = self.c0.upload(u) def _uploaded(ur): imm_uri = ur.get_uri() n = self.c0.create_node_from_uri(imm_uri) return download_to_data(n) d.addCallback(_uploaded) return d def test_download_no_overrun(self): self.basedir = self.mktemp() self.set_up_grid() self.c0 = self.g.clients[0] # tweak the client's copies of server-version data, so it believes # that they're old and can't handle reads that overrun the length of # the share. This exercises a different code path. for s in self.c0.storage_broker.get_connected_servers(): v = s.get_version() v1 = v[b"http://allmydata.org/tahoe/protocols/storage/v1"] v1[b"tolerates-immutable-read-overrun"] = False # upload a file u = upload.Data(plaintext, None) d = self.c0.upload(u) def _uploaded(ur): imm_uri = ur.get_uri() n = self.c0.create_node_from_uri(imm_uri) return download_to_data(n) d.addCallback(_uploaded) return d def OFF_test_no_overrun_corrupt_shver(self): # unnecessary self.basedir = self.mktemp() self.set_up_grid() self.c0 = self.g.clients[0] for s in self.c0.storage_broker.get_connected_servers(): v = s.get_version() v1 = v["http://allmydata.org/tahoe/protocols/storage/v1"] v1["tolerates-immutable-read-overrun"] = False # upload a file u = upload.Data(plaintext, None) d = self.c0.upload(u) def _uploaded(ur): imm_uri = ur.get_uri() def _do_corrupt(which, newvalue): def _corruptor(s, debug=False): return s[:which] + chr(newvalue) + s[which+1:] self.corrupt_shares_numbered(imm_uri, [0], _corruptor) _do_corrupt(12+3, 0x00) n = self.c0.create_node_from_uri(imm_uri) d = download_to_data(n) def _got_data(data): self.failUnlessEqual(data, plaintext) d.addCallback(_got_data) return d d.addCallback(_uploaded) return d class Status(unittest.TestCase): def test_status(self): now = 12345.1 ds = DownloadStatus("si-1", 123) self.failUnlessEqual(ds.get_status(), "idle") ev0 = ds.add_segment_request(0, now) self.failUnlessEqual(ds.get_status(), "fetching segment 0") ev0.activate(now+0.5) ev0.deliver(now+1, 0, 1000, 2.0) self.failUnlessEqual(ds.get_status(), "idle") ev2 = ds.add_segment_request(2, now+2) del ev2 # hush pyflakes ev1 = ds.add_segment_request(1, now+2) self.failUnlessEqual(ds.get_status(), "fetching segments 1,2") ev1.error(now+3) self.failUnlessEqual(ds.get_status(), "fetching segment 2; errors on segment 1") def test_progress(self): now = 12345.1 ds = DownloadStatus("si-1", 123) self.failUnlessEqual(ds.get_progress(), 0.0) e = ds.add_read_event(0, 1000, now) self.failUnlessEqual(ds.get_progress(), 0.0) e.update(500, 2.0, 2.0) self.failUnlessEqual(ds.get_progress(), 0.5) e.finished(now+2) self.failUnlessEqual(ds.get_progress(), 1.0) e1 = ds.add_read_event(1000, 2000, now+3) e2 = 
ds.add_read_event(4000, 2000, now+3) self.failUnlessEqual(ds.get_progress(), 0.0) e1.update(1000, 2.0, 2.0) self.failUnlessEqual(ds.get_progress(), 0.25) e2.update(1000, 2.0, 2.0) self.failUnlessEqual(ds.get_progress(), 0.5) e1.update(1000, 2.0, 2.0) e1.finished(now+4) # now there is only one outstanding read, and it is 50% done self.failUnlessEqual(ds.get_progress(), 0.5) e2.update(1000, 2.0, 2.0) e2.finished(now+5) self.failUnlessEqual(ds.get_progress(), 1.0) def test_active(self): now = 12345.1 ds = DownloadStatus("si-1", 123) self.failUnlessEqual(ds.get_active(), False) e1 = ds.add_read_event(0, 1000, now) self.failUnlessEqual(ds.get_active(), True) e2 = ds.add_read_event(1, 1000, now+1) self.failUnlessEqual(ds.get_active(), True) e1.finished(now+2) self.failUnlessEqual(ds.get_active(), True) e2.finished(now+3) self.failUnlessEqual(ds.get_active(), False) def make_server(clientid): tubid = hashutil.tagged_hash(b"clientid", clientid)[:20] return NoNetworkServer(tubid, None) def make_servers(clientids): servers = {} for clientid in clientids: servers[clientid] = make_server(clientid) return servers class MyShare(object): def __init__(self, shnum, server, rtt): self._shnum = shnum self._server = server self._dyhb_rtt = rtt def __repr__(self): return "sh%d-on-%s" % (self._shnum, str(self._server.get_name(), "ascii")) class MySegmentFetcher(SegmentFetcher): def __init__(self, *args, **kwargs): SegmentFetcher.__init__(self, *args, **kwargs) self._test_start_shares = [] def _start_share(self, share, shnum): self._test_start_shares.append(share) class FakeNode(object): def __init__(self): self.want_more = 0 self.failed = None self.processed = None self._si_prefix = "si_prefix" def want_more_shares(self): self.want_more += 1 def fetch_failed(self, fetcher, f): self.failed = f def process_blocks(self, segnum, blocks): self.processed = (segnum, blocks) def get_num_segments(self): return 1, True class Selection(unittest.TestCase): def test_failure(self): """If the fetch loop fails, it tell the Node the fetch failed.""" node = FakeNode() # Simulate a failure: node.get_num_segments = lambda: 1/0 sf = SegmentFetcher(node, 0, 3, None) sf.add_shares([]) d = flushEventualQueue() def _check1(ign): [_] = self.flushLoggedErrors(ZeroDivisionError) self.failUnless(node.failed) self.failUnless(node.failed.check(ZeroDivisionError)) d.addCallback(_check1) return d def test_no_shares(self): node = FakeNode() sf = SegmentFetcher(node, 0, 3, None) sf.add_shares([]) d = flushEventualQueue() def _check1(ign): self.failUnlessEqual(node.want_more, 1) self.failUnlessEqual(node.failed, None) sf.no_more_shares() return flushEventualQueue() d.addCallback(_check1) def _check2(ign): self.failUnless(node.failed) self.failUnless(node.failed.check(NoSharesError)) d.addCallback(_check2) return d def test_only_one_share(self): node = FakeNode() sf = MySegmentFetcher(node, 0, 3, None) serverA = make_server(b"peer-A") shares = [MyShare(0, serverA, 0.0)] sf.add_shares(shares) d = flushEventualQueue() def _check1(ign): self.failUnlessEqual(node.want_more, 1) self.failUnlessEqual(node.failed, None) sf.no_more_shares() return flushEventualQueue() d.addCallback(_check1) def _check2(ign): self.failUnless(node.failed) self.failUnless(node.failed.check(NotEnoughSharesError)) sname = serverA.get_name() self.failUnlessIn("complete= pending=sh0-on-%s overdue= unused=" % str(sname, "ascii"), str(node.failed)) d.addCallback(_check2) return d def test_good_diversity_early(self): node = FakeNode() sf = MySegmentFetcher(node, 0, 3, None) shares = 
[MyShare(i, make_server(b"peer-%d" % i), i) for i in range(10)] sf.add_shares(shares) d = flushEventualQueue() def _check1(ign): self.failUnlessEqual(node.want_more, 0) self.failUnlessEqual(sf._test_start_shares, shares[:3]) for sh in sf._test_start_shares: sf._block_request_activity(sh, sh._shnum, COMPLETE, "block-%d" % sh._shnum) return flushEventualQueue() d.addCallback(_check1) def _check2(ign): self.failIfEqual(node.processed, None) self.failUnlessEqual(node.processed, (0, {0: "block-0", 1: "block-1", 2: "block-2"}) ) d.addCallback(_check2) return d def test_good_diversity_late(self): node = FakeNode() sf = MySegmentFetcher(node, 0, 3, None) shares = [MyShare(i, make_server(b"peer-%d" % i), i) for i in range(10)] sf.add_shares([]) d = flushEventualQueue() def _check1(ign): self.failUnlessEqual(node.want_more, 1) sf.add_shares(shares) return flushEventualQueue() d.addCallback(_check1) def _check2(ign): self.failUnlessEqual(sf._test_start_shares, shares[:3]) for sh in sf._test_start_shares: sf._block_request_activity(sh, sh._shnum, COMPLETE, "block-%d" % sh._shnum) return flushEventualQueue() d.addCallback(_check2) def _check3(ign): self.failIfEqual(node.processed, None) self.failUnlessEqual(node.processed, (0, {0: "block-0", 1: "block-1", 2: "block-2"}) ) d.addCallback(_check3) return d def test_avoid_bad_diversity_late(self): node = FakeNode() sf = MySegmentFetcher(node, 0, 3, None) # we could satisfy the read entirely from the first server, but we'd # prefer not to. Instead, we expect to only pull one share from the # first server servers = make_servers([b"peer-A", b"peer-B", b"peer-C"]) shares = [MyShare(0, servers[b"peer-A"], 0.0), MyShare(1, servers[b"peer-A"], 0.0), MyShare(2, servers[b"peer-A"], 0.0), MyShare(3, servers[b"peer-B"], 1.0), MyShare(4, servers[b"peer-C"], 2.0), ] sf.add_shares([]) d = flushEventualQueue() def _check1(ign): self.failUnlessEqual(node.want_more, 1) sf.add_shares(shares) return flushEventualQueue() d.addCallback(_check1) def _check2(ign): self.failUnlessEqual(sf._test_start_shares, [shares[0], shares[3], shares[4]]) for sh in sf._test_start_shares: sf._block_request_activity(sh, sh._shnum, COMPLETE, "block-%d" % sh._shnum) return flushEventualQueue() d.addCallback(_check2) def _check3(ign): self.failIfEqual(node.processed, None) self.failUnlessEqual(node.processed, (0, {0: "block-0", 3: "block-3", 4: "block-4"}) ) d.addCallback(_check3) return d def test_suffer_bad_diversity_late(self): node = FakeNode() sf = MySegmentFetcher(node, 0, 3, None) # we satisfy the read entirely from the first server because we don't # have any other choice. 
serverA = make_server(b"peer-A") shares = [MyShare(0, serverA, 0.0), MyShare(1, serverA, 0.0), MyShare(2, serverA, 0.0), MyShare(3, serverA, 0.0), MyShare(4, serverA, 0.0), ] sf.add_shares([]) d = flushEventualQueue() def _check1(ign): self.failUnlessEqual(node.want_more, 1) sf.add_shares(shares) return flushEventualQueue() d.addCallback(_check1) def _check2(ign): self.failUnlessEqual(node.want_more, 3) self.failUnlessEqual(sf._test_start_shares, [shares[0], shares[1], shares[2]]) for sh in sf._test_start_shares: sf._block_request_activity(sh, sh._shnum, COMPLETE, "block-%d" % sh._shnum) return flushEventualQueue() d.addCallback(_check2) def _check3(ign): self.failIfEqual(node.processed, None) self.failUnlessEqual(node.processed, (0, {0: "block-0", 1: "block-1", 2: "block-2"}) ) d.addCallback(_check3) return d def test_suffer_bad_diversity_early(self): node = FakeNode() sf = MySegmentFetcher(node, 0, 3, None) # we satisfy the read entirely from the first server because we don't # have any other choice. serverA = make_server(b"peer-A") shares = [MyShare(0, serverA, 0.0), MyShare(1, serverA, 0.0), MyShare(2, serverA, 0.0), MyShare(3, serverA, 0.0), MyShare(4, serverA, 0.0), ] sf.add_shares(shares) d = flushEventualQueue() def _check1(ign): self.failUnlessEqual(node.want_more, 2) self.failUnlessEqual(sf._test_start_shares, [shares[0], shares[1], shares[2]]) for sh in sf._test_start_shares: sf._block_request_activity(sh, sh._shnum, COMPLETE, "block-%d" % sh._shnum) return flushEventualQueue() d.addCallback(_check1) def _check2(ign): self.failIfEqual(node.processed, None) self.failUnlessEqual(node.processed, (0, {0: "block-0", 1: "block-1", 2: "block-2"}) ) d.addCallback(_check2) return d def test_overdue(self): node = FakeNode() sf = MySegmentFetcher(node, 0, 3, None) shares = [MyShare(i, make_server(b"peer-%d" % i), i) for i in range(10)] sf.add_shares(shares) d = flushEventualQueue() def _check1(ign): self.failUnlessEqual(node.want_more, 0) self.failUnlessEqual(sf._test_start_shares, shares[:3]) for sh in sf._test_start_shares: sf._block_request_activity(sh, sh._shnum, OVERDUE) return flushEventualQueue() d.addCallback(_check1) def _check2(ign): self.failUnlessEqual(sf._test_start_shares, shares[:6]) for sh in sf._test_start_shares[3:]: sf._block_request_activity(sh, sh._shnum, COMPLETE, "block-%d" % sh._shnum) return flushEventualQueue() d.addCallback(_check2) def _check3(ign): self.failIfEqual(node.processed, None) self.failUnlessEqual(node.processed, (0, {3: "block-3", 4: "block-4", 5: "block-5"}) ) d.addCallback(_check3) return d def test_overdue_fails(self): node = FakeNode() sf = MySegmentFetcher(node, 0, 3, None) servers = make_servers([b"peer-%d" % i for i in range(6)]) shares = [MyShare(i, servers[b"peer-%d" % i], i) for i in range(6)] sf.add_shares(shares) sf.no_more_shares() d = flushEventualQueue() def _check1(ign): self.failUnlessEqual(node.want_more, 0) self.failUnlessEqual(sf._test_start_shares, shares[:3]) for sh in sf._test_start_shares: sf._block_request_activity(sh, sh._shnum, OVERDUE) return flushEventualQueue() d.addCallback(_check1) def _check2(ign): self.failUnlessEqual(sf._test_start_shares, shares[:6]) for sh in sf._test_start_shares[3:]: sf._block_request_activity(sh, sh._shnum, DEAD) return flushEventualQueue() d.addCallback(_check2) def _check3(ign): # we're still waiting self.failUnlessEqual(node.processed, None) self.failUnlessEqual(node.failed, None) # now complete one of the overdue ones, and kill one of the other # ones, leaving one hanging. 
This should trigger a failure, since # we cannot succeed. live = sf._test_start_shares[0] die = sf._test_start_shares[1] sf._block_request_activity(live, live._shnum, COMPLETE, "block") sf._block_request_activity(die, die._shnum, DEAD) return flushEventualQueue() d.addCallback(_check3) def _check4(ign): self.failUnless(node.failed) self.failUnless(node.failed.check(NotEnoughSharesError)) sname = servers[b"peer-2"].get_name() self.failUnlessIn("complete=sh0 pending= overdue=sh2-on-%s unused=" % str(sname, "ascii"), str(node.failed)) d.addCallback(_check4) return d def test_avoid_redundancy(self): node = FakeNode() sf = MySegmentFetcher(node, 0, 3, None) # we could satisfy the read entirely from the first server, but we'd # prefer not to. Instead, we expect to only pull one share from the # first server servers = make_servers([b"peer-A", b"peer-B", b"peer-C", b"peer-D", b"peer-E"]) shares = [MyShare(0, servers[b"peer-A"],0.0), MyShare(1, servers[b"peer-B"],1.0), MyShare(0, servers[b"peer-C"],2.0), # this will be skipped MyShare(1, servers[b"peer-D"],3.0), MyShare(2, servers[b"peer-E"],4.0), ] sf.add_shares(shares[:3]) d = flushEventualQueue() def _check1(ign): self.failUnlessEqual(node.want_more, 1) self.failUnlessEqual(sf._test_start_shares, [shares[0], shares[1]]) # allow sh1 to retire sf._block_request_activity(shares[1], 1, COMPLETE, "block-1") return flushEventualQueue() d.addCallback(_check1) def _check2(ign): # and then feed in the remaining shares sf.add_shares(shares[3:]) sf.no_more_shares() return flushEventualQueue() d.addCallback(_check2) def _check3(ign): self.failUnlessEqual(sf._test_start_shares, [shares[0], shares[1], shares[4]]) sf._block_request_activity(shares[0], 0, COMPLETE, "block-0") sf._block_request_activity(shares[4], 2, COMPLETE, "block-2") return flushEventualQueue() d.addCallback(_check3) def _check4(ign): self.failIfEqual(node.processed, None) self.failUnlessEqual(node.processed, (0, {0: "block-0", 1: "block-1", 2: "block-2"}) ) d.addCallback(_check4) return d tahoe_lafs-1.20.0/src/allmydata/test/test_eliotutil.py0000644000000000000000000002300213615410400017755 0ustar00""" Tests for ``allmydata.util.eliotutil``. """ from sys import stdout import logging from unittest import ( skip, ) from fixtures import ( TempDir, ) from testtools import ( TestCase, TestResult, ) from testtools.matchers import ( Is, IsInstance, Not, MatchesStructure, Equals, HasLength, AfterPreprocessing, ) from testtools.twistedsupport import ( succeeded, failed, ) from eliot import ( Message, MessageType, fields, MemoryLogger, ) from eliot.twisted import DeferredContext from eliot.testing import ( capture_logging, assertHasAction, swap_logger, ) from twisted.internet.defer import ( succeed, ) from twisted.internet.task import deferLater from twisted.internet import reactor from ..util.eliotutil import ( log_call_deferred, _parse_destination_description, _EliotLogging, ) from ..util.deferredutil import async_to_deferred from .common import ( SyncTestCase, AsyncTestCase, ) def passes(): """ Create a matcher that matches a ``TestCase`` that runs without failures or errors. """ def run(case): result = TestResult() case.run(result) return result.wasSuccessful() return AfterPreprocessing(run, Equals(True)) class EliotLoggedTestTests(TestCase): """ Tests for the automatic log-related provided by ``AsyncTestCase``. 
This class uses ``testtools.TestCase`` because it is inconvenient to nest ``AsyncTestCase`` inside ``AsyncTestCase`` (in particular, Eliot messages emitted by the inner test case get observed by the outer test case and if an inner case emits invalid messages they cause the outer test case to fail). """ def test_fails(self): """ A test method of an ``AsyncTestCase`` subclass can fail. """ class UnderTest(AsyncTestCase): def test_it(self): self.fail("make sure it can fail") self.assertThat(UnderTest("test_it"), Not(passes())) def test_unserializable_fails(self): """ A test method of an ``AsyncTestCase`` subclass that logs an unserializable value with Eliot fails. """ class world(object): """ an unserializable object """ class UnderTest(AsyncTestCase): def test_it(self): Message.log(hello=world) self.assertThat(UnderTest("test_it"), Not(passes())) def test_logs_non_utf_8_byte(self): """ A test method of an ``AsyncTestCase`` subclass can log a message that contains a non-UTF-8 byte string and return ``None`` and pass. """ class UnderTest(AsyncTestCase): def test_it(self): Message.log(hello=b"\xFF") self.assertThat(UnderTest("test_it"), passes()) def test_returns_none(self): """ A test method of an ``AsyncTestCase`` subclass can log a message and return ``None`` and pass. """ class UnderTest(AsyncTestCase): def test_it(self): Message.log(hello="world") self.assertThat(UnderTest("test_it"), passes()) def test_returns_fired_deferred(self): """ A test method of an ``AsyncTestCase`` subclass can log a message and return an already-fired ``Deferred`` and pass. """ class UnderTest(AsyncTestCase): def test_it(self): Message.log(hello="world") return succeed(None) self.assertThat(UnderTest("test_it"), passes()) def test_returns_unfired_deferred(self): """ A test method of an ``AsyncTestCase`` subclass can log a message and return an unfired ``Deferred`` and pass when the ``Deferred`` fires. """ class UnderTest(AsyncTestCase): def test_it(self): Message.log(hello="world") # @eliot_logged_test automatically gives us an action context # but it's still our responsibility to maintain it across # stack-busting operations. d = DeferredContext(deferLater(reactor, 0.0, lambda: None)) d.addCallback(lambda ignored: Message.log(goodbye="world")) # We didn't start an action. We're not finishing an action. return d.result self.assertThat(UnderTest("test_it"), passes()) class ParseDestinationDescriptionTests(SyncTestCase): """ Tests for ``_parse_destination_description``. """ def test_stdout(self): """ A ``file:`` description with a path of ``-`` causes logs to be written to stdout. """ reactor = object() self.assertThat( _parse_destination_description("file:-")(reactor).file, Equals(stdout), ) def test_regular_file(self): """ A ``file:`` description with any path other than ``-`` causes logs to be written to a file with that name. """ tempdir = TempDir() self.useFixture(tempdir) reactor = object() path = tempdir.join("regular_file") self.assertThat( _parse_destination_description("file:{}".format(path))(reactor), MatchesStructure( file=MatchesStructure( path=Equals(path), rotateLength=AfterPreprocessing(bool, Equals(True)), maxRotatedFiles=AfterPreprocessing(bool, Equals(True)), ), ), ) # We need AsyncTestCase because logging happens in a thread tied to the # reactor. class EliotLoggingTests(AsyncTestCase): """ Tests for ``_EliotLogging``. """ @async_to_deferred async def test_stdlib_event_relayed(self): """ An event logged using the stdlib logging module is delivered to the Eliot destination. 
""" collected = [] service = _EliotLogging([collected.append]) service.startService() logging.critical("oh no") await service.stopService() self.assertTrue( "oh no" in str(collected[-1]), collected ) @async_to_deferred async def test_twisted_event_relayed(self): """ An event logged with a ``twisted.logger.Logger`` is delivered to the Eliot destination. """ collected = [] service = _EliotLogging([collected.append]) service.startService() from twisted.logger import Logger Logger().critical("oh no") await service.stopService() self.assertTrue( "oh no" in str(collected[-1]), collected ) def test_validation_failure(self): """ If a test emits a log message that fails validation then an error is added to the result. """ # Make sure we preserve the original global Eliot state. original = swap_logger(MemoryLogger()) self.addCleanup(lambda: swap_logger(original)) class ValidationFailureProbe(SyncTestCase): def test_bad_message(self): # This message does not validate because "Hello" is not an # int. MSG = MessageType("test:eliotutil", fields(foo=int)) MSG(foo="Hello").write() result = TestResult() case = ValidationFailureProbe("test_bad_message") case.run(result) self.assertThat( result.errors, HasLength(1), ) def test_skip_cleans_up(self): """ After a skipped test the global Eliot logging state is restored. """ # Save the logger that's active before we do anything so that we can # restore it later. Also install another logger so we can compare it # to the active logger later. expected = MemoryLogger() original = swap_logger(expected) # Restore it, whatever else happens. self.addCleanup(lambda: swap_logger(original)) class SkipProbe(SyncTestCase): @skip("It's a skip test.") def test_skipped(self): pass case = SkipProbe("test_skipped") case.run() # Retrieve the logger that's active now that the skipped test is done # so we can check it against the expected value. actual = swap_logger(MemoryLogger()) self.assertThat( actual, Is(expected), ) class LogCallDeferredTests(TestCase): """ Tests for ``log_call_deferred``. """ @capture_logging( lambda self, logger: assertHasAction(self, logger, u"the-action", succeeded=True), ) def test_return_value(self, logger): """ The decorated function's return value is passed through. """ result = object() @log_call_deferred(action_type=u"the-action") def f(): return result self.assertThat(f(), succeeded(Is(result))) @capture_logging( lambda self, logger: assertHasAction(self, logger, u"the-action", succeeded=False), ) def test_raise_exception(self, logger): """ An exception raised by the decorated function is passed through. """ class Result(Exception): pass @log_call_deferred(action_type=u"the-action") def f(): raise Result() self.assertThat( f(), failed( AfterPreprocessing( lambda f: f.value, IsInstance(Result), ), ), ) tahoe_lafs-1.20.0/src/allmydata/test/test_encode.py0000644000000000000000000003662013615410400017212 0ustar00""" Ported to Python 3. 
""" from past.builtins import chr as byteschr from zope.interface import implementer from twisted.trial import unittest from twisted.internet import defer from twisted.python.failure import Failure from foolscap.api import fireEventually from allmydata import uri from allmydata.immutable import encode, upload, checker from allmydata.util import hashutil from allmydata.util.assertutil import _assert from allmydata.util.consumer import download_to_data from allmydata.interfaces import IStorageBucketWriter, IStorageBucketReader from allmydata.test.no_network import GridTestMixin class LostPeerError(Exception): pass def flip_bit(good): # flips the last bit return good[:-1] + byteschr(ord(good[-1]) ^ 0x01) @implementer(IStorageBucketWriter, IStorageBucketReader) class FakeBucketReaderWriterProxy(object): # these are used for both reading and writing def __init__(self, mode="good", peerid="peer"): self.mode = mode self.blocks = {} self.plaintext_hashes = [] self.crypttext_hashes = [] self.block_hashes = None self.share_hashes = None self.closed = False self.peerid = peerid def get_peerid(self): return self.peerid def _start(self): if self.mode == "lost-early": f = Failure(LostPeerError("I went away early")) return fireEventually(f) return defer.succeed(self) def put_header(self): return self._start() def put_block(self, segmentnum, data): if self.mode == "lost-early": f = Failure(LostPeerError("I went away early")) return fireEventually(f) def _try(): assert not self.closed assert segmentnum not in self.blocks if self.mode == "lost" and segmentnum >= 1: raise LostPeerError("I'm going away now") self.blocks[segmentnum] = data return defer.maybeDeferred(_try) def put_crypttext_hashes(self, hashes): def _try(): assert not self.closed assert not self.crypttext_hashes self.crypttext_hashes = hashes return defer.maybeDeferred(_try) def put_block_hashes(self, blockhashes): def _try(): assert not self.closed assert self.block_hashes is None self.block_hashes = blockhashes return defer.maybeDeferred(_try) def put_share_hashes(self, sharehashes): def _try(): assert not self.closed assert self.share_hashes is None self.share_hashes = sharehashes return defer.maybeDeferred(_try) def put_uri_extension(self, uri_extension): def _try(): assert not self.closed self.uri_extension = uri_extension return defer.maybeDeferred(_try) def close(self): def _try(): assert not self.closed self.closed = True return defer.maybeDeferred(_try) def abort(self): return defer.succeed(None) def get_block_data(self, blocknum, blocksize, size): d = self._start() def _try(unused=None): assert isinstance(blocknum, int) if self.mode == "bad block": return flip_bit(self.blocks[blocknum]) return self.blocks[blocknum] d.addCallback(_try) return d def get_plaintext_hashes(self): d = self._start() def _try(unused=None): hashes = self.plaintext_hashes[:] return hashes d.addCallback(_try) return d def get_crypttext_hashes(self): d = self._start() def _try(unused=None): hashes = self.crypttext_hashes[:] if self.mode == "bad crypttext hashroot": hashes[0] = flip_bit(hashes[0]) if self.mode == "bad crypttext hash": hashes[1] = flip_bit(hashes[1]) return hashes d.addCallback(_try) return d def get_block_hashes(self, at_least_these=()): d = self._start() def _try(unused=None): if self.mode == "bad blockhash": hashes = self.block_hashes[:] hashes[1] = flip_bit(hashes[1]) return hashes return self.block_hashes d.addCallback(_try) return d def get_share_hashes(self, at_least_these=()): d = self._start() def _try(unused=None): if self.mode == "bad 
sharehash": hashes = self.share_hashes[:] hashes[1] = (hashes[1][0], flip_bit(hashes[1][1])) return hashes if self.mode == "missing sharehash": # one sneaky attack would be to pretend we don't know our own # sharehash, which could manage to frame someone else. # download.py is supposed to guard against this case. return [] return self.share_hashes d.addCallback(_try) return d def get_uri_extension(self): d = self._start() def _try(unused=None): if self.mode == "bad uri_extension": return flip_bit(self.uri_extension) return self.uri_extension d.addCallback(_try) return d def make_data(length): data = b"happy happy joy joy" * 100 assert length <= len(data) return data[:length] class ValidatedExtendedURIProxy(unittest.TestCase): K = 4 M = 10 SIZE = 200 SEGSIZE = 72 _TMP = SIZE%SEGSIZE if _TMP == 0: _TMP = SEGSIZE if _TMP % K != 0: _TMP += (K - (_TMP % K)) TAIL_SEGSIZE = _TMP _TMP = SIZE // SEGSIZE if SIZE % SEGSIZE != 0: _TMP += 1 NUM_SEGMENTS = _TMP mindict = { 'segment_size': SEGSIZE, 'crypttext_root_hash': b'0'*hashutil.CRYPTO_VAL_SIZE, 'share_root_hash': b'1'*hashutil.CRYPTO_VAL_SIZE } optional_consistent = { 'crypttext_hash': b'2'*hashutil.CRYPTO_VAL_SIZE, 'codec_name': b"crs", 'codec_params': b"%d-%d-%d" % (SEGSIZE, K, M), 'tail_codec_params': b"%d-%d-%d" % (TAIL_SEGSIZE, K, M), 'num_segments': NUM_SEGMENTS, 'size': SIZE, 'needed_shares': K, 'total_shares': M, 'plaintext_hash': b"anything", 'plaintext_root_hash': b"anything", } # optional_inconsistent = { 'crypttext_hash': ('2'*(hashutil.CRYPTO_VAL_SIZE-1), "", 77), optional_inconsistent = { 'crypttext_hash': (77,), 'codec_name': (b"digital fountain", b""), 'codec_params': (b"%d-%d-%d" % (SEGSIZE, K-1, M), b"%d-%d-%d" % (SEGSIZE-1, K, M), b"%d-%d-%d" % (SEGSIZE, K, M-1)), 'tail_codec_params': (b"%d-%d-%d" % (TAIL_SEGSIZE, K-1, M), b"%d-%d-%d" % (TAIL_SEGSIZE-1, K, M), b"%d-%d-%d" % (TAIL_SEGSIZE, K, M-1)), 'num_segments': (NUM_SEGMENTS-1,), 'size': (SIZE-1,), 'needed_shares': (K-1,), 'total_shares': (M-1,), } def _test(self, uebdict): uebstring = uri.pack_extension(uebdict) uebhash = hashutil.uri_extension_hash(uebstring) fb = FakeBucketReaderWriterProxy() fb.put_uri_extension(uebstring) verifycap = uri.CHKFileVerifierURI(storage_index=b'x'*16, uri_extension_hash=uebhash, needed_shares=self.K, total_shares=self.M, size=self.SIZE) vup = checker.ValidatedExtendedURIProxy(fb, verifycap) return vup.start() def _test_accept(self, uebdict): return self._test(uebdict) def _should_fail(self, res, expected_failures): if isinstance(res, Failure): res.trap(*expected_failures) else: self.fail("was supposed to raise %s, not get '%s'" % (expected_failures, res)) def _test_reject(self, uebdict): d = self._test(uebdict) d.addBoth(self._should_fail, (KeyError, checker.BadURIExtension)) return d def test_accept_minimal(self): return self._test_accept(self.mindict) def test_reject_insufficient(self): dl = [] for k in self.mindict.keys(): insuffdict = self.mindict.copy() del insuffdict[k] d = self._test_reject(insuffdict) dl.append(d) return defer.DeferredList(dl) def test_accept_optional(self): dl = [] for k in self.optional_consistent.keys(): mydict = self.mindict.copy() mydict[k] = self.optional_consistent[k] d = self._test_accept(mydict) dl.append(d) return defer.DeferredList(dl) def test_reject_optional(self): dl = [] for k in self.optional_inconsistent.keys(): for v in self.optional_inconsistent[k]: mydict = self.mindict.copy() mydict[k] = v d = self._test_reject(mydict) dl.append(d) return defer.DeferredList(dl) class Encode(unittest.TestCase): def 
do_encode(self, max_segment_size, datalen, NUM_SHARES, NUM_SEGMENTS, expected_block_hashes, expected_share_hashes): data = make_data(datalen) # force use of multiple segments e = encode.Encoder() u = upload.Data(data, convergence=b"some convergence string") u.set_default_encoding_parameters({'max_segment_size': max_segment_size, 'k': 25, 'happy': 75, 'n': 100}) eu = upload.EncryptAnUploadable(u) d = e.set_encrypted_uploadable(eu) all_shareholders = [] def _ready(res): k,happy,n = e.get_param("share_counts") _assert(n == NUM_SHARES) # else we'll be completely confused numsegs = e.get_param("num_segments") _assert(numsegs == NUM_SEGMENTS, numsegs, NUM_SEGMENTS) segsize = e.get_param("segment_size") _assert( (NUM_SEGMENTS-1)*segsize < len(data) <= NUM_SEGMENTS*segsize, NUM_SEGMENTS, segsize, (NUM_SEGMENTS-1)*segsize, len(data), NUM_SEGMENTS*segsize) shareholders = {} servermap = {} for shnum in range(NUM_SHARES): peer = FakeBucketReaderWriterProxy() shareholders[shnum] = peer servermap.setdefault(shnum, set()).add(peer.get_peerid()) all_shareholders.append(peer) e.set_shareholders(shareholders, servermap) return e.start() d.addCallback(_ready) def _check(res): verifycap = res self.failUnless(isinstance(verifycap.uri_extension_hash, bytes)) self.failUnlessEqual(len(verifycap.uri_extension_hash), 32) for i,peer in enumerate(all_shareholders): self.failUnless(peer.closed) self.failUnlessEqual(len(peer.blocks), NUM_SEGMENTS) # each peer gets a full tree of block hashes. For 3 or 4 # segments, that's 7 hashes. For 5 segments it's 15 hashes. self.failUnlessEqual(len(peer.block_hashes), expected_block_hashes) for h in peer.block_hashes: self.failUnlessEqual(len(h), 32) # each peer also gets their necessary chain of share hashes. # For 100 shares (rounded up to 128 leaves), that's 8 hashes self.failUnlessEqual(len(peer.share_hashes), expected_share_hashes) for (hashnum, h) in peer.share_hashes: self.failUnless(isinstance(hashnum, int)) self.failUnlessEqual(len(h), 32) d.addCallback(_check) return d def test_send_74(self): # 3 segments (25, 25, 24) return self.do_encode(25, 74, 100, 3, 7, 8) def test_send_75(self): # 3 segments (25, 25, 25) return self.do_encode(25, 75, 100, 3, 7, 8) def test_send_51(self): # 3 segments (25, 25, 1) return self.do_encode(25, 51, 100, 3, 7, 8) def test_send_76(self): # encode a 76 byte file (in 4 segments: 25,25,25,1) to 100 shares return self.do_encode(25, 76, 100, 4, 7, 8) def test_send_99(self): # 4 segments: 25,25,25,24 return self.do_encode(25, 99, 100, 4, 7, 8) def test_send_100(self): # 4 segments: 25,25,25,25 return self.do_encode(25, 100, 100, 4, 7, 8) def test_send_124(self): # 5 segments: 25, 25, 25, 25, 24 return self.do_encode(25, 124, 100, 5, 15, 8) def test_send_125(self): # 5 segments: 25, 25, 25, 25, 25 return self.do_encode(25, 125, 100, 5, 15, 8) def test_send_101(self): # 5 segments: 25, 25, 25, 25, 1 return self.do_encode(25, 101, 100, 5, 15, 8) class Roundtrip(GridTestMixin, unittest.TestCase): # a series of 3*3 tests to check out edge conditions. One axis is how the # plaintext is divided into segments: kn+(-1,0,1). Another way to express # this is n%k == -1 or 0 or 1. For example, for 25-byte segments, we # might test 74 bytes, 75 bytes, and 76 bytes. # on the other axis is how many leaves in the block hash tree we wind up # with, relative to a power of 2, so 2^a+(-1,0,1). Each segment turns # into a single leaf. So we'd like to check out, e.g., 3 segments, 4 # segments, and 5 segments. 
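# (illustrative arithmetic: a binary hash tree padded out to p leaves, with
#  p a power of two, holds 2*p - 1 hashes in total, so a 4-leaf tree has 7
#  hashes and an 8-leaf tree has 15 -- which is where the
#  expected_block_hashes values of 7 and 15 used above come from)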
# that results in the following series of data lengths: # 3 segs: 74, 75, 51 # 4 segs: 99, 100, 76 # 5 segs: 124, 125, 101 # all tests encode to 100 shares, which means the share hash tree will # have 128 leaves, which means that buckets will be given an 8-long share # hash chain # all 3-segment files will have a 4-leaf blockhashtree, and thus expect # to get 7 blockhashes. 4-segment files will also get 4-leaf block hash # trees and 7 blockhashes. 5-segment files will get 8-leaf block hash # trees, which gets 15 blockhashes. def test_74(self): return self.do_test_size(74) def test_75(self): return self.do_test_size(75) def test_51(self): return self.do_test_size(51) def test_99(self): return self.do_test_size(99) def test_100(self): return self.do_test_size(100) def test_76(self): return self.do_test_size(76) def test_124(self): return self.do_test_size(124) def test_125(self): return self.do_test_size(125) def test_101(self): return self.do_test_size(101) def upload(self, data): u = upload.Data(data, None) u.max_segment_size = 25 u.encoding_param_k = 25 u.encoding_param_happy = 1 u.encoding_param_n = 100 d = self.c0.upload(u) d.addCallback(lambda ur: self.c0.create_node_from_uri(ur.get_uri())) # returns a FileNode return d def do_test_size(self, size): self.basedir = self.mktemp() self.set_up_grid() self.c0 = self.g.clients[0] DATA = b"p"*size d = self.upload(DATA) d.addCallback(lambda n: download_to_data(n)) def _downloaded(newdata): self.failUnlessEqual(newdata, DATA) d.addCallback(_downloaded) return d tahoe_lafs-1.20.0/src/allmydata/test/test_encodingutil.py0000644000000000000000000004316213615410400020440 0ustar00 lumiere_nfc = u"lumi\u00E8re" Artonwall_nfc = u"\u00C4rtonwall.mp3" Artonwall_nfd = u"A\u0308rtonwall.mp3" TEST_FILENAMES = ( Artonwall_nfc, u'test_file', u'Blah blah.txt', ) # The following main helps to generate a test class for other operating # systems. 
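# Illustrative only (not used by the tests in this file): the two Artonwall
# constants above are the same abstract filename in two different Unicode
# normalization forms, which the stdlib can demonstrate directly.
def _normalization_example():
    import unicodedata
    nfc = u"\u00C4rtonwall.mp3"   # U+00C4 LATIN CAPITAL LETTER A WITH DIAERESIS
    nfd = u"A\u0308rtonwall.mp3"  # "A" followed by U+0308 COMBINING DIAERESIS
    assert unicodedata.normalize("NFD", nfc) == nfd
    assert unicodedata.normalize("NFC", nfd) == nfc
    # macOS filesystems hand back (a variant of) the NFD form, while most
    # Linux filesystems return whatever bytes were stored -- hence the
    # normalize() calls in the listdir_unicode tests below.
    return nfc, nfd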
if __name__ == "__main__": import sys, os import tempfile import shutil import platform if len(sys.argv) != 2: print("Usage: %s lumire" % sys.argv[0]) sys.exit(1) if sys.platform == "win32": try: from allmydata.windows.fixups import initialize except ImportError: print("set PYTHONPATH to the src directory") sys.exit(1) initialize() print() print("class MyWeirdOS(EncodingUtil, unittest.TestCase):") print(" uname = '%s'" % ' '.join(platform.uname())) print(" argv = %s" % repr(sys.argv[1])) print(" platform = '%s'" % sys.platform) print(" filesystem_encoding = '%s'" % sys.getfilesystemencoding()) print(" io_encoding = '%s'" % sys.stdout.encoding) try: tmpdir = tempfile.mkdtemp() for fname in TEST_FILENAMES: open(os.path.join(tmpdir, fname), 'w').close() dirlist = os.listdir(tmpdir) print(" dirlist = %s" % repr(dirlist)) except: print(" # Oops, I cannot write filenames containing non-ascii characters") print() shutil.rmtree(tmpdir) sys.exit(0) import os, sys from twisted.trial import unittest from twisted.python.filepath import FilePath from allmydata.test.common_util import ( ReallyEqualMixin, skip_if_cannot_represent_filename, ) from allmydata.util import encodingutil, fileutil from allmydata.util.encodingutil import unicode_to_url, \ unicode_to_output, quote_output, quote_path, quote_local_unicode_path, \ quote_filepath, unicode_platform, listdir_unicode, \ get_filesystem_encoding, to_bytes, from_utf8_or_none, _reload, \ to_filepath, extend_filepath, unicode_from_filepath, unicode_segments_from, \ unicode_to_argv class MockStdout(object): pass class EncodingUtil(ReallyEqualMixin): def setUp(self): self.addCleanup(_reload) self.patch(sys, "platform", self.platform) def test_unicode_to_url(self): self.failUnless(unicode_to_url(lumiere_nfc), b"lumi\xc3\xa8re") def test_unicode_to_output_py3(self): self.failUnlessReallyEqual(unicode_to_output(lumiere_nfc), lumiere_nfc) def test_unicode_to_argv(self): """ unicode_to_argv() returns its unicode argument on Windows and Python 2 and converts to bytes using UTF-8 elsewhere. """ result = unicode_to_argv(lumiere_nfc) expected_value = lumiere_nfc self.assertIsInstance(result, type(expected_value)) self.assertEqual(result, expected_value) def test_unicode_platform_py3(self): _reload() self.failUnlessReallyEqual(unicode_platform(), True) def test_listdir_unicode(self): if 'dirlist' not in dir(self): return try: u"test".encode(self.filesystem_encoding) except (LookupError, AttributeError): raise unittest.SkipTest("This platform does not support the '%s' filesystem encoding " "that we are testing for the benefit of a different platform." 
% (self.filesystem_encoding,)) def call_os_listdir(path): # Python 3 always lists unicode filenames: return [d.decode(self.filesystem_encoding) if isinstance(d, bytes) else d for d in self.dirlist] self.patch(os, 'listdir', call_os_listdir) def call_sys_getfilesystemencoding(): return self.filesystem_encoding self.patch(sys, 'getfilesystemencoding', call_sys_getfilesystemencoding) _reload() filenames = listdir_unicode(u'/dummy') self.failUnlessEqual(set([encodingutil.normalize(fname) for fname in filenames]), set(TEST_FILENAMES)) class StdlibUnicode(unittest.TestCase): """This mainly tests that some of the stdlib functions support Unicode paths, but also that listdir_unicode works for valid filenames.""" def test_mkdir_open_exists_abspath_listdir_expanduser(self): skip_if_cannot_represent_filename(lumiere_nfc) try: os.mkdir(lumiere_nfc) except EnvironmentError as e: raise unittest.SkipTest("%r\nIt is possible that the filesystem on which this test is being run " "does not support Unicode, even though the platform does." % (e,)) fn = lumiere_nfc + u'/' + lumiere_nfc + u'.txt' open(fn, 'wb').close() self.failUnless(os.path.exists(fn)) getcwdu = os.getcwd self.failUnless(os.path.exists(os.path.join(getcwdu(), fn))) filenames = listdir_unicode(lumiere_nfc) # We only require that the listing includes a filename that is canonically equivalent # to lumiere_nfc (on Mac OS X, it will be the NFD equivalent). self.failUnlessIn(lumiere_nfc + u".txt", set([encodingutil.normalize(fname) for fname in filenames])) expanded = fileutil.expanduser(u"~/" + lumiere_nfc) self.failIfIn(u"~", expanded) self.failUnless(expanded.endswith(lumiere_nfc), expanded) def test_open_unrepresentable(self): if unicode_platform(): raise unittest.SkipTest("This test is not applicable to platforms that represent filenames as Unicode.") enc = get_filesystem_encoding() fn = u'\u2621.txt' try: fn.encode(enc) raise unittest.SkipTest("This test cannot be run unless we know a filename that is not representable.") except UnicodeEncodeError: self.failUnlessRaises(UnicodeEncodeError, open, fn, 'wb') class QuoteOutput(ReallyEqualMixin, unittest.TestCase): def tearDown(self): _reload() def _check(self, inp, out, enc, optional_quotes, quote_newlines): if isinstance(out, bytes): out = out.decode(enc or encodingutil.io_encoding) out2 = out if optional_quotes: out2 = out2[1:-1] self.failUnlessReallyEqual(quote_output(inp, encoding=enc, quote_newlines=quote_newlines), out) self.failUnlessReallyEqual(quote_output(inp, encoding=enc, quotemarks=False, quote_newlines=quote_newlines), out2) if out[0:2] == 'b"': pass elif isinstance(inp, bytes): try: unicode_inp = inp.decode("utf-8") except UnicodeDecodeError: # Some things decode on Python 2, but not Python 3... return self.failUnlessReallyEqual(quote_output(unicode_inp, encoding=enc, quote_newlines=quote_newlines), out) self.failUnlessReallyEqual(quote_output(unicode_inp, encoding=enc, quotemarks=False, quote_newlines=quote_newlines), out2) else: try: bytes_inp = inp.encode('utf-8') except UnicodeEncodeError: # Some things encode on Python 2, but not Python 3, e.g. # surrogates like u"\uDC00\uD800"... 
return self.failUnlessReallyEqual(quote_output(bytes_inp, encoding=enc, quote_newlines=quote_newlines), out) self.failUnlessReallyEqual(quote_output(bytes_inp, encoding=enc, quotemarks=False, quote_newlines=quote_newlines), out2) def _test_quote_output_all(self, enc): def check(inp, out, optional_quotes=False, quote_newlines=None): out = out.decode("ascii") self._check(inp, out, enc, optional_quotes, quote_newlines) # optional single quotes check(b"foo", b"'foo'", True) check(b"\\", b"'\\'", True) check(b"$\"`", b"'$\"`'", True) check(b"\n", b"'\n'", True, quote_newlines=False) # mandatory single quotes check(b"\"", b"'\"'") # double quotes check(b"'", b"\"'\"") check(b"\n", b"\"\\x0a\"", quote_newlines=True) check(b"\x00", b"\"\\x00\"") # invalid Unicode and astral planes check(u"\uFDD0\uFDEF", b"\"\\ufdd0\\ufdef\"") check(u"\uDC00\uD800", b"\"\\udc00\\ud800\"") check(u"\uDC00\uD800\uDC00", b"\"\\udc00\\U00010000\"") check(u"\uD800\uDC00", b"\"\\U00010000\"") check(u"\uD800\uDC01", b"\"\\U00010001\"") check(u"\uD801\uDC00", b"\"\\U00010400\"") check(u"\uDBFF\uDFFF", b"\"\\U0010ffff\"") check(u"'\uDBFF\uDFFF", b"\"'\\U0010ffff\"") check(u"\"\uDBFF\uDFFF", b"\"\\\"\\U0010ffff\"") # invalid UTF-8 check(b"\xFF", b"b\"\\xff\"") check(b"\x00\"$\\`\x80\xFF", b"b\"\\x00\\\"\\$\\\\\\`\\x80\\xff\"") def test_quote_output_ascii(self, enc='ascii'): def check(inp, out, optional_quotes=False, quote_newlines=None): self._check(inp, out, enc, optional_quotes, quote_newlines) self._test_quote_output_all(enc) check(u"\u00D7", b"\"\\xd7\"") check(u"'\u00D7", b"\"'\\xd7\"") check(u"\"\u00D7", b"\"\\\"\\xd7\"") check(u"\u2621", b"\"\\u2621\"") check(u"'\u2621", b"\"'\\u2621\"") check(u"\"\u2621", b"\"\\\"\\u2621\"") check(u"\n", b"'\n'", True, quote_newlines=False) check(u"\n", b"\"\\x0a\"", quote_newlines=True) def test_quote_output_latin1(self, enc='latin1'): def check(inp, out, optional_quotes=False, quote_newlines=None): self._check(inp, out.encode('latin1'), enc, optional_quotes, quote_newlines) self._test_quote_output_all(enc) check(u"\u00D7", u"'\u00D7'", True) check(u"'\u00D7", u"\"'\u00D7\"") check(u"\"\u00D7", u"'\"\u00D7'") check(u"\u00D7\"", u"'\u00D7\"'", True) check(u"\u2621", u"\"\\u2621\"") check(u"'\u2621", u"\"'\\u2621\"") check(u"\"\u2621", u"\"\\\"\\u2621\"") check(u"\n", u"'\n'", True, quote_newlines=False) check(u"\n", u"\"\\x0a\"", quote_newlines=True) def test_quote_output_utf8(self, enc='utf-8'): def check(inp, out, optional_quotes=False, quote_newlines=None): self._check(inp, out, enc, optional_quotes, quote_newlines) self._test_quote_output_all(enc) check(u"\u2621", u"'\u2621'", True) check(u"'\u2621", u"\"'\u2621\"") check(u"\"\u2621", u"'\"\u2621'") check(u"\u2621\"", u"'\u2621\"'", True) check(u"\n", u"'\n'", True, quote_newlines=False) check(u"\n", u"\"\\x0a\"", quote_newlines=True) def test_quote_output_default(self): """Default is the encoding of sys.stdout if known, otherwise utf-8.""" encoding = getattr(sys.stdout, "encoding") or "utf-8" self.assertEqual(quote_output(u"\u2621"), quote_output(u"\u2621", encoding=encoding)) def win32_other(win32, other): return win32 if sys.platform == "win32" else other class QuotePaths(ReallyEqualMixin, unittest.TestCase): def assertPathsEqual(self, actual, expected): expected = expected.decode("ascii") self.failUnlessReallyEqual(actual, expected) def test_quote_path(self): self.assertPathsEqual(quote_path([u'foo', u'bar']), b"'foo/bar'") self.assertPathsEqual(quote_path([u'foo', u'bar'], quotemarks=True), b"'foo/bar'") 
self.assertPathsEqual(quote_path([u'foo', u'bar'], quotemarks=False), b"foo/bar") self.assertPathsEqual(quote_path([u'foo', u'\nbar']), b'"foo/\\x0abar"') self.assertPathsEqual(quote_path([u'foo', u'\nbar'], quotemarks=True), b'"foo/\\x0abar"') self.assertPathsEqual(quote_path([u'foo', u'\nbar'], quotemarks=False), b'"foo/\\x0abar"') self.assertPathsEqual(quote_local_unicode_path(u"\\\\?\\C:\\foo"), win32_other(b"'C:\\foo'", b"'\\\\?\\C:\\foo'")) self.assertPathsEqual(quote_local_unicode_path(u"\\\\?\\C:\\foo", quotemarks=True), win32_other(b"'C:\\foo'", b"'\\\\?\\C:\\foo'")) self.assertPathsEqual(quote_local_unicode_path(u"\\\\?\\C:\\foo", quotemarks=False), win32_other(b"C:\\foo", b"\\\\?\\C:\\foo")) self.assertPathsEqual(quote_local_unicode_path(u"\\\\?\\UNC\\foo\\bar"), win32_other(b"'\\\\foo\\bar'", b"'\\\\?\\UNC\\foo\\bar'")) self.assertPathsEqual(quote_local_unicode_path(u"\\\\?\\UNC\\foo\\bar", quotemarks=True), win32_other(b"'\\\\foo\\bar'", b"'\\\\?\\UNC\\foo\\bar'")) self.assertPathsEqual(quote_local_unicode_path(u"\\\\?\\UNC\\foo\\bar", quotemarks=False), win32_other(b"\\\\foo\\bar", b"\\\\?\\UNC\\foo\\bar")) def test_quote_filepath(self): foo_bar_fp = FilePath(win32_other(u'C:\\foo\\bar', u'/foo/bar')) self.assertPathsEqual(quote_filepath(foo_bar_fp), win32_other(b"'C:\\foo\\bar'", b"'/foo/bar'")) self.assertPathsEqual(quote_filepath(foo_bar_fp, quotemarks=True), win32_other(b"'C:\\foo\\bar'", b"'/foo/bar'")) self.assertPathsEqual(quote_filepath(foo_bar_fp, quotemarks=False), win32_other(b"C:\\foo\\bar", b"/foo/bar")) if sys.platform == "win32": foo_longfp = FilePath(u'\\\\?\\C:\\foo') self.assertPathsEqual(quote_filepath(foo_longfp), b"'C:\\foo'") self.assertPathsEqual(quote_filepath(foo_longfp, quotemarks=True), b"'C:\\foo'") self.assertPathsEqual(quote_filepath(foo_longfp, quotemarks=False), b"C:\\foo") class FilePaths(ReallyEqualMixin, unittest.TestCase): def test_to_filepath(self): foo_u = win32_other(u'C:\\foo', u'/foo') nosep_fp = to_filepath(foo_u) sep_fp = to_filepath(foo_u + os.path.sep) for fp in (nosep_fp, sep_fp): self.failUnlessReallyEqual(fp, FilePath(foo_u)) self.failUnlessReallyEqual(fp.path, foo_u) if sys.platform == "win32": long_u = u'\\\\?\\C:\\foo' longfp = to_filepath(long_u + u'\\') self.failUnlessReallyEqual(longfp, FilePath(long_u)) self.failUnlessReallyEqual(longfp.path, long_u) def test_extend_filepath(self): foo_bfp = FilePath(win32_other(b'C:\\foo', b'/foo')) foo_ufp = FilePath(win32_other(u'C:\\foo', u'/foo')) foo_bar_baz_u = win32_other(u'C:\\foo\\bar\\baz', u'/foo/bar/baz') for foo_fp in (foo_bfp, foo_ufp): fp = extend_filepath(foo_fp, [u'bar', u'baz']) self.failUnlessReallyEqual(fp, FilePath(foo_bar_baz_u)) self.failUnlessReallyEqual(fp.path, foo_bar_baz_u) def test_unicode_from_filepath(self): foo_bfp = FilePath(win32_other(b'C:\\foo', b'/foo')) foo_ufp = FilePath(win32_other(u'C:\\foo', u'/foo')) foo_u = win32_other(u'C:\\foo', u'/foo') for foo_fp in (foo_bfp, foo_ufp): self.failUnlessReallyEqual(unicode_from_filepath(foo_fp), foo_u) def test_unicode_segments_from(self): foo_bfp = FilePath(win32_other(b'C:\\foo', b'/foo')) foo_ufp = FilePath(win32_other(u'C:\\foo', u'/foo')) foo_bar_baz_bfp = FilePath(win32_other(b'C:\\foo\\bar\\baz', b'/foo/bar/baz')) foo_bar_baz_ufp = FilePath(win32_other(u'C:\\foo\\bar\\baz', u'/foo/bar/baz')) for foo_fp in (foo_bfp, foo_ufp): for foo_bar_baz_fp in (foo_bar_baz_bfp, foo_bar_baz_ufp): self.failUnlessReallyEqual(unicode_segments_from(foo_bar_baz_fp, foo_fp), [u'bar', u'baz']) class 
UbuntuKarmicUTF8(EncodingUtil, unittest.TestCase): uname = 'Linux korn 2.6.31-14-generic #48-Ubuntu SMP Fri Oct 16 14:05:01 UTC 2009 x86_64' argv = b'lumi\xc3\xa8re' platform = 'linux2' filesystem_encoding = 'UTF-8' io_encoding = 'UTF-8' dirlist = [b'test_file', b'\xc3\x84rtonwall.mp3', b'Blah blah.txt'] class Windows(EncodingUtil, unittest.TestCase): uname = 'Windows XP 5.1.2600 x86 x86 Family 15 Model 75 Step ping 2, AuthenticAMD' argv = b'lumi\xc3\xa8re' platform = 'win32' filesystem_encoding = 'mbcs' io_encoding = 'utf-8' dirlist = [u'Blah blah.txt', u'test_file', u'\xc4rtonwall.mp3'] class MacOSXLeopard(EncodingUtil, unittest.TestCase): uname = 'Darwin g5.local 9.8.0 Darwin Kernel Version 9.8.0: Wed Jul 15 16:57:01 PDT 2009; root:xnu-1228.15.4~1/RELEASE_PPC Power Macintosh powerpc' output = b'lumi\xc3\xa8re' platform = 'darwin' filesystem_encoding = 'utf-8' io_encoding = 'UTF-8' dirlist = [u'A\u0308rtonwall.mp3', u'Blah blah.txt', u'test_file'] class TestToFromStr(ReallyEqualMixin, unittest.TestCase): def test_to_bytes(self): self.failUnlessReallyEqual(to_bytes(b"foo"), b"foo") self.failUnlessReallyEqual(to_bytes(b"lumi\xc3\xa8re"), b"lumi\xc3\xa8re") self.failUnlessReallyEqual(to_bytes(b"\xFF"), b"\xFF") # passes through invalid UTF-8 -- is this what we want? self.failUnlessReallyEqual(to_bytes(u"lumi\u00E8re"), b"lumi\xc3\xa8re") self.failUnlessReallyEqual(to_bytes(None), None) def test_from_utf8_or_none(self): self.failUnlessRaises(AssertionError, from_utf8_or_none, u"foo") self.failUnlessReallyEqual(from_utf8_or_none(b"lumi\xc3\xa8re"), u"lumi\u00E8re") self.failUnlessReallyEqual(from_utf8_or_none(None), None) self.failUnlessRaises(UnicodeDecodeError, from_utf8_or_none, b"\xFF") tahoe_lafs-1.20.0/src/allmydata/test/test_filenode.py0000644000000000000000000002036413615410400017540 0ustar00""" Ported to Python 3. 
""" from twisted.trial import unittest from allmydata import uri, client from allmydata.monitor import Monitor from allmydata.immutable.literal import LiteralFileNode from allmydata.immutable.filenode import ImmutableFileNode from allmydata.mutable.filenode import MutableFileNode from allmydata.util import hashutil from allmydata.util.consumer import download_to_data class NotANode(object): pass class FakeClient(object): # just enough to let the node acquire a downloader (which it won't use), # and to get default encoding parameters def getServiceNamed(self, name): return None def get_encoding_parameters(self): return {"k": 3, "n": 10} def get_storage_broker(self): return None def get_history(self): return None _secret_holder = client.SecretHolder(b"lease secret", b"convergence secret") class Node(unittest.TestCase): def test_chk_filenode(self): u = uri.CHKFileURI(key=b"\x00"*16, uri_extension_hash=b"\x00"*32, needed_shares=3, total_shares=10, size=1000) fn1 = ImmutableFileNode(u, None, None, None, None) fn2 = ImmutableFileNode(u, None, None, None, None) self.failUnlessEqual(fn1, fn2) self.failIfEqual(fn1, "I am not a filenode") self.failIfEqual(fn1, NotANode()) self.failUnlessEqual(fn1.get_uri(), u.to_string()) self.failUnlessEqual(fn1.get_cap(), u) self.failUnlessEqual(fn1.get_readcap(), u) self.failUnless(fn1.is_readonly()) self.failIf(fn1.is_mutable()) self.failIf(fn1.is_unknown()) self.failUnless(fn1.is_allowed_in_immutable_directory()) self.failUnlessEqual(fn1.get_write_uri(), None) self.failUnlessEqual(fn1.get_readonly_uri(), u.to_string()) self.failUnlessEqual(fn1.get_size(), 1000) self.failUnlessEqual(fn1.get_storage_index(), u.get_storage_index()) fn1.raise_error() fn2.raise_error() d = {} d[fn1] = 1 # exercise __hash__ v = fn1.get_verify_cap() self.failUnless(isinstance(v, uri.CHKFileVerifierURI)) self.failUnlessEqual(fn1.get_repair_cap(), v) self.failUnless(v.is_readonly()) self.failIf(v.is_mutable()) def test_literal_filenode(self): DATA = b"I am a short file." 
u = uri.LiteralFileURI(data=DATA) fn1 = LiteralFileNode(u) fn2 = LiteralFileNode(u) self.failUnlessEqual(fn1, fn2) self.failIfEqual(fn1, "I am not a filenode") self.failIfEqual(fn1, NotANode()) self.failUnlessEqual(fn1.get_uri(), u.to_string()) self.failUnlessEqual(fn1.get_cap(), u) self.failUnlessEqual(fn1.get_readcap(), u) self.failUnless(fn1.is_readonly()) self.failIf(fn1.is_mutable()) self.failIf(fn1.is_unknown()) self.failUnless(fn1.is_allowed_in_immutable_directory()) self.failUnlessEqual(fn1.get_write_uri(), None) self.failUnlessEqual(fn1.get_readonly_uri(), u.to_string()) self.failUnlessEqual(fn1.get_size(), len(DATA)) self.failUnlessEqual(fn1.get_storage_index(), None) fn1.raise_error() fn2.raise_error() d = {} d[fn1] = 1 # exercise __hash__ v = fn1.get_verify_cap() self.failUnlessEqual(v, None) self.failUnlessEqual(fn1.get_repair_cap(), None) d = download_to_data(fn1) def _check(res): self.failUnlessEqual(res, DATA) d.addCallback(_check) d.addCallback(lambda res: download_to_data(fn1, 1, 5)) def _check_segment(res): self.failUnlessEqual(res, DATA[1:1+5]) d.addCallback(_check_segment) d.addCallback(lambda ignored: fn1.get_best_readable_version()) d.addCallback(lambda fn2: self.failUnlessEqual(fn1, fn2)) d.addCallback(lambda ignored: fn1.get_size_of_best_version()) d.addCallback(lambda size: self.failUnlessEqual(size, len(DATA))) d.addCallback(lambda ignored: fn1.download_to_data()) d.addCallback(lambda data: self.failUnlessEqual(data, DATA)) d.addCallback(lambda ignored: fn1.download_best_version()) d.addCallback(lambda data: self.failUnlessEqual(data, DATA)) return d def test_mutable_filenode(self): client = FakeClient() wk = b"\x00"*16 rk = hashutil.ssk_readkey_hash(wk) si = hashutil.ssk_storage_index_hash(rk) u = uri.WriteableSSKFileURI(b"\x00"*16, b"\x00"*32) n = MutableFileNode(None, None, client.get_encoding_parameters(), None).init_from_cap(u) self.failUnlessEqual(n.get_writekey(), wk) self.failUnlessEqual(n.get_readkey(), rk) self.failUnlessEqual(n.get_storage_index(), si) # these items are populated on first read (or create), so until that # happens they'll be None self.failUnlessEqual(n.get_privkey(), None) self.failUnlessEqual(n.get_encprivkey(), None) self.failUnlessEqual(n.get_pubkey(), None) self.failUnlessEqual(n.get_uri(), u.to_string()) self.failUnlessEqual(n.get_write_uri(), u.to_string()) self.failUnlessEqual(n.get_readonly_uri(), u.get_readonly().to_string()) self.failUnlessEqual(n.get_cap(), u) self.failUnlessEqual(n.get_readcap(), u.get_readonly()) self.failUnless(n.is_mutable()) self.failIf(n.is_readonly()) self.failIf(n.is_unknown()) self.failIf(n.is_allowed_in_immutable_directory()) n.raise_error() n2 = MutableFileNode(None, None, client.get_encoding_parameters(), None).init_from_cap(u) self.failUnlessEqual(n, n2) self.failIfEqual(n, "not even the right type") self.failIfEqual(n, u) # not the right class n.raise_error() d = {n: "can these be used as dictionary keys?"} d[n2] = "replace the old one" self.failUnlessEqual(len(d), 1) nro = n.get_readonly() self.failUnless(isinstance(nro, MutableFileNode)) self.failUnlessEqual(nro.get_readonly(), nro) self.failUnlessEqual(nro.get_cap(), u.get_readonly()) self.failUnlessEqual(nro.get_readcap(), u.get_readonly()) self.failUnless(nro.is_mutable()) self.failUnless(nro.is_readonly()) self.failIf(nro.is_unknown()) self.failIf(nro.is_allowed_in_immutable_directory()) nro_u = nro.get_uri() self.failUnlessEqual(nro_u, nro.get_readonly_uri()) self.failUnlessEqual(nro_u, u.get_readonly().to_string()) 
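# Context for the remaining read-only checks (comment only): SSK
# capabilities form a one-way hash chain -- the writekey is hashed down to
# the readkey (ssk_readkey_hash) and the readkey down to the storage index
# (ssk_storage_index_hash), as set up at the top of this test -- so a node
# built from the read-only cap cannot recover write authority, which is
# what the next assertions about get_write_uri() and get_repair_cap()
# verify.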
self.failUnlessEqual(nro.get_write_uri(), None) self.failUnlessEqual(nro.get_repair_cap(), None) # RSAmut needs writecap nro.raise_error() v = n.get_verify_cap() self.failUnless(isinstance(v, uri.SSKVerifierURI)) self.failUnlessEqual(n.get_repair_cap(), n._uri) # TODO: n.get_uri() def test_mutable_filenode_equality(self): client = FakeClient() u = uri.WriteableSSKFileURI(b"\x00"*16, b"\x00"*32) n = MutableFileNode(None, None, client.get_encoding_parameters(), None).init_from_cap(u) u2 = uri.WriteableSSKFileURI(b"\x01"*16, b"\x01"*32) n2 = MutableFileNode(None, None, client.get_encoding_parameters(), None).init_from_cap(u2) n2b = MutableFileNode(None, None, client.get_encoding_parameters(), None).init_from_cap(u2) self.assertTrue(n2 == n2b) self.assertFalse(n2 != n2b) self.assertTrue(n2 != n) self.assertTrue(n != n2) self.assertFalse(n == n2) self.assertTrue(n != 3) self.assertFalse(n == 3) class LiteralChecker(unittest.TestCase): def test_literal_filenode(self): DATA = b"I am a short file." u = uri.LiteralFileURI(data=DATA) fn1 = LiteralFileNode(u) d = fn1.check(Monitor()) def _check_checker_results(cr): self.failUnlessEqual(cr, None) d.addCallback(_check_checker_results) d.addCallback(lambda res: fn1.check(Monitor(), verify=True)) d.addCallback(_check_checker_results) return d tahoe_lafs-1.20.0/src/allmydata/test/test_grid_manager.py0000644000000000000000000003504713615410400020376 0ustar00""" Tests for the grid manager. """ from datetime import ( timedelta, ) from twisted.python.filepath import ( FilePath, ) from hypothesis import given from allmydata.node import ( config_from_string, ) from allmydata.client import ( _valid_config as client_valid_config, ) from allmydata.crypto import ( ed25519, ) from allmydata.util import ( jsonbytes as json, ) from allmydata.grid_manager import ( load_grid_manager, save_grid_manager, create_grid_manager, parse_grid_manager_certificate, create_grid_manager_verifier, SignedCertificate, ) from allmydata.test.strategies import ( base32text, ) from .common import SyncTestCase class GridManagerUtilities(SyncTestCase): """ Confirm operation of utility functions used by GridManager """ def test_load_certificates(self): """ Grid Manager certificates are deserialized from config properly """ cert_path = self.mktemp() fake_cert = { "certificate": "{\"expires\":1601687822,\"public_key\":\"pub-v0-cbq6hcf3pxcz6ouoafrbktmkixkeuywpcpbcomzd3lqbkq4nmfga\",\"version\":1}", "signature": "fvjd3uvvupf2v6tnvkwjd473u3m3inyqkwiclhp7balmchkmn3px5pei3qyfjnhymq4cjcwvbpqmcwwnwswdtrfkpnlaxuih2zbdmda" } with open(cert_path, "wb") as f: f.write(json.dumps_bytes(fake_cert)) config_data = ( "[grid_managers]\n" "fluffy = pub-v0-vqimc4s5eflwajttsofisp5st566dbq36xnpp4siz57ufdavpvlq\n" "[grid_manager_certificates]\n" "ding = {}\n".format(cert_path) ) config = config_from_string("/foo", "portnum", config_data, client_valid_config()) self.assertEqual( {"fluffy": "pub-v0-vqimc4s5eflwajttsofisp5st566dbq36xnpp4siz57ufdavpvlq"}, config.enumerate_section("grid_managers") ) certs = config.get_grid_manager_certificates() self.assertEqual([fake_cert], certs) def test_load_certificates_invalid_version(self): """ An error is reported loading invalid certificate version """ gm_path = FilePath(self.mktemp()) gm_path.makedirs() config = { "grid_manager_config_version": 0, "private_key": "priv-v0-ub7knkkmkptqbsax4tznymwzc4nk5lynskwjsiubmnhcpd7lvlqa", "storage_servers": { "radia": { "public_key": "pub-v0-cbq6hcf3pxcz6ouoafrbktmkixkeuywpcpbcomzd3lqbkq4nmfga" } } } with 
gm_path.child("config.json").open("wb") as f: f.write(json.dumps_bytes(config)) fake_cert = { "certificate": "{\"expires\":1601687822,\"public_key\":\"pub-v0-cbq6hcf3pxcz6ouoafrbktmkixkeuywpcpbcomzd3lqbkq4nmfga\",\"version\":22}", "signature": "fvjd3uvvupf2v6tnvkwjd473u3m3inyqkwiclhp7balmchkmn3px5pei3qyfjnhymq4cjcwvbpqmcwwnwswdtrfkpnlaxuih2zbdmda" } with gm_path.child("radia.cert.0").open("wb") as f: f.write(json.dumps_bytes(fake_cert)) with self.assertRaises(ValueError) as ctx: load_grid_manager(gm_path) self.assertIn( "22", str(ctx.exception), ) def test_load_certificates_unknown_key(self): """ An error is reported loading certificates with invalid keys in them """ cert_path = self.mktemp() fake_cert = { "certificate": "{\"expires\":1601687822,\"public_key\":\"pub-v0-cbq6hcf3pxcz6ouoafrbktmkixkeuywpcpbcomzd3lqbkq4nmfga\",\"version\":22}", "signature": "fvjd3uvvupf2v6tnvkwjd473u3m3inyqkwiclhp7balmchkmn3px5pei3qyfjnhymq4cjcwvbpqmcwwnwswdtrfkpnlaxuih2zbdmda", "something-else": "not valid in a v0 certificate" } with open(cert_path, "wb") as f: f.write(json.dumps_bytes(fake_cert)) config_data = ( "[grid_manager_certificates]\n" "ding = {}\n".format(cert_path) ) config = config_from_string("/foo", "portnum", config_data, client_valid_config()) with self.assertRaises(ValueError) as ctx: config.get_grid_manager_certificates() self.assertIn( "Unknown key in Grid Manager certificate", str(ctx.exception) ) def test_load_certificates_missing(self): """ An error is reported for missing certificates """ cert_path = self.mktemp() config_data = ( "[grid_managers]\n" "fluffy = pub-v0-vqimc4s5eflwajttsofisp5st566dbq36xnpp4siz57ufdavpvlq\n" "[grid_manager_certificates]\n" "ding = {}\n".format(cert_path) ) config = config_from_string("/foo", "portnum", config_data, client_valid_config()) with self.assertRaises(ValueError) as ctx: config.get_grid_manager_certificates() # we don't reliably know how Windows or MacOS will represent # the path in the exception, so we don't check for the *exact* # message with full-path here.. self.assertIn( "Grid Manager certificate file", str(ctx.exception) ) self.assertIn( " doesn't exist", str(ctx.exception) ) class GridManagerVerifier(SyncTestCase): """ Tests related to rejecting or accepting Grid Manager certificates. """ def setUp(self): self.gm = create_grid_manager() return super(GridManagerVerifier, self).setUp() def test_sign_cert(self): """ For a storage server previously added to a grid manager, _GridManager.sign returns a dict with "certificate" and "signature" properties where the value of "signature" gives the ed25519 signature (using the grid manager's private key of the value) of "certificate". 
""" priv, pub = ed25519.create_signing_keypair() self.gm.add_storage_server("test", pub) cert0 = self.gm.sign("test", timedelta(seconds=86400)) cert1 = self.gm.sign("test", timedelta(seconds=3600)) self.assertNotEqual(cert0, cert1) self.assertIsInstance(cert0, SignedCertificate) gm_key = ed25519.verifying_key_from_string(self.gm.public_identity()) self.assertEqual( ed25519.verify_signature( gm_key, cert0.signature, cert0.certificate, ), None ) def test_sign_cert_wrong_name(self): """ Try to sign a storage-server that doesn't exist """ with self.assertRaises(KeyError): self.gm.sign("doesn't exist", timedelta(seconds=86400)) def test_add_cert(self): """ Add a storage-server and serialize it """ priv, pub = ed25519.create_signing_keypair() self.gm.add_storage_server("test", pub) data = self.gm.marshal() self.assertEqual( data["storage_servers"], { "test": { "public_key": ed25519.string_from_verifying_key(pub), } } ) def test_remove(self): """ Add then remove a storage-server """ priv, pub = ed25519.create_signing_keypair() self.gm.add_storage_server("test", pub) self.gm.remove_storage_server("test") self.assertEqual(len(self.gm.storage_servers), 0) def test_serialize(self): """ Write and then read a Grid Manager config """ priv0, pub0 = ed25519.create_signing_keypair() priv1, pub1 = ed25519.create_signing_keypair() self.gm.add_storage_server("test0", pub0) self.gm.add_storage_server("test1", pub1) tempdir = self.mktemp() fp = FilePath(tempdir) save_grid_manager(fp, self.gm) gm2 = load_grid_manager(fp) self.assertEqual( self.gm.public_identity(), gm2.public_identity(), ) self.assertEqual( len(self.gm.storage_servers), len(gm2.storage_servers), ) for name, ss0 in list(self.gm.storage_servers.items()): ss1 = gm2.storage_servers[name] self.assertEqual(ss0.name, ss1.name) self.assertEqual(ss0.public_key_string(), ss1.public_key_string()) self.assertEqual(self.gm.marshal(), gm2.marshal()) def test_invalid_no_version(self): """ Invalid Grid Manager config with no version """ tempdir = self.mktemp() fp = FilePath(tempdir) bad_config = { "private_key": "at least we have one", } fp.makedirs() with fp.child("config.json").open("w") as f: f.write(json.dumps_bytes(bad_config)) with self.assertRaises(ValueError) as ctx: load_grid_manager(fp) self.assertIn( "unknown version", str(ctx.exception), ) def test_invalid_certificate_bad_version(self): """ Invalid Grid Manager config containing a certificate with an illegal version """ tempdir = self.mktemp() fp = FilePath(tempdir) config = { "grid_manager_config_version": 0, "private_key": "priv-v0-ub7knkkmkptqbsax4tznymwzc4nk5lynskwjsiubmnhcpd7lvlqa", "storage_servers": { "alice": { "public_key": "pub-v0-cbq6hcf3pxcz6ouoafrbktmkixkeuywpcpbcomzd3lqbkq4nmfga" } } } bad_cert = { "certificate": "{\"expires\":1601687822,\"public_key\":\"pub-v0-cbq6hcf3pxcz6ouoafrbktmkixkeuywpcpbcomzd3lqbkq4nmfga\",\"version\":0}", "signature": "fvjd3uvvupf2v6tnvkwjd473u3m3inyqkwiclhp7balmchkmn3px5pei3qyfjnhymq4cjcwvbpqmcwwnwswdtrfkpnlaxuih2zbdmda" } fp.makedirs() with fp.child("config.json").open("w") as f: f.write(json.dumps_bytes(config)) with fp.child("alice.cert.0").open("w") as f: f.write(json.dumps_bytes(bad_cert)) with self.assertRaises(ValueError) as ctx: load_grid_manager(fp) self.assertIn( "Unknown certificate version", str(ctx.exception), ) def test_invalid_no_private_key(self): """ Invalid Grid Manager config with no private key """ tempdir = self.mktemp() fp = FilePath(tempdir) bad_config = { "grid_manager_config_version": 0, } fp.makedirs() with 
fp.child("config.json").open("w") as f: f.write(json.dumps_bytes(bad_config)) with self.assertRaises(ValueError) as ctx: load_grid_manager(fp) self.assertIn( "'private_key' required", str(ctx.exception), ) def test_invalid_bad_private_key(self): """ Invalid Grid Manager config with bad private-key """ tempdir = self.mktemp() fp = FilePath(tempdir) bad_config = { "grid_manager_config_version": 0, "private_key": "not actually encoded key", } fp.makedirs() with fp.child("config.json").open("w") as f: f.write(json.dumps_bytes(bad_config)) with self.assertRaises(ValueError) as ctx: load_grid_manager(fp) self.assertIn( "Invalid Grid Manager private_key", str(ctx.exception), ) def test_invalid_storage_server(self): """ Invalid Grid Manager config with missing public-key for storage-server """ tempdir = self.mktemp() fp = FilePath(tempdir) bad_config = { "grid_manager_config_version": 0, "private_key": "priv-v0-ub7knkkmkptqbsax4tznymwzc4nk5lynskwjsiubmnhcpd7lvlqa", "storage_servers": { "bad": {} } } fp.makedirs() with fp.child("config.json").open("w") as f: f.write(json.dumps_bytes(bad_config)) with self.assertRaises(ValueError) as ctx: load_grid_manager(fp) self.assertIn( "No 'public_key' for storage server", str(ctx.exception), ) def test_parse_cert(self): """ Parse an ostensibly valid storage certificate """ js = parse_grid_manager_certificate('{"certificate": "", "signature": ""}') self.assertEqual( set(js.keys()), {"certificate", "signature"} ) # the signature isn't *valid*, but that's checked in a # different function def test_parse_cert_not_dict(self): """ Certificate data not even a dict """ with self.assertRaises(ValueError) as ctx: parse_grid_manager_certificate("[]") self.assertIn( "must be a dict", str(ctx.exception), ) def test_parse_cert_missing_signature(self): """ Missing the signature """ with self.assertRaises(ValueError) as ctx: parse_grid_manager_certificate('{"certificate": ""}') self.assertIn( "must contain", str(ctx.exception), ) def test_validate_cert(self): """ Validate a correctly-signed certificate """ priv0, pub0 = ed25519.create_signing_keypair() self.gm.add_storage_server("test0", pub0) cert0 = self.gm.sign("test0", timedelta(seconds=86400)) verify = create_grid_manager_verifier( [self.gm._public_key], [cert0], ed25519.string_from_verifying_key(pub0), ) self.assertTrue(verify()) class GridManagerInvalidVerifier(SyncTestCase): """ Invalid certificate rejection tests """ def setUp(self): self.gm = create_grid_manager() self.priv0, self.pub0 = ed25519.create_signing_keypair() self.gm.add_storage_server("test0", self.pub0) self.cert0 = self.gm.sign("test0", timedelta(seconds=86400)) return super(GridManagerInvalidVerifier, self).setUp() @given( base32text(), ) def test_validate_cert_invalid(self, invalid_signature): """ An incorrect signature is rejected """ # make signature invalid invalid_cert = SignedCertificate( self.cert0.certificate, invalid_signature.encode("ascii"), ) verify = create_grid_manager_verifier( [self.gm._public_key], [invalid_cert], ed25519.string_from_verifying_key(self.pub0), bad_cert = lambda key, cert: None, ) self.assertFalse(verify()) tahoe_lafs-1.20.0/src/allmydata/test/test_happiness.py0000644000000000000000000004236513615410400017752 0ustar00# -*- coding: utf-8 -*- """ Tests for allmydata.immutable.happiness_upload and allmydata.util.happinessutil. Ported to Python 3. 
""" from twisted.trial import unittest from hypothesis import given from hypothesis.strategies import text, sets from allmydata.immutable import happiness_upload from allmydata.util.happinessutil import servers_of_happiness, \ shares_by_server, merge_servers from allmydata.test.common import ShouldFailMixin class HappinessUploadUtils(unittest.TestCase): """ test-cases for happiness_upload utility functions augmenting_path_for and residual_network. """ def test_residual_0(self): graph = happiness_upload._servermap_flow_graph( ['peer0'], ['share0'], servermap={ 'peer0': ['share0'], } ) flow = [[0 for _ in graph] for _ in graph] residual, capacity = happiness_upload.residual_network(graph, flow) # XXX no idea if these are right; hand-verify self.assertEqual(residual, [[1], [2], [3], []]) self.assertEqual(capacity, [[0, 1, 0, 0], [-1, 0, 1, 0], [0, -1, 0, 1], [0, 0, -1, 0]]) def test_trivial_maximum_graph(self): self.assertEqual( {}, happiness_upload._compute_maximum_graph([], {}) ) def test_trivial_flow_graph(self): self.assertEqual( [], happiness_upload._servermap_flow_graph(set(), set(), {}) ) class Happiness(unittest.TestCase): def test_placement_simple(self): shares = {'share0', 'share1', 'share2'} peers = {'peer0', 'peer1'} readonly_peers = {'peer0'} peers_to_shares = { 'peer0': {'share2'}, 'peer1': [], } places = happiness_upload.share_placement(peers, readonly_peers, shares, peers_to_shares) self.assertEqual( places, { 'share0': 'peer1', 'share1': 'peer1', 'share2': 'peer0', } ) def test_placement_1(self): shares = { 'share0', 'share1', 'share2', 'share3', 'share4', 'share5', 'share6', 'share7', 'share8', 'share9', } peers = { 'peer0', 'peer1', 'peer2', 'peer3', 'peer4', 'peer5', 'peer6', 'peer7', 'peer8', 'peer9', 'peerA', 'peerB', } readonly_peers = {'peer0', 'peer1', 'peer2', 'peer3'} peers_to_shares = { 'peer0': {'share0'}, 'peer1': {'share1'}, 'peer2': {'share2'}, 'peer3': {'share3'}, 'peer4': {'share4'}, 'peer5': {'share5'}, 'peer6': {'share6'}, 'peer7': {'share7'}, 'peer8': {'share8'}, 'peer9': {'share9'}, 'peerA': set(), 'peerB': set(), } places = happiness_upload.share_placement(peers, readonly_peers, shares, peers_to_shares) # actually many valid answers for this, so long as peer's 0, # 1, 2, 3 all have share 0, 1, 2 3. # share N maps to peer N # i.e. this says that share0 should be on peer0, share1 should # be on peer1, etc. 
expected = { 'share{}'.format(i): 'peer{}'.format(i) for i in range(10) } self.assertEqual(expected, places) def test_unhappy(self): shares = { 'share1', 'share2', 'share3', 'share4', 'share5', } peers = { 'peer1', 'peer2', 'peer3', 'peer4', } readonly_peers = set() peers_to_shares = {} places = happiness_upload.share_placement(peers, readonly_peers, shares, peers_to_shares) happiness = happiness_upload.calculate_happiness(places) self.assertEqual(4, happiness) def test_hypothesis0(self): peers={u'0', u'00'} shares={u'0', u'1'} readonly_peers = set() peers_to_shares = dict() #h = happiness_upload.HappinessUpload(peers, readonly_peers, shares, peers_to_shares) #places = h.generate_mappings() #happiness = h.happiness() places = happiness_upload.share_placement(peers, readonly_peers, shares, peers_to_shares) happiness = happiness_upload.calculate_happiness(places) self.assertEqual(2, happiness) def test_100(self): peers = set(['peer{}'.format(x) for x in range(100)]) shares = set(['share{}'.format(x) for x in range(100)]) readonly_peers = set() peers_to_shares = dict() places = happiness_upload.share_placement(peers, readonly_peers, shares, peers_to_shares) happiness = happiness_upload.calculate_happiness(places) self.assertEqual(100, happiness) def test_redistribute(self): """ with existing shares 0, 3 on a single servers we can achieve higher happiness by moving one of those shares to a new server """ peers = {'a', 'b', 'c', 'd'} shares = {'0', '1', '2', '3'} readonly_peers = set() peers_to_shares = { 'a': set(['0']), 'b': set(['1']), 'c': set(['2', '3']), } # we can achieve more happiness by moving "2" or "3" to server "d" places = happiness_upload.share_placement(peers, readonly_peers, shares, peers_to_shares) #print("places %s" % places) #places = happiness_upload.slow_share_placement(peers, readonly_peers, shares, peers_to_shares) #print("places %s" % places) happiness = happiness_upload.calculate_happiness(places) self.assertEqual(4, happiness) def test_calc_happy(self): # share -> server share_placements = { 0: "\x0e\xd6\xb3>\xd6\x85\x9d\x94')'\xf03:R\x88\xf1\x04\x1b\xa4", 1: '\xb9\xa3N\x80u\x9c_\xf7\x97FSS\xa7\xbd\x02\xf9f$:\t', 2: '\xb9\xa3N\x80u\x9c_\xf7\x97FSS\xa7\xbd\x02\xf9f$:\t', 3: '\xb9\xa3N\x80u\x9c_\xf7\x97FSS\xa7\xbd\x02\xf9f$:\t', 4: '\xb9\xa3N\x80u\x9c_\xf7\x97FSS\xa7\xbd\x02\xf9f$:\t', 5: '\xb9\xa3N\x80u\x9c_\xf7\x97FSS\xa7\xbd\x02\xf9f$:\t', 6: '\xb9\xa3N\x80u\x9c_\xf7\x97FSS\xa7\xbd\x02\xf9f$:\t', 7: '\xb9\xa3N\x80u\x9c_\xf7\x97FSS\xa7\xbd\x02\xf9f$:\t', 8: '\xb9\xa3N\x80u\x9c_\xf7\x97FSS\xa7\xbd\x02\xf9f$:\t', 9: '\xb9\xa3N\x80u\x9c_\xf7\x97FSS\xa7\xbd\x02\xf9f$:\t', } happy = happiness_upload.calculate_happiness(share_placements) self.assertEqual(2, happy) def test_hypothesis_0(self): """ an error-case Hypothesis found """ peers={u'0'} shares={u'0', u'1'} places = happiness_upload.share_placement(peers, set(), shares, {}) happiness = happiness_upload.calculate_happiness(places) assert set(places.values()).issubset(peers) assert happiness == min(len(peers), len(shares)) def test_hypothesis_1(self): """ an error-case Hypothesis found """ peers = {u'0', u'1', u'2', u'3'} shares = {u'0', u'1', u'2', u'3', u'4', u'5', u'6', u'7', u'8'} places = happiness_upload.share_placement(peers, set(), shares, {}) happiness = happiness_upload.calculate_happiness(places) assert set(places.values()).issubset(peers) assert happiness == min(len(peers), len(shares)) def test_everything_broken(self): peers = set() shares = {u'0', u'1', u'2', u'3'} places = 
happiness_upload.share_placement(peers, set(), shares, {}) self.assertEqual(places, dict()) class PlacementTests(unittest.TestCase): @given( sets(elements=text(min_size=1, max_size=30), min_size=4, max_size=4), sets(elements=text(min_size=1, max_size=30), min_size=4), ) def test_hypothesis_unhappy(self, peers, shares): """ similar to test_unhappy we test that the resulting happiness is always 4 since the size of peers is 4. """ # https://hypothesis.readthedocs.io/en/latest/data.html#hypothesis.strategies.sets # hypothesis.strategies.sets(elements=None, min_size=None, average_size=None, max_size=None)[source] readonly_peers = set() peers_to_shares = {} places = happiness_upload.share_placement(peers, readonly_peers, shares, peers_to_shares) happiness = happiness_upload.calculate_happiness(places) assert set(places.keys()) == shares assert happiness == 4 @given( sets(elements=text(min_size=1, max_size=30), min_size=1, max_size=10), # can we make a readonly_peers that's a subset of ^ sets(elements=text(min_size=1, max_size=30), min_size=1, max_size=20), ) def test_more_hypothesis(self, peers, shares): """ similar to test_unhappy we test that the resulting happiness is always either the number of peers or the number of shares whichever is smaller. """ # https://hypothesis.readthedocs.io/en/latest/data.html#hypothesis.strategies.sets # hypothesis.strategies.sets(elements=None, min_size=None, average_size=None, max_size=None)[source] # XXX would be nice to paramaterize these by hypothesis too readonly_peers = set() peers_to_shares = {} places = happiness_upload.share_placement(peers, readonly_peers, set(list(shares)), peers_to_shares) happiness = happiness_upload.calculate_happiness(places) # every share should get placed assert set(places.keys()) == shares # we should only use peers that exist assert set(places.values()).issubset(peers) # if we have more shares than peers, happiness is at most # of # peers; if we have fewer shares than peers happiness is capped at # # of peers. assert happiness == min(len(peers), len(shares)) class FakeServerTracker(object): def __init__(self, serverid, buckets): self._serverid = serverid self.buckets = buckets def get_serverid(self): return self._serverid class HappinessUtilTests(unittest.TestCase, ShouldFailMixin): """Tests for happinesutil.py.""" def test_merge_servers(self): # merge_servers merges a list of upload_servers and a dict of # shareid -> serverid mappings. shares = { 1 : set(["server1"]), 2 : set(["server2"]), 3 : set(["server3"]), 4 : set(["server4", "server5"]), 5 : set(["server1", "server2"]), } # if not provided with a upload_servers argument, it should just # return the first argument unchanged. 
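        # A minimal sketch of the merge semantics exercised below (an
        # illustrative assumption; the real implementation lives in
        # allmydata.util.happinessutil and may differ in detail):
        #
        #     def merge_servers_sketch(share_map, upload_trackers):
        #         merged = {shnum: set(ids) for shnum, ids in share_map.items()}
        #         for tracker in upload_trackers:
        #             for shnum in tracker.buckets:
        #                 merged.setdefault(shnum, set()).add(tracker.get_serverid())
        #         return merged
        #
        # i.e. each tracker contributes its serverid to every share number it
        # holds buckets for, which is exactly what the expected dicts below
        # encode.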
self.failUnlessEqual(shares, merge_servers(shares, set([]))) trackers = [] for (i, server) in [(i, "server%d" % i) for i in range(5, 9)]: t = FakeServerTracker(server, [i]) trackers.append(t) expected = { 1 : set(["server1"]), 2 : set(["server2"]), 3 : set(["server3"]), 4 : set(["server4", "server5"]), 5 : set(["server1", "server2", "server5"]), 6 : set(["server6"]), 7 : set(["server7"]), 8 : set(["server8"]), } self.failUnlessEqual(expected, merge_servers(shares, set(trackers))) shares2 = {} expected = { 5 : set(["server5"]), 6 : set(["server6"]), 7 : set(["server7"]), 8 : set(["server8"]), } self.failUnlessEqual(expected, merge_servers(shares2, set(trackers))) shares3 = {} trackers = [] expected = {} for (i, server) in [(i, "server%d" % i) for i in range(10)]: shares3[i] = set([server]) t = FakeServerTracker(server, [i]) trackers.append(t) expected[i] = set([server]) self.failUnlessEqual(expected, merge_servers(shares3, set(trackers))) def test_servers_of_happiness_utility_function(self): # These tests are concerned with the servers_of_happiness() # utility function, and its underlying matching algorithm. Other # aspects of the servers_of_happiness behavior are tested # elsehwere These tests exist to ensure that # servers_of_happiness doesn't under or overcount the happiness # value for given inputs. # servers_of_happiness expects a dict of # shnum => set(serverids) as a preexisting shares argument. test1 = { 1 : set(["server1"]), 2 : set(["server2"]), 3 : set(["server3"]), 4 : set(["server4"]) } happy = servers_of_happiness(test1) self.failUnlessEqual(4, happy) test1[4] = set(["server1"]) # We've added a duplicate server, so now servers_of_happiness # should be 3 instead of 4. happy = servers_of_happiness(test1) self.failUnlessEqual(3, happy) # The second argument of merge_servers should be a set of objects with # serverid and buckets as attributes. In actual use, these will be # ServerTracker instances, but for testing it is fine to make a # FakeServerTracker whose job is to hold those instance variables to # test that part. trackers = [] for (i, server) in [(i, "server%d" % i) for i in range(5, 9)]: t = FakeServerTracker(server, [i]) trackers.append(t) # Recall that test1 is a server layout with servers_of_happiness # = 3. Since there isn't any overlap between the shnum -> # set([serverid]) correspondences in test1 and those in trackers, # the result here should be 7. test2 = merge_servers(test1, set(trackers)) happy = servers_of_happiness(test2) self.failUnlessEqual(7, happy) # Now add an overlapping server to trackers. This is redundant, # so it should not cause the previously reported happiness value # to change. t = FakeServerTracker("server1", [1]) trackers.append(t) test2 = merge_servers(test1, set(trackers)) happy = servers_of_happiness(test2) self.failUnlessEqual(7, happy) test = {} happy = servers_of_happiness(test) self.failUnlessEqual(0, happy) # Test a more substantial overlap between the trackers and the # existing assignments. test = { 1 : set(['server1']), 2 : set(['server2']), 3 : set(['server3']), 4 : set(['server4']), } trackers = [] t = FakeServerTracker('server5', [4]) trackers.append(t) t = FakeServerTracker('server6', [3, 5]) trackers.append(t) # The value returned by servers_of_happiness is the size # of a maximum matching in the bipartite graph that # servers_of_happiness() makes between serverids and share # numbers. 
It should find something like this: # (server 1, share 1) # (server 2, share 2) # (server 3, share 3) # (server 5, share 4) # (server 6, share 5) # # and, since there are 5 edges in this matching, it should # return 5. test2 = merge_servers(test, set(trackers)) happy = servers_of_happiness(test2) self.failUnlessEqual(5, happy) # Zooko's first puzzle: # (from http://allmydata.org/trac/tahoe-lafs/ticket/778#comment:156) # # server 1: shares 0, 1 # server 2: shares 1, 2 # server 3: share 2 # # This should yield happiness of 3. test = { 0 : set(['server1']), 1 : set(['server1', 'server2']), 2 : set(['server2', 'server3']), } self.failUnlessEqual(3, servers_of_happiness(test)) # Zooko's second puzzle: # (from http://allmydata.org/trac/tahoe-lafs/ticket/778#comment:158) # # server 1: shares 0, 1 # server 2: share 1 # # This should yield happiness of 2. test = { 0 : set(['server1']), 1 : set(['server1', 'server2']), } self.failUnlessEqual(2, servers_of_happiness(test)) def test_shares_by_server(self): test = dict([(i, set(["server%d" % i])) for i in range(1, 5)]) sbs = shares_by_server(test) self.failUnlessEqual(set([1]), sbs["server1"]) self.failUnlessEqual(set([2]), sbs["server2"]) self.failUnlessEqual(set([3]), sbs["server3"]) self.failUnlessEqual(set([4]), sbs["server4"]) test1 = { 1 : set(["server1"]), 2 : set(["server1"]), 3 : set(["server1"]), 4 : set(["server2"]), 5 : set(["server2"]) } sbs = shares_by_server(test1) self.failUnlessEqual(set([1, 2, 3]), sbs["server1"]) self.failUnlessEqual(set([4, 5]), sbs["server2"]) # This should fail unless the serverid part of the mapping is a set test2 = {1: "server1"} self.shouldFail(AssertionError, "test_shares_by_server", "", shares_by_server, test2) tahoe_lafs-1.20.0/src/allmydata/test/test_hashtree.py0000644000000000000000000002273413615410400017561 0ustar00""" Tests for allmydata.hashtree. 
""" from __future__ import annotations from .common import SyncTestCase from base64 import b32encode from allmydata.util.hashutil import tagged_hash from allmydata import hashtree def make_tree(numleaves): leaves = [b"%d" % i for i in range(numleaves)] leaf_hashes = [tagged_hash(b"tag", leaf) for leaf in leaves] ht = hashtree.HashTree(leaf_hashes) return ht class Complete(SyncTestCase): def test_create(self): # try out various sizes, since we pad to a power of two ht = make_tree(6) ht = make_tree(9) ht = make_tree(8) root = ht[0] self.failUnlessEqual(len(root), 32) self.failUnlessEqual(ht.get_leaf(0), tagged_hash(b"tag", b"0")) self.failUnlessRaises(IndexError, ht.get_leaf, 8) self.failUnlessEqual(ht.get_leaf_index(0), 7) self.failUnlessRaises(IndexError, ht.parent, 0) self.failUnlessRaises(IndexError, ht.needed_for, -1) def test_well_known_tree(self): self.assertEqual( [b32encode(s).strip(b"=").lower() for s in make_tree(3)], [b'vxuqudnucceja4pqkdqy5txapagxubm5moupzqywkbg2jrjkaola', b'weycjri4jlcaunca2jyx2kr7sbtb7qdriog3f26g5jpc5awfeazq', b'5ovy3g2wwjnxoqtja4licckxkbqjef4xsjtclk6gxnsl66kvow6a', b'esd34nbzri75l3j2vwetpk3dvlvsxstkbaktomonrulpks3df3sq', b'jkxbwa2tppyfax35o72tbjecxvaa4xphma6zbyfbkkku3ed2657a', b'wfisavaqgab2raihe7dld2qjps4rtxyiubgfs5enziokey2msjwa', b't3kza5vwx3tlowdemmgdyigp62ju57qduyfh7uulnfkc7mj2ncrq'], ) def test_needed_hashes(self): ht = make_tree(8) self.failUnlessEqual(ht.needed_hashes(0), set([8, 4, 2])) self.failUnlessEqual(ht.needed_hashes(0, True), set([7, 8, 4, 2])) self.failUnlessEqual(ht.needed_hashes(1), set([7, 4, 2])) self.failUnlessEqual(ht.needed_hashes(7), set([13, 5, 1])) self.failUnlessEqual(ht.needed_hashes(7, False), set([13, 5, 1])) self.failUnlessEqual(ht.needed_hashes(7, True), set([14, 13, 5, 1])) def test_dump(self): ht = make_tree(6) expected = [(0,0), (1,1), (3,2), (7,3), (8,3), (4,2), (9,3), (10,3), (2,1), (5,2), (11,3), (12,3), (6,2), (13,3), (14,3), ] self.failUnlessEqual(list(ht.depth_first()), expected) d = "\n" + ht.dump() #print(d) self.failUnless("\n 0:" in d) self.failUnless("\n 1:" in d) self.failUnless("\n 3:" in d) self.failUnless("\n 7:" in d) self.failUnless("\n 8:" in d) self.failUnless("\n 4:" in d) class Incomplete(SyncTestCase): def test_create(self): ht = hashtree.IncompleteHashTree(6) ht = hashtree.IncompleteHashTree(9) ht = hashtree.IncompleteHashTree(8) self.failUnlessEqual(ht[0], None) self.failUnlessEqual(ht.get_leaf(0), None) self.failUnlessRaises(IndexError, ht.get_leaf, 8) self.failUnlessEqual(ht.get_leaf_index(0), 7) def test_needed_hashes(self): ht = hashtree.IncompleteHashTree(8) self.failUnlessEqual(ht.needed_hashes(0), set([8, 4, 2])) self.failUnlessEqual(ht.needed_hashes(0, True), set([7, 8, 4, 2])) self.failUnlessEqual(ht.needed_hashes(1), set([7, 4, 2])) self.failUnlessEqual(ht.needed_hashes(7), set([13, 5, 1])) self.failUnlessEqual(ht.needed_hashes(7, False), set([13, 5, 1])) self.failUnlessEqual(ht.needed_hashes(7, True), set([14, 13, 5, 1])) ht = hashtree.IncompleteHashTree(1) self.failUnlessEqual(ht.needed_hashes(0), set([])) ht = hashtree.IncompleteHashTree(6) self.failUnlessEqual(ht.needed_hashes(0), set([8, 4, 2])) self.failUnlessEqual(ht.needed_hashes(0, True), set([7, 8, 4, 2])) self.failUnlessEqual(ht.needed_hashes(1), set([7, 4, 2])) self.failUnlessEqual(ht.needed_hashes(5), set([11, 6, 1])) self.failUnlessEqual(ht.needed_hashes(5, False), set([11, 6, 1])) self.failUnlessEqual(ht.needed_hashes(5, True), set([12, 11, 6, 1])) def test_depth_of(self): hashtree.IncompleteHashTree(8) 
self.failUnlessEqual(hashtree.depth_of(0), 0) for i in [1,2]: self.failUnlessEqual(hashtree.depth_of(i), 1, "i=%d"%i) for i in [3,4,5,6]: self.failUnlessEqual(hashtree.depth_of(i), 2, "i=%d"%i) for i in [7,8,9,10,11,12,13,14]: self.failUnlessEqual(hashtree.depth_of(i), 3, "i=%d"%i) def test_large(self): # IncompleteHashTree.set_hashes() used to take O(N**2). This test is # meant to show that it now takes O(N) or maybe O(N*ln(N)). I wish # there were a good way to assert this (like counting VM operations # or something): the problem was inside list.sort(), so there's no # good way to instrument set_hashes() to count what we care about. On # my laptop, 10k leaves takes 1.1s in this fixed version, and 11.6s # in the old broken version. An 80k-leaf test (corresponding to a # 10GB file with a 128KiB segsize) 10s in the fixed version, and # several hours in the broken version, but 10s on my laptop (plus the # 20s of setup code) probably means 200s on our dapper buildslave, # which is painfully long for a unit test. self.do_test_speed(10000) def do_test_speed(self, SIZE): # on my laptop, SIZE=80k (corresponding to a 10GB file with a 128KiB # segsize) takes: # 7s to build the (complete) HashTree # 13s to set up the dictionary # 10s to run set_hashes() ht = make_tree(SIZE) iht = hashtree.IncompleteHashTree(SIZE) needed = set() for i in range(SIZE): needed.update(ht.needed_hashes(i, True)) all = dict([ (i, ht[i]) for i in needed]) iht.set_hashes(hashes=all) def test_check(self): # first create a complete hash tree ht = make_tree(6) # then create a corresponding incomplete tree iht = hashtree.IncompleteHashTree(6) # suppose we wanted to validate leaf[0] # leaf[0] is the same as node[7] self.failUnlessEqual(iht.needed_hashes(0), set([8, 4, 2])) self.failUnlessEqual(iht.needed_hashes(0, True), set([7, 8, 4, 2])) self.failUnlessEqual(iht.needed_hashes(1), set([7, 4, 2])) iht[0] = ht[0] # set the root self.failUnlessEqual(iht.needed_hashes(0), set([8, 4, 2])) self.failUnlessEqual(iht.needed_hashes(1), set([7, 4, 2])) iht[5] = ht[5] self.failUnlessEqual(iht.needed_hashes(0), set([8, 4, 2])) self.failUnlessEqual(iht.needed_hashes(1), set([7, 4, 2])) # reset iht = hashtree.IncompleteHashTree(6) current_hashes = list(iht) # this should fail because there aren't enough hashes known try: iht.set_hashes(leaves={0: tagged_hash(b"tag", b"0")}) except hashtree.NotEnoughHashesError: pass else: self.fail("didn't catch not enough hashes") # and the set of hashes stored in the tree should still be the same self.failUnlessEqual(list(iht), current_hashes) # and we should still need the same self.failUnlessEqual(iht.needed_hashes(0), set([8, 4, 2])) chain = {0: ht[0], 2: ht[2], 4: ht[4], 8: ht[8]} # this should fail because the leaf hash is just plain wrong try: iht.set_hashes(chain, leaves={0: tagged_hash(b"bad tag", b"0")}) except hashtree.BadHashError: pass else: self.fail("didn't catch bad hash") # this should fail because we give it conflicting hashes: one as an # internal node, another as a leaf try: iht.set_hashes(chain, leaves={1: tagged_hash(b"bad tag", b"1")}) except hashtree.BadHashError: pass else: self.fail("didn't catch bad hash") bad_chain = chain.copy() bad_chain[2] = ht[2] + b"BOGUS" # this should fail because the internal hash is wrong try: iht.set_hashes(bad_chain, leaves={0: tagged_hash(b"tag", b"0")}) except hashtree.BadHashError: pass else: self.fail("didn't catch bad hash") # this should succeed try: iht.set_hashes(chain, leaves={0: tagged_hash(b"tag", b"0")}) except hashtree.BadHashError as e: 
self.fail("bad hash: %s" % e) self.failUnlessEqual(ht.get_leaf(0), tagged_hash(b"tag", b"0")) self.failUnlessRaises(IndexError, ht.get_leaf, 8) # this should succeed too try: iht.set_hashes(leaves={1: tagged_hash(b"tag", b"1")}) except hashtree.BadHashError: self.fail("bad hash") # this should fail because we give it hashes that conflict with some # that we added successfully before try: iht.set_hashes(leaves={1: tagged_hash(b"bad tag", b"1")}) except hashtree.BadHashError: pass else: self.fail("didn't catch bad hash") # now that leaves 0 and 1 are known, some of the internal nodes are # known self.failUnlessEqual(iht.needed_hashes(4), set([12, 6])) chain = {6: ht[6], 12: ht[12]} # this should succeed try: iht.set_hashes(chain, leaves={4: tagged_hash(b"tag", b"4")}) except hashtree.BadHashError as e: self.fail("bad hash: %s" % e) tahoe_lafs-1.20.0/src/allmydata/test/test_hashutil.py0000644000000000000000000002315413615410400017574 0ustar00""" Tests for allmydata.util.hashutil. Ported to Python 3. """ from twisted.trial import unittest from allmydata.util import hashutil, base32 class HashUtilTests(unittest.TestCase): def test_random_key(self): k = hashutil.random_key() self.failUnlessEqual(len(k), hashutil.KEYLEN) self.assertIsInstance(k, bytes) def test_sha256d(self): h1 = hashutil.tagged_hash(b"tag1", b"value") self.assertIsInstance(h1, bytes) h2 = hashutil.tagged_hasher(b"tag1") h2.update(b"value") h2a = h2.digest() h2b = h2.digest() self.assertIsInstance(h2a, bytes) self.failUnlessEqual(h1, h2a) self.failUnlessEqual(h2a, h2b) def test_sha256d_truncated(self): h1 = hashutil.tagged_hash(b"tag1", b"value", 16) h2 = hashutil.tagged_hasher(b"tag1", 16) h2.update(b"value") h2 = h2.digest() self.failUnlessEqual(len(h1), 16) self.failUnlessEqual(len(h2), 16) self.failUnlessEqual(h1, h2) def test_well_known_tagged_hash(self): self.assertEqual( b"yra322btzoqjp4ts2jon5dztgnilcdg6jgztgk7joi6qpjkitg2q", base32.b2a(hashutil.tagged_hash(b"tag", b"hello world")), ) self.assertEqual( b"kfbsfssrv2bvtp3regne6j7gpdjcdjwncewriyfdtt764o5oa7ta", base32.b2a(hashutil.tagged_hash(b"different", b"hello world")), ) self.assertEqual( b"z34pzkgo36chbjz2qykonlxthc4zdqqquapw4bcaoogzvmmcr3zq", base32.b2a(hashutil.tagged_hash(b"different", b"goodbye world")), ) def test_well_known_tagged_pair_hash(self): self.assertEqual( b"wmto44q3shtezwggku2fxztfkwibvznkfu6clatnvfog527sb6dq", base32.b2a(hashutil.tagged_pair_hash(b"tag", b"hello", b"world")), ) self.assertEqual( b"lzn27njx246jhijpendqrxlk4yb23nznbcrihommbymg5e7quh4a", base32.b2a(hashutil.tagged_pair_hash(b"different", b"hello", b"world")), ) self.assertEqual( b"qnehpoypxxdhjheqq7dayloghtu42yr55uylc776zt23ii73o3oq", base32.b2a(hashutil.tagged_pair_hash(b"different", b"goodbye", b"world")), ) def test_chk(self): h1 = hashutil.convergence_hash(3, 10, 1000, b"data", b"secret") h2 = hashutil.convergence_hasher(3, 10, 1000, b"secret") h2.update(b"data") h2 = h2.digest() self.failUnlessEqual(h1, h2) self.assertIsInstance(h1, bytes) self.assertIsInstance(h2, bytes) def test_hashers(self): h1 = hashutil.block_hash(b"foo") h2 = hashutil.block_hasher() h2.update(b"foo") self.failUnlessEqual(h1, h2.digest()) self.assertIsInstance(h1, bytes) h1 = hashutil.uri_extension_hash(b"foo") h2 = hashutil.uri_extension_hasher() h2.update(b"foo") self.failUnlessEqual(h1, h2.digest()) self.assertIsInstance(h1, bytes) h1 = hashutil.plaintext_hash(b"foo") h2 = hashutil.plaintext_hasher() h2.update(b"foo") self.failUnlessEqual(h1, h2.digest()) self.assertIsInstance(h1, bytes) h1 = 
hashutil.crypttext_hash(b"foo") h2 = hashutil.crypttext_hasher() h2.update(b"foo") self.failUnlessEqual(h1, h2.digest()) self.assertIsInstance(h1, bytes) h1 = hashutil.crypttext_segment_hash(b"foo") h2 = hashutil.crypttext_segment_hasher() h2.update(b"foo") self.failUnlessEqual(h1, h2.digest()) self.assertIsInstance(h1, bytes) h1 = hashutil.plaintext_segment_hash(b"foo") h2 = hashutil.plaintext_segment_hasher() h2.update(b"foo") self.failUnlessEqual(h1, h2.digest()) self.assertIsInstance(h1, bytes) def test_timing_safe_compare(self): self.failUnless(hashutil.timing_safe_compare(b"a", b"a")) self.failUnless(hashutil.timing_safe_compare(b"ab", b"ab")) self.failIf(hashutil.timing_safe_compare(b"a", b"b")) self.failIf(hashutil.timing_safe_compare(b"a", b"aa")) def _testknown(self, hashf, expected_a, *args): got = hashf(*args) self.assertIsInstance(got, bytes) got_a = base32.b2a(got) self.failUnlessEqual(got_a, expected_a) def test_storage_index_hash_known_answers(self): """ Verify backwards compatibility by comparing ``storage_index_hash`` outputs for some well-known (to us) inputs. """ # This is a marginal case. b"" is not a valid aes 128 key. The # implementation does nothing to avoid producing a result for it, # though. self._testknown(hashutil.storage_index_hash, b"qb5igbhcc5esa6lwqorsy7e6am", b"") # This is a little bit more realistic though clearly this is a poor key choice. self._testknown(hashutil.storage_index_hash, b"wvggbrnrezdpa5yayrgiw5nzja", b"x" * 16) # Here's a much more realistic key that I generated by reading some # bytes from /dev/urandom. I computed the expected hash value twice. # First using hashlib.sha256 and then with sha256sum(1). The input # string given to the hash function was "43:," # in each case. self._testknown( hashutil.storage_index_hash, b"aarbseqqrpsfowduchcjbonscq", base32.a2b(b"2ckv3dfzh6rgjis6ogfqhyxnzy"), ) def test_convergence_hasher_tag(self): """ ``_convergence_hasher_tag`` constructs the convergence hasher tag from a unique prefix, the required, total, and segment size parameters, and a convergence secret. """ self.assertEqual( b"allmydata_immutable_content_to_key_with_added_secret_v1+" b"16:\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42," b"9:3,10,1024,", hashutil._convergence_hasher_tag( k=3, n=10, segsize=1024, convergence=b"\x42" * 16, ), ) def test_convergence_hasher_out_of_bounds(self): """ ``_convergence_hasher_tag`` raises ``ValueError`` if k or n is not between 1 and 256 inclusive or if k is greater than n. """ segsize = 1024 secret = b"\x42" * 16 for bad_k in (0, 2, 257): with self.assertRaises(ValueError): hashutil._convergence_hasher_tag( k=bad_k, n=1, segsize=segsize, convergence=secret, ) for bad_n in (0, 1, 257): with self.assertRaises(ValueError): hashutil._convergence_hasher_tag( k=2, n=bad_n, segsize=segsize, convergence=secret, ) def test_known_answers(self): """ Verify backwards compatibility by comparing hash outputs for some well-known (to us) inputs. 
""" self._testknown(hashutil.block_hash, b"msjr5bh4evuh7fa3zw7uovixfbvlnstr5b65mrerwfnvjxig2jvq", b"") self._testknown(hashutil.uri_extension_hash, b"wthsu45q7zewac2mnivoaa4ulh5xvbzdmsbuyztq2a5fzxdrnkka", b"") self._testknown(hashutil.plaintext_hash, b"5lz5hwz3qj3af7n6e3arblw7xzutvnd3p3fjsngqjcb7utf3x3da", b"") self._testknown(hashutil.crypttext_hash, b"itdj6e4njtkoiavlrmxkvpreosscssklunhwtvxn6ggho4rkqwga", b"") self._testknown(hashutil.crypttext_segment_hash, b"aovy5aa7jej6ym5ikgwyoi4pxawnoj3wtaludjz7e2nb5xijb7aa", b"") self._testknown(hashutil.plaintext_segment_hash, b"4fdgf6qruaisyukhqcmoth4t3li6bkolbxvjy4awwcpprdtva7za", b"") self._testknown(hashutil.convergence_hash, b"3mo6ni7xweplycin6nowynw2we", 3, 10, 100, b"", b"converge") self._testknown(hashutil.my_renewal_secret_hash, b"ujhr5k5f7ypkp67jkpx6jl4p47pyta7hu5m527cpcgvkafsefm6q", b"") self._testknown(hashutil.my_cancel_secret_hash, b"rjwzmafe2duixvqy6h47f5wfrokdziry6zhx4smew4cj6iocsfaa", b"") self._testknown(hashutil.file_renewal_secret_hash, b"hzshk2kf33gzbd5n3a6eszkf6q6o6kixmnag25pniusyaulqjnia", b"", b"si") self._testknown(hashutil.file_cancel_secret_hash, b"bfciwvr6w7wcavsngxzxsxxaszj72dej54n4tu2idzp6b74g255q", b"", b"si") self._testknown(hashutil.bucket_renewal_secret_hash, b"e7imrzgzaoashsncacvy3oysdd2m5yvtooo4gmj4mjlopsazmvuq", b"", b"\x00"*20) self._testknown(hashutil.bucket_cancel_secret_hash, b"dvdujeyxeirj6uux6g7xcf4lvesk632aulwkzjar7srildvtqwma", b"", b"\x00"*20) self._testknown(hashutil.hmac, b"c54ypfi6pevb3nvo6ba42jtglpkry2kbdopqsi7dgrm4r7tw5sra", b"tag", b"") self._testknown(hashutil.mutable_rwcap_key_hash, b"6rvn2iqrghii5n4jbbwwqqsnqu", b"iv", b"wk") self._testknown(hashutil.ssk_writekey_hash, b"ykpgmdbpgbb6yqz5oluw2q26ye", b"") self._testknown(hashutil.ssk_write_enabler_master_hash, b"izbfbfkoait4dummruol3gy2bnixrrrslgye6ycmkuyujnenzpia", b"") self._testknown(hashutil.ssk_write_enabler_hash, b"fuu2dvx7g6gqu5x22vfhtyed7p4pd47y5hgxbqzgrlyvxoev62tq", b"wk", b"\x00"*20) self._testknown(hashutil.ssk_pubkey_fingerprint_hash, b"3opzw4hhm2sgncjx224qmt5ipqgagn7h5zivnfzqycvgqgmgz35q", b"") self._testknown(hashutil.ssk_readkey_hash, b"vugid4as6qbqgeq2xczvvcedai", b"") self._testknown(hashutil.ssk_readkey_data_hash, b"73wsaldnvdzqaf7v4pzbr2ae5a", b"iv", b"rk") self._testknown(hashutil.ssk_storage_index_hash, b"j7icz6kigb6hxrej3tv4z7ayym", b"") self._testknown(hashutil.permute_server_hash, b"kb4354zeeurpo3ze5e275wzbynm6hlap", # b32(expected) b"SI", # peer selection index == storage_index base32.a2b(b"u33m4y7klhz3bypswqkozwetvabelhxt"), # seed ) tahoe_lafs-1.20.0/src/allmydata/test/test_helper.py0000644000000000000000000004410313615410400017227 0ustar00""" Ported to Python 3. 
""" from __future__ import annotations import os from struct import ( pack, ) from functools import ( partial, ) import attr from twisted.internet import defer from twisted.trial import unittest from twisted.application import service from foolscap.api import Tub, fireEventually, flushEventualQueue from eliot.twisted import ( inline_callbacks, ) from allmydata.introducer.client import IntroducerClient from allmydata.crypto import aes from allmydata.storage.server import ( si_b2a, StorageServer, FoolscapStorageServer, ) from allmydata.storage_client import StorageFarmBroker from allmydata.immutable.layout import ( make_write_bucket_proxy, ) from allmydata.immutable import offloaded, upload from allmydata import uri, client from allmydata.util import hashutil, fileutil, mathutil, dictutil from .no_network import ( NoNetworkServer, LocalWrapper, fireNow, ) from .common import ( EMPTY_CLIENT_CONFIG, SyncTestCase, ) from testtools.matchers import ( Equals, MatchesListwise, IsInstance, ) from testtools.twistedsupport import ( succeeded, ) MiB = 1024*1024 DATA = b"I need help\n" * 1000 class CHKUploadHelper_fake(offloaded.CHKUploadHelper): def start_encrypted(self, eu): d = eu.get_size() def _got_size(size): d2 = eu.get_all_encoding_parameters() def _got_parms(parms): # just pretend we did the upload needed_shares, happy, total_shares, segsize = parms ueb_data = {"needed_shares": needed_shares, "total_shares": total_shares, "segment_size": segsize, "size": size, } ueb_hash = b"fake" v = uri.CHKFileVerifierURI(self._storage_index, b"x"*32, needed_shares, total_shares, size) _UR = upload.UploadResults ur = _UR(file_size=size, ciphertext_fetched=0, preexisting_shares=0, pushed_shares=total_shares, sharemap={}, servermap={}, timings={}, uri_extension_data=ueb_data, uri_extension_hash=ueb_hash, verifycapstr=v.to_string()) self._upload_status.set_results(ur) return ur d2.addCallback(_got_parms) return d2 d.addCallback(_got_size) return d @attr.s class FakeCHKCheckerAndUEBFetcher(object): """ A fake of ``CHKCheckerAndUEBFetcher`` which hard-codes some check result. """ peer_getter = attr.ib() storage_index = attr.ib() logparent = attr.ib() _sharemap = attr.ib() _ueb_data = attr.ib() @property def _ueb_hash(self): return hashutil.uri_extension_hash( uri.pack_extension(self._ueb_data), ) def check(self): return defer.succeed(( self._sharemap, self._ueb_data, self._ueb_hash, )) class FakeClient(service.MultiService): introducer_clients : list[IntroducerClient] = [] DEFAULT_ENCODING_PARAMETERS = {"k":25, "happy": 75, "n": 100, "max_segment_size": 1*MiB, } def get_encoding_parameters(self): return self.DEFAULT_ENCODING_PARAMETERS def get_storage_broker(self): return self.storage_broker def flush_but_dont_ignore(res): d = flushEventualQueue() def _done(ignored): return res d.addCallback(_done) return d def wait_a_few_turns(ignored=None): d = fireEventually() d.addCallback(fireEventually) d.addCallback(fireEventually) d.addCallback(fireEventually) d.addCallback(fireEventually) d.addCallback(fireEventually) return d def upload_data(uploader, data, convergence): u = upload.Data(data, convergence=convergence) return uploader.upload(u) def make_uploader(helper_furl, parent, override_name=None): """ Make an ``upload.Uploader`` service pointed at the given helper and with the given service parent. :param bytes helper_furl: The Foolscap URL of the upload helper. :param IServiceCollection parent: A parent to assign to the new uploader. :param str override_name: If not ``None``, a new name for the uploader service. 
Multiple services cannot coexist with the same name. """ u = upload.Uploader(helper_furl) if override_name is not None: u.name = override_name u.setServiceParent(parent) return u class AssistedUpload(unittest.TestCase): def setUp(self): self.tub = t = Tub() t.setOption("expose-remote-exception-types", False) self.s = FakeClient() self.s.storage_broker = StorageFarmBroker( True, lambda h: self.tub, EMPTY_CLIENT_CONFIG, ) self.s.secret_holder = client.SecretHolder(b"lease secret", b"converge") self.s.startService() t.setServiceParent(self.s) self.s.tub = t # we never actually use this for network traffic, so it can use a # bogus host/port t.setLocation(b"bogus:1234") def setUpHelper(self, basedir, chk_upload=CHKUploadHelper_fake, chk_checker=None): fileutil.make_dirs(basedir) self.helper = offloaded.Helper( basedir, self.s.storage_broker, self.s.secret_holder, None, None, ) if chk_upload is not None: self.helper.chk_upload = chk_upload if chk_checker is not None: self.helper.chk_checker = chk_checker self.helper_furl = self.tub.registerReference(self.helper) def tearDown(self): d = self.s.stopService() d.addCallback(fireEventually) d.addBoth(flush_but_dont_ignore) return d def test_one(self): """ Some data that has never been uploaded before can be uploaded in CHK format using the ``RIHelper`` provider and ``Uploader.upload``. """ self.basedir = "helper/AssistedUpload/test_one" self.setUpHelper(self.basedir) u = make_uploader(self.helper_furl, self.s) d = wait_a_few_turns() def _ready(res): self.assertTrue( u._helper, "Expected uploader to have a helper reference, had {} instead.".format( u._helper, ), ) return upload_data(u, DATA, convergence=b"some convergence string") d.addCallback(_ready) def _uploaded(results): the_uri = results.get_uri() self.assertIn(b"CHK", the_uri) self.assertNotEqual( results.get_pushed_shares(), 0, ) d.addCallback(_uploaded) def _check_empty(res): # Make sure the intermediate artifacts aren't left lying around. files = os.listdir(os.path.join(self.basedir, "CHK_encoding")) self.assertEqual(files, []) files = os.listdir(os.path.join(self.basedir, "CHK_incoming")) self.assertEqual(files, []) d.addCallback(_check_empty) return d @inline_callbacks def test_concurrent(self): """ The same data can be uploaded by more than one ``Uploader`` at a time. """ self.basedir = "helper/AssistedUpload/test_concurrent" self.setUpHelper(self.basedir) u1 = make_uploader(self.helper_furl, self.s, "u1") u2 = make_uploader(self.helper_furl, self.s, "u2") yield wait_a_few_turns() for u in [u1, u2]: self.assertTrue( u._helper, "Expected uploader to have a helper reference, had {} instead.".format( u._helper, ), ) uploads = list( upload_data(u, DATA, convergence=b"some convergence string") for u in [u1, u2] ) result1, result2 = yield defer.gatherResults(uploads) self.assertEqual( result1.get_uri(), result2.get_uri(), ) # It would be really cool to assert that result1.get_pushed_shares() + # result2.get_pushed_shares() == total_shares here. However, we're # faking too much for that to be meaningful here. Also it doesn't # hold because we don't actually push _anything_, we just lie about # having pushed stuff. def test_previous_upload_failed(self): self.basedir = "helper/AssistedUpload/test_previous_upload_failed" self.setUpHelper(self.basedir) # we want to make sure that an upload which fails (leaving the # ciphertext in the CHK_encoding/ directory) does not prevent a later # attempt to upload that file from working. We simulate this by # populating the directory manually. 
The hardest part is guessing the # storage index. k = FakeClient.DEFAULT_ENCODING_PARAMETERS["k"] n = FakeClient.DEFAULT_ENCODING_PARAMETERS["n"] max_segsize = FakeClient.DEFAULT_ENCODING_PARAMETERS["max_segment_size"] segsize = min(max_segsize, len(DATA)) # this must be a multiple of 'required_shares'==k segsize = mathutil.next_multiple(segsize, k) key = hashutil.convergence_hash(k, n, segsize, DATA, b"test convergence string") assert len(key) == 16 encryptor = aes.create_encryptor(key) SI = hashutil.storage_index_hash(key) SI_s = str(si_b2a(SI), "utf-8") encfile = os.path.join(self.basedir, "CHK_encoding", SI_s) f = open(encfile, "wb") f.write(aes.encrypt_data(encryptor, DATA)) f.close() u = make_uploader(self.helper_furl, self.s) d = wait_a_few_turns() def _ready(res): assert u._helper return upload_data(u, DATA, convergence=b"test convergence string") d.addCallback(_ready) def _uploaded(results): the_uri = results.get_uri() assert b"CHK" in the_uri d.addCallback(_uploaded) def _check_empty(res): files = os.listdir(os.path.join(self.basedir, "CHK_encoding")) self.failUnlessEqual(files, []) files = os.listdir(os.path.join(self.basedir, "CHK_incoming")) self.failUnlessEqual(files, []) d.addCallback(_check_empty) return d @inline_callbacks def test_already_uploaded(self): """ If enough shares to satisfy the needed parameter already exist, the upload succeeds without pushing any shares. """ params = FakeClient.DEFAULT_ENCODING_PARAMETERS chk_checker = partial( FakeCHKCheckerAndUEBFetcher, sharemap=dictutil.DictOfSets({ 0: {b"server0"}, 1: {b"server1"}, }), ueb_data={ "size": len(DATA), "segment_size": min(params["max_segment_size"], len(DATA)), "needed_shares": params["k"], "total_shares": params["n"], }, ) self.basedir = "helper/AssistedUpload/test_already_uploaded" self.setUpHelper( self.basedir, chk_checker=chk_checker, ) u = make_uploader(self.helper_furl, self.s) yield wait_a_few_turns() assert u._helper results = yield upload_data(u, DATA, convergence=b"some convergence string") the_uri = results.get_uri() assert b"CHK" in the_uri files = os.listdir(os.path.join(self.basedir, "CHK_encoding")) self.failUnlessEqual(files, []) files = os.listdir(os.path.join(self.basedir, "CHK_incoming")) self.failUnlessEqual(files, []) self.assertEqual( results.get_pushed_shares(), 0, ) class CHKCheckerAndUEBFetcherTests(SyncTestCase): """ Tests for ``CHKCheckerAndUEBFetcher``. """ def test_check_no_peers(self): """ If the supplied "peer getter" returns no peers then ``CHKCheckerAndUEBFetcher.check`` returns a ``Deferred`` that fires with ``False``. """ storage_index = b"a" * 16 peers = {storage_index: []} caf = offloaded.CHKCheckerAndUEBFetcher( peers.get, storage_index, None, ) self.assertThat( caf.check(), succeeded(Equals(False)), ) @inline_callbacks def test_check_ueb_unavailable(self): """ If the UEB cannot be read from any of the peers supplied by the "peer getter" then ``CHKCheckerAndUEBFetcher.check`` returns a ``Deferred`` that fires with ``False``. 
""" storage_index = b"a" * 16 serverid = b"b" * 20 storage = FoolscapStorageServer(StorageServer(self.mktemp(), serverid)) rref_without_ueb = LocalWrapper(storage, fireNow) yield write_bad_share(rref_without_ueb, storage_index) server_without_ueb = NoNetworkServer(serverid, rref_without_ueb) peers = {storage_index: [server_without_ueb]} caf = offloaded.CHKCheckerAndUEBFetcher( peers.get, storage_index, None, ) self.assertThat( caf.check(), succeeded(Equals(False)), ) @inline_callbacks def test_not_enough_shares(self): """ If fewer shares are found than are required to reassemble the data then ``CHKCheckerAndUEBFetcher.check`` returns a ``Deferred`` that fires with ``False``. """ storage_index = b"a" * 16 serverid = b"b" * 20 storage = FoolscapStorageServer(StorageServer(self.mktemp(), serverid)) rref_with_ueb = LocalWrapper(storage, fireNow) ueb = { "needed_shares": 2, "total_shares": 2, "segment_size": 128 * 1024, "size": 1024, } yield write_good_share(rref_with_ueb, storage_index, ueb, [0]) server_with_ueb = NoNetworkServer(serverid, rref_with_ueb) peers = {storage_index: [server_with_ueb]} caf = offloaded.CHKCheckerAndUEBFetcher( peers.get, storage_index, None, ) self.assertThat( caf.check(), succeeded(Equals(False)), ) @inline_callbacks def test_enough_shares(self): """ If enough shares are found to reassemble the data then ``CHKCheckerAndUEBFetcher.check`` returns a ``Deferred`` that fires with share and share placement information. """ storage_index = b"a" * 16 serverids = list( ch * 20 for ch in [b"b", b"c"] ) storages = list( FoolscapStorageServer(StorageServer(self.mktemp(), serverid)) for serverid in serverids ) rrefs_with_ueb = list( LocalWrapper(storage, fireNow) for storage in storages ) ueb = { "needed_shares": len(serverids), "total_shares": len(serverids), "segment_size": 128 * 1024, "size": 1024, } for n, rref_with_ueb in enumerate(rrefs_with_ueb): yield write_good_share(rref_with_ueb, storage_index, ueb, [n]) servers_with_ueb = list( NoNetworkServer(serverid, rref_with_ueb) for (serverid, rref_with_ueb) in zip(serverids, rrefs_with_ueb) ) peers = {storage_index: servers_with_ueb} caf = offloaded.CHKCheckerAndUEBFetcher( peers.get, storage_index, None, ) self.assertThat( caf.check(), succeeded(MatchesListwise([ Equals({ n: {serverid} for (n, serverid) in enumerate(serverids) }), Equals(ueb), IsInstance(bytes), ])), ) def write_bad_share(storage_rref, storage_index): """ Write a share with a corrupt URI extension block. """ # Write some trash to the right bucket on this storage server. It won't # have a recoverable UEB block. return write_share(storage_rref, storage_index, [0], b"\0" * 1024) def write_good_share(storage_rref, storage_index, ueb, sharenums): """ Write a valid share with the given URI extension block. """ write_proxy = make_write_bucket_proxy( storage_rref, None, 1024, ueb["segment_size"], 1, 1, ueb["size"], ) # See allmydata/immutable/layout.py offset = write_proxy._offsets["uri_extension"] filler = b"\0" * (offset - len(write_proxy._offset_data)) ueb_data = uri.pack_extension(ueb) data = ( write_proxy._offset_data + filler + pack(write_proxy.fieldstruct, len(ueb_data)) + ueb_data ) return write_share(storage_rref, storage_index, sharenums, data) @inline_callbacks def write_share(storage_rref, storage_index, sharenums, sharedata): """ Write the given share data to the given storage index using the given IStorageServer remote reference. :param foolscap.ipb.IRemoteReference storage_rref: A remote reference to an IStorageServer. 
:param bytes storage_index: The storage index to which to write the share data. :param [int] sharenums: The share numbers to which to write this sharedata. :param bytes sharedata: The ciphertext to write as the share. """ ignored, writers = yield storage_rref.callRemote( "allocate_buckets", storage_index, b"x" * 16, b"x" * 16, sharenums, len(sharedata), LocalWrapper(None), ) [writer] = writers.values() yield writer.callRemote("write", 0, sharedata) yield writer.callRemote("close") tahoe_lafs-1.20.0/src/allmydata/test/test_humanreadable.py0000644000000000000000000000376713615410400020553 0ustar00""" Tests for allmydata.util.humanreadable. This module has been ported to Python 3. """ from twisted.trial import unittest from allmydata.util import humanreadable def foo(): pass # FYI foo()'s line number is used in the test below class NoArgumentException(Exception): def __init__(self): pass class HumanReadable(unittest.TestCase): def test_repr(self): hr = humanreadable.hr # we match on regex so this test isn't fragile about line-numbers self.assertRegex(hr(foo), r"") self.failUnlessEqual(hr(self.test_repr), ">") self.failUnlessEqual(hr(1), "1") self.assertIn(hr(10**40), ["100000000000000000...000000000000000000", "100000000000000000...0000000000000000000"]) self.failUnlessEqual(hr(self), "") self.failUnlessEqual(hr([1,2]), "[1, 2]") self.failUnlessEqual(hr({1:2}), "{1:2}") try: raise ValueError except Exception as e: self.failUnless( hr(e) == "" # python-2.4 or hr(e) == "ValueError()") # python-2.5 try: raise ValueError("oops") except Exception as e: self.failUnless( hr(e) == "" # python-2.4 or hr(e) == "ValueError('oops',)" # python-2.5 or hr(e) == "ValueError(u'oops',)" # python 2 during py3 transition ) try: raise NoArgumentException except Exception as e: self.failUnless( hr(e) == "" # python-2.4 or hr(e) == "NoArgumentException()" # python-2.5 or hr(e) == "", hr(e)) # python-3 tahoe_lafs-1.20.0/src/allmydata/test/test_hung_server.py0000644000000000000000000003533113615410400020302 0ustar00# -*- coding: utf-8 -*- """ Ported to Python 3. 
""" import os, shutil from twisted.trial import unittest from twisted.internet import defer from allmydata import uri from allmydata.util.consumer import download_to_data from allmydata.immutable import upload from allmydata.mutable.common import UnrecoverableFileError from allmydata.mutable.publish import MutableData from allmydata.storage.common import storage_index_to_dir from allmydata.test.no_network import GridTestMixin from allmydata.test.common import ShouldFailMixin from allmydata.util.pollmixin import PollMixin from allmydata.interfaces import NotEnoughSharesError immutable_plaintext = b"data" * 10000 mutable_plaintext = b"muta" * 10000 class HungServerDownloadTest(GridTestMixin, ShouldFailMixin, PollMixin, unittest.TestCase): def _break(self, servers): for (id, ss) in servers: self.g.break_server(id) def _hang(self, servers, **kwargs): for (id, ss) in servers: self.g.hang_server(id, **kwargs) def _unhang(self, servers, **kwargs): for (id, ss) in servers: self.g.unhang_server(id, **kwargs) def _hang_shares(self, shnums, **kwargs): # hang all servers who are holding the given shares hung_serverids = set() for (i_shnum, i_serverid, i_sharefile) in self.shares: if i_shnum in shnums: if i_serverid not in hung_serverids: self.g.hang_server(i_serverid, **kwargs) hung_serverids.add(i_serverid) def _delete_all_shares_from(self, servers): serverids = [id for (id, ss) in servers] for (i_shnum, i_serverid, i_sharefile) in self.shares: if i_serverid in serverids: os.unlink(i_sharefile) def _corrupt_all_shares_in(self, servers, corruptor_func): serverids = [id for (id, ss) in servers] for (i_shnum, i_serverid, i_sharefile) in self.shares: if i_serverid in serverids: self._corrupt_share((i_shnum, i_sharefile), corruptor_func) def _copy_all_shares_from(self, from_servers, to_server): serverids = [id for (id, ss) in from_servers] for (i_shnum, i_serverid, i_sharefile) in self.shares: if i_serverid in serverids: self._copy_share((i_shnum, i_sharefile), to_server) def _copy_share(self, share, to_server): (sharenum, sharefile) = share (id, ss) = to_server shares_dir = os.path.join(ss.original._server.storedir, "shares") si = uri.from_string(self.uri).get_storage_index() si_dir = os.path.join(shares_dir, storage_index_to_dir(si)) if not os.path.exists(si_dir): os.makedirs(si_dir) new_sharefile = os.path.join(si_dir, str(sharenum)) shutil.copy(sharefile, new_sharefile) self.shares = self.find_uri_shares(self.uri) # Make sure that the storage server has the share. 
self.failUnless((sharenum, ss.original._server.my_nodeid, new_sharefile) in self.shares) def _corrupt_share(self, share, corruptor_func): (sharenum, sharefile) = share data = open(sharefile, "rb").read() newdata = corruptor_func(data) os.unlink(sharefile) wf = open(sharefile, "wb") wf.write(newdata) wf.close() def _set_up(self, mutable, testdir, num_clients=1, num_servers=10): self.mutable = mutable if mutable: self.basedir = "hung_server/mutable_" + testdir else: self.basedir = "hung_server/immutable_" + testdir self.set_up_grid(num_clients=num_clients, num_servers=num_servers) self.c0 = self.g.clients[0] nm = self.c0.nodemaker self.servers = sorted([(s.get_serverid(), s.get_rref()) for s in nm.storage_broker.get_connected_servers()]) self.servers = self.servers[5:] + self.servers[:5] if mutable: uploadable = MutableData(mutable_plaintext) d = nm.create_mutable_file(uploadable) def _uploaded_mutable(node): self.uri = node.get_uri() self.shares = self.find_uri_shares(self.uri) d.addCallback(_uploaded_mutable) else: data = upload.Data(immutable_plaintext, convergence=b"") d = self.c0.upload(data) def _uploaded_immutable(upload_res): self.uri = upload_res.get_uri() self.shares = self.find_uri_shares(self.uri) d.addCallback(_uploaded_immutable) return d def _start_download(self): n = self.c0.create_node_from_uri(self.uri) if self.mutable: d = n.download_best_version() else: d = download_to_data(n) return d def _wait_for_data(self, n): if self.mutable: d = n.download_best_version() else: d = download_to_data(n) return d def _check(self, resultingdata): if self.mutable: self.failUnlessEqual(resultingdata, mutable_plaintext) else: self.failUnlessEqual(resultingdata, immutable_plaintext) def _download_and_check(self): d = self._start_download() d.addCallback(self._check) return d def _should_fail_download(self): if self.mutable: return self.shouldFail(UnrecoverableFileError, self.basedir, "no recoverable versions", self._download_and_check) else: return self.shouldFail(NotEnoughSharesError, self.basedir, "ran out of shares", self._download_and_check) def test_10_good_sanity_check(self): d = defer.succeed(None) for mutable in [False, True]: d.addCallback(lambda ign, mutable=mutable: self._set_up(mutable, "test_10_good_sanity_check")) d.addCallback(lambda ign: self._download_and_check()) return d def test_10_good_copied_share(self): d = defer.succeed(None) for mutable in [False, True]: d.addCallback(lambda ign, mutable=mutable: self._set_up(mutable, "test_10_good_copied_share")) d.addCallback(lambda ign: self._copy_all_shares_from(self.servers[2:3], self.servers[0])) d.addCallback(lambda ign: self._download_and_check()) return d def test_3_good_7_noshares(self): d = defer.succeed(None) for mutable in [False, True]: d.addCallback(lambda ign, mutable=mutable: self._set_up(mutable, "test_3_good_7_noshares")) d.addCallback(lambda ign: self._delete_all_shares_from(self.servers[3:])) d.addCallback(lambda ign: self._download_and_check()) return d def test_2_good_8_broken_fail(self): d = defer.succeed(None) for mutable in [False, True]: d.addCallback(lambda ign, mutable=mutable: self._set_up(mutable, "test_2_good_8_broken_fail")) d.addCallback(lambda ign: self._break(self.servers[2:])) d.addCallback(lambda ign: self._should_fail_download()) return d def test_2_good_8_noshares_fail(self): d = defer.succeed(None) for mutable in [False, True]: d.addCallback(lambda ign, mutable=mutable: self._set_up(mutable, "test_2_good_8_noshares_fail")) d.addCallback(lambda ign: 
self._delete_all_shares_from(self.servers[2:])) d.addCallback(lambda ign: self._should_fail_download()) return d def test_2_good_8_broken_copied_share(self): d = defer.succeed(None) for mutable in [False, True]: d.addCallback(lambda ign, mutable=mutable: self._set_up(mutable, "test_2_good_8_broken_copied_share")) d.addCallback(lambda ign: self._copy_all_shares_from(self.servers[2:3], self.servers[0])) d.addCallback(lambda ign: self._break(self.servers[2:])) d.addCallback(lambda ign: self._download_and_check()) return d def test_2_good_8_broken_duplicate_share_fail(self): d = defer.succeed(None) for mutable in [False, True]: d.addCallback(lambda ign, mutable=mutable: self._set_up(mutable, "test_2_good_8_broken_duplicate_share_fail")) d.addCallback(lambda ign: self._copy_all_shares_from(self.servers[1:2], self.servers[0])) d.addCallback(lambda ign: self._break(self.servers[2:])) d.addCallback(lambda ign: self._should_fail_download()) return d def test_3_good_7_hung_immutable(self): d = defer.succeed(None) d.addCallback(lambda ign: self._set_up(False, "test_3_good_7_hung")) d.addCallback(lambda ign: self._hang(self.servers[3:])) d.addCallback(lambda ign: self._download_and_check()) return d def test_5_overdue_immutable(self): # restrict the ShareFinder to only allow 5 outstanding requests, and # arrange for the first 5 servers to hang. Then trigger the OVERDUE # timers (simulating 10 seconds passed), at which point the # ShareFinder should send additional queries and finish the download # quickly. If we didn't have OVERDUE timers, this test would fail by # timing out. done = [] d = self._set_up(False, "test_5_overdue_immutable") def _reduce_max_outstanding_requests_and_download(ign): # we need to hang the first 5 servers, so we have to # figure out where the shares were placed. si = uri.from_string(self.uri).get_storage_index() placed = self.c0.storage_broker.get_servers_for_psi(si) self._hang([(s.get_serverid(), s) for s in placed[:5]]) n = self.c0.create_node_from_uri(self.uri) n._cnode._maybe_create_download_node() self._sf = n._cnode._node._sharefinder self._sf.max_outstanding_requests = 5 self._sf.OVERDUE_TIMEOUT = 1000.0 d2 = download_to_data(n) # start download, but don't wait for it to complete yet def _done(res): done.append(res) # we will poll for this later d2.addBoth(_done) d.addCallback(_reduce_max_outstanding_requests_and_download) from foolscap.eventual import fireEventually, flushEventualQueue # wait here a while d.addCallback(lambda res: fireEventually(res)) d.addCallback(lambda res: flushEventualQueue()) d.addCallback(lambda ign: self.failIf(done)) def _check_waiting(ign): # all the share requests should now be stuck waiting self.failUnlessEqual(len(self._sf.pending_requests), 5) # but none should be marked as OVERDUE until the timers expire self.failUnlessEqual(len(self._sf.overdue_requests), 0) d.addCallback(_check_waiting) def _mark_overdue(ign): # declare four requests overdue, allowing new requests to take # their place, and leaving one stuck. The finder will keep # sending requests until there are 5 non-overdue ones # outstanding, at which point we'll have 4 OVERDUE, 1 # stuck-but-not-overdue, and 4 live requests. All 4 live requests # will retire before the download is complete and the ShareFinder # is shut off. 
That will leave 4 OVERDUE and 1 # stuck-but-not-overdue, for a total of 5 requests in in # _sf.pending_requests for t in list(self._sf.overdue_timers.values())[:4]: t.reset(-1.0) # the timers ought to fire before the eventual-send does return fireEventually() d.addCallback(_mark_overdue) def _we_are_done(): return bool(done) d.addCallback(lambda ign: self.poll(_we_are_done)) def _check_done(ign): self.failUnlessEqual(done, [immutable_plaintext]) self.failUnlessEqual(len(self._sf.pending_requests), 5) self.failUnlessEqual(len(self._sf.overdue_requests), 4) d.addCallback(_check_done) return d def test_2_good_8_hung_then_1_recovers_immutable(self): d = defer.succeed(None) d.addCallback(lambda ign: self._set_up(False, "test_2_good_8_hung_then_1_recovers")) d.addCallback(lambda ign: self._hang(self.servers[2:3])) d.addCallback(lambda ign: self._hang(self.servers[3:])) d.addCallback(lambda ign: self._unhang(self.servers[2:3])) d.addCallback(lambda ign: self._download_and_check()) return d def test_2_good_8_hung_then_1_recovers_with_2_shares_immutable(self): d = defer.succeed(None) d.addCallback(lambda ign: self._set_up(False, "test_2_good_8_hung_then_1_recovers_with_2_shares")) d.addCallback(lambda ign: self._copy_all_shares_from(self.servers[0:1], self.servers[2])) d.addCallback(lambda ign: self._hang(self.servers[2:3])) d.addCallback(lambda ign: self._hang(self.servers[3:])) d.addCallback(lambda ign: self._unhang(self.servers[2:3])) d.addCallback(lambda ign: self._download_and_check()) return d # The tests below do not currently pass for mutable files. The # mutable-file downloader does not yet handle hung servers, and the tests # hang forever (hence the use of SkipTest rather than .todo) def test_3_good_7_hung_mutable(self): raise unittest.SkipTest("still broken") d = defer.succeed(None) d.addCallback(lambda ign: self._set_up(True, "test_3_good_7_hung")) d.addCallback(lambda ign: self._hang(self.servers[3:])) d.addCallback(lambda ign: self._download_and_check()) return d def test_2_good_8_hung_then_1_recovers_mutable(self): raise unittest.SkipTest("still broken") d = defer.succeed(None) d.addCallback(lambda ign: self._set_up(True, "test_2_good_8_hung_then_1_recovers")) d.addCallback(lambda ign: self._hang(self.servers[2:3])) d.addCallback(lambda ign: self._hang(self.servers[3:])) d.addCallback(lambda ign: self._unhang(self.servers[2:3])) d.addCallback(lambda ign: self._download_and_check()) return d def test_2_good_8_hung_then_1_recovers_with_2_shares_mutable(self): raise unittest.SkipTest("still broken") d = defer.succeed(None) d.addCallback(lambda ign: self._set_up(True, "test_2_good_8_hung_then_1_recovers_with_2_shares")) d.addCallback(lambda ign: self._copy_all_shares_from(self.servers[0:1], self.servers[2])) d.addCallback(lambda ign: self._hang(self.servers[2:3])) d.addCallback(lambda ign: self._hang(self.servers[3:])) d.addCallback(lambda ign: self._unhang(self.servers[2:3])) d.addCallback(lambda ign: self._download_and_check()) return d tahoe_lafs-1.20.0/src/allmydata/test/test_i2p_provider.py0000644000000000000000000003720713615410400020363 0ustar00""" Ported to Python 3. 
""" import os from twisted.trial import unittest from twisted.internet import defer, error from twisted.python.usage import UsageError from io import StringIO from unittest import mock from ..util import i2p_provider from ..scripts import create_node, runner def mock_txi2p(txi2p): return mock.patch("allmydata.util.i2p_provider._import_txi2p", return_value=txi2p) def mock_i2p(i2p): return mock.patch("allmydata.util.i2p_provider._import_i2p", return_value=i2p) def make_cli_config(basedir, *argv): parent = runner.Options() cli_config = create_node.CreateNodeOptions() cli_config.parent = parent cli_config.parseOptions(argv) cli_config["basedir"] = basedir cli_config.stdout = StringIO() return cli_config class TryToConnect(unittest.TestCase): def test_try(self): reactor = object() txi2p = mock.Mock() d = defer.succeed(True) txi2p.testAPI = mock.Mock(return_value=d) ep = object() stdout = StringIO() with mock.patch("allmydata.util.i2p_provider.clientFromString", return_value=ep) as cfs: d = i2p_provider._try_to_connect(reactor, "desc", stdout, txi2p) r = self.successResultOf(d) self.assertTrue(r) cfs.assert_called_with(reactor, "desc") txi2p.testAPI.assert_called_with(reactor, 'SAM', ep) def test_try_handled_error(self): reactor = object() txi2p = mock.Mock() d = defer.fail(error.ConnectError("oops")) txi2p.testAPI = mock.Mock(return_value=d) ep = object() stdout = StringIO() with mock.patch("allmydata.util.i2p_provider.clientFromString", return_value=ep) as cfs: d = i2p_provider._try_to_connect(reactor, "desc", stdout, txi2p) r = self.successResultOf(d) self.assertIs(r, None) cfs.assert_called_with(reactor, "desc") txi2p.testAPI.assert_called_with(reactor, 'SAM', ep) self.assertEqual(stdout.getvalue(), "Unable to reach I2P SAM API at 'desc': " "An error occurred while connecting: oops.\n") def test_try_unhandled_error(self): reactor = object() txi2p = mock.Mock() d = defer.fail(ValueError("oops")) txi2p.testAPI = mock.Mock(return_value=d) ep = object() stdout = StringIO() with mock.patch("allmydata.util.i2p_provider.clientFromString", return_value=ep) as cfs: d = i2p_provider._try_to_connect(reactor, "desc", stdout, txi2p) f = self.failureResultOf(d) self.assertIsInstance(f.value, ValueError) self.assertEqual(str(f.value), "oops") cfs.assert_called_with(reactor, "desc") txi2p.testAPI.assert_called_with(reactor, 'SAM', ep) self.assertEqual(stdout.getvalue(), "") class ConnectToI2P(unittest.TestCase): def _do_test_connect(self, endpoint, reachable): reactor = object() txi2p = object() args = [] if endpoint: args = ["--i2p-sam-port=%s" % endpoint] cli_config = make_cli_config("basedir", "--listen=i2p", *args) stdout = cli_config.stdout expected_port = "tcp:127.0.0.1:7656" if endpoint: expected_port = endpoint tried = [] def _try_to_connect(reactor, port, stdout, txi2p): tried.append( (reactor, port, stdout, txi2p) ) if not reachable: return defer.succeed(None) if port == expected_port: return defer.succeed(True) return defer.succeed(None) with mock.patch("allmydata.util.i2p_provider._try_to_connect", _try_to_connect): d = i2p_provider._connect_to_i2p(reactor, cli_config, txi2p) if not reachable: f = self.failureResultOf(d) self.assertIsInstance(f.value, ValueError) self.assertEqual(str(f.value), "unable to reach any default I2P SAM port") return successful_port = self.successResultOf(d) self.assertEqual(successful_port, expected_port) expected = [(reactor, "tcp:127.0.0.1:7656", stdout, txi2p)] if endpoint: expected = [(reactor, endpoint, stdout, txi2p)] self.assertEqual(tried, expected) def 
test_connect(self): return self._do_test_connect(None, True) def test_connect_endpoint(self): return self._do_test_connect("tcp:other:port", True) def test_connect_unreachable(self): return self._do_test_connect(None, False) class CreateDest(unittest.TestCase): def test_no_txi2p(self): with mock.patch("allmydata.util.i2p_provider._import_txi2p", return_value=None): d = i2p_provider.create_config("reactor", "cli_config") f = self.failureResultOf(d) self.assertIsInstance(f.value, ValueError) self.assertEqual(str(f.value), "Cannot create I2P Destination without txi2p. " "Please 'pip install tahoe-lafs[i2p]' to fix this.") def _do_test_launch(self, executable): basedir = self.mktemp() os.mkdir(basedir) args = ["--listen=i2p", "--i2p-launch"] if executable: args.append("--i2p-executable=%s" % executable) self.assertRaises(UsageError, make_cli_config, basedir, *args) def test_launch(self): return self._do_test_launch(None) def test_launch_executable(self): return self._do_test_launch("myi2p") def test_sam_endpoint(self): basedir = self.mktemp() os.mkdir(basedir) private_dir = os.path.join(basedir, "private") os.mkdir(private_dir) privkeyfile = os.path.abspath(os.path.join(private_dir, "i2p_dest.privkey")) reactor = object() cli_config = make_cli_config(basedir, "--listen=i2p") connect_to_i2p = mock.Mock(return_value=defer.succeed("goodport")) txi2p = mock.Mock() ep = object() dest = mock.Mock() dest.host = "FOOBAR.b32.i2p" txi2p.generateDestination = mock.Mock(return_value=defer.succeed(dest)) with mock_txi2p(txi2p): with mock.patch("allmydata.util.i2p_provider._connect_to_i2p", connect_to_i2p): with mock.patch("allmydata.util.i2p_provider.clientFromString", return_value=ep) as cfs: d = i2p_provider.create_config(reactor, cli_config) i2p_config = self.successResultOf(d) connect_to_i2p.assert_called_with(reactor, cli_config, txi2p) cfs.assert_called_with(reactor, "goodport") txi2p.generateDestination.assert_called_with(reactor, privkeyfile, 'SAM', ep) expected = {"sam.port": "goodport", "dest": "true", "dest.port": "3457", "dest.private_key_file": os.path.join("private", "i2p_dest.privkey"), } self.assertEqual(dict(i2p_config.node_config["i2p"]), expected) self.assertEqual(i2p_config.tub_ports, ["listen:i2p"]) self.assertEqual(i2p_config.tub_locations, ["i2p:FOOBAR.b32.i2p:3457"]) _None = object() class FakeConfig(dict): def get_config(self, section, option, default=_None, boolean=False): if section != "i2p": raise ValueError(section) value = self.get(option, default) if value is _None: raise KeyError return value class Provider(unittest.TestCase): def test_build(self): i2p_provider.create("reactor", FakeConfig()) def test_handler_disabled(self): p = i2p_provider.create("reactor", FakeConfig(enabled=False)) self.assertEqual(p.get_i2p_handler(), None) def test_handler_no_i2p(self): with mock_i2p(None): p = i2p_provider.create("reactor", FakeConfig()) self.assertEqual(p.get_i2p_handler(), None) def test_handler_sam_endpoint(self): i2p = mock.Mock() handler = object() i2p.sam_endpoint = mock.Mock(return_value=handler) ep = object() reactor = object() with mock_i2p(i2p): p = i2p_provider.create(reactor, FakeConfig(**{"sam.port": "ep_desc"})) with mock.patch("allmydata.util.i2p_provider.clientFromString", return_value=ep) as cfs: h = p.get_i2p_handler() cfs.assert_called_with(reactor, "ep_desc") self.assertIs(h, handler) i2p.sam_endpoint.assert_called_with(ep, keyfile=None) def test_handler_launch(self): i2p = mock.Mock() handler = object() i2p.launch = mock.Mock(return_value=handler) reactor = 
object() with mock_i2p(i2p): p = i2p_provider.create(reactor, FakeConfig(launch=True)) h = p.get_i2p_handler() self.assertIs(h, handler) i2p.launch.assert_called_with(i2p_configdir=None, i2p_binary=None) def test_handler_launch_configdir(self): i2p = mock.Mock() handler = object() i2p.launch = mock.Mock(return_value=handler) reactor = object() with mock_i2p(i2p): p = i2p_provider.create(reactor, FakeConfig(launch=True, **{"i2p.configdir": "configdir"})) h = p.get_i2p_handler() self.assertIs(h, handler) i2p.launch.assert_called_with(i2p_configdir="configdir", i2p_binary=None) def test_handler_launch_configdir_executable(self): i2p = mock.Mock() handler = object() i2p.launch = mock.Mock(return_value=handler) reactor = object() with mock_i2p(i2p): p = i2p_provider.create(reactor, FakeConfig(launch=True, **{"i2p.configdir": "configdir", "i2p.executable": "myi2p", })) h = p.get_i2p_handler() self.assertIs(h, handler) i2p.launch.assert_called_with(i2p_configdir="configdir", i2p_binary="myi2p") def test_handler_configdir(self): i2p = mock.Mock() handler = object() i2p.local_i2p = mock.Mock(return_value=handler) reactor = object() with mock_i2p(i2p): p = i2p_provider.create(reactor, FakeConfig(**{"i2p.configdir": "configdir"})) h = p.get_i2p_handler() i2p.local_i2p.assert_called_with("configdir") self.assertIs(h, handler) def test_handler_launch_executable(self): i2p = mock.Mock() handler = object() i2p.launch = mock.Mock(return_value=handler) reactor = object() with mock_i2p(i2p): p = i2p_provider.create(reactor, FakeConfig(launch=True, **{"i2p.executable": "myi2p"})) h = p.get_i2p_handler() self.assertIs(h, handler) i2p.launch.assert_called_with(i2p_configdir=None, i2p_binary="myi2p") def test_handler_default(self): i2p = mock.Mock() handler = object() i2p.default = mock.Mock(return_value=handler) reactor = object() with mock_i2p(i2p): p = i2p_provider.create(reactor, FakeConfig()) h = p.get_i2p_handler() self.assertIs(h, handler) i2p.default.assert_called_with(reactor, keyfile=None) class ProviderListener(unittest.TestCase): def test_listener(self): """Does the I2P Provider object's get_listener() method correctly convert the [i2p] section of tahoe.cfg into an endpoint/descriptor? """ i2p = mock.Mock() handler = object() i2p.local_i2p = mock.Mock(return_value=handler) reactor = object() privkeyfile = os.path.join("private", "i2p_dest.privkey") with mock_i2p(i2p): p = i2p_provider.create(reactor, FakeConfig(**{ "i2p.configdir": "configdir", "sam.port": "good:port", "dest": "true", "dest.port": "3457", "dest.private_key_file": privkeyfile, })) endpoint_or_description = p.get_listener() self.assertEqual(endpoint_or_description, "i2p:%s:3457:api=SAM:apiEndpoint=good\\:port" % privkeyfile) class Provider_CheckI2PConfig(unittest.TestCase): def test_default(self): # default config doesn't start an I2P service, so it should be # happy both with and without txi2p p = i2p_provider.create("reactor", FakeConfig()) p.check_dest_config() with mock_txi2p(None): p = i2p_provider.create("reactor", FakeConfig()) p.check_dest_config() def test_no_txi2p(self): with mock_txi2p(None): with self.assertRaises(ValueError) as ctx: i2p_provider.create("reactor", FakeConfig(dest=True)) self.assertEqual( str(ctx.exception), "Cannot create I2P Destination without txi2p. " "Please 'pip install tahoe-lafs[i2p]' to fix." 
) def test_no_launch_no_control(self): with self.assertRaises(ValueError) as ctx: i2p_provider.create("reactor", FakeConfig(dest=True)) self.assertEqual( str(ctx.exception), "[i2p] dest = true, but we have neither " "sam.port= nor launch=true nor configdir=" ) def test_missing_keys(self): with self.assertRaises(ValueError) as ctx: i2p_provider.create("reactor", FakeConfig( dest=True, **{"sam.port": "x", } )) self.assertEqual(str(ctx.exception), "[i2p] dest = true, " "but dest.port= is missing") with self.assertRaises(ValueError) as ctx: i2p_provider.create("reactor", FakeConfig(dest=True, **{"sam.port": "x", "dest.port": "y", })) self.assertEqual( str(ctx.exception), "[i2p] dest = true, " "but dest.private_key_file= is missing" ) def test_launch_not_implemented(self): with self.assertRaises(NotImplementedError) as ctx: i2p_provider.create("reactor", FakeConfig(dest=True, launch=True, **{"dest.port": "x", "dest.private_key_file": "y", })) self.assertEqual( str(ctx.exception), "[i2p] launch is under development." ) def test_ok(self): i2p_provider.create( "reactor", FakeConfig( dest=True, **{ "sam.port": "x", "dest.port": "y", "dest.private_key_file": "z", } ) ) tahoe_lafs-1.20.0/src/allmydata/test/test_immutable.py0000644000000000000000000003426713615410400017741 0ustar00""" This module has been ported to Python 3. """ import random from twisted.trial import unittest from twisted.internet import defer from foolscap.api import eventually from allmydata.test import common from allmydata.test.no_network import GridTestMixin from allmydata.test.common import TEST_DATA from allmydata import uri from allmydata.util import log from allmydata.util.consumer import download_to_data from allmydata.interfaces import NotEnoughSharesError from allmydata.immutable.upload import Data from allmydata.immutable.downloader import finder from allmydata.immutable.literal import LiteralFileNode from .no_network import ( NoNetworkServer, ) class MockShareHashTree(object): def needed_hashes(self): return False class MockNode(object): def __init__(self, check_reneging, check_fetch_failed): self.got = 0 self.finished_d = defer.Deferred() self.segment_size = 78 self.guessed_segment_size = 78 self._no_more_shares = False self.check_reneging = check_reneging self.check_fetch_failed = check_fetch_failed self._si_prefix='aa' self.have_UEB = True self.share_hash_tree = MockShareHashTree() self.on_want_more_shares = None def when_finished(self): return self.finished_d def get_num_segments(self): return (5, True) def _calculate_sizes(self, guessed_segment_size): return {'block_size': 4, 'num_segments': 5} def no_more_shares(self): self._no_more_shares = True def got_shares(self, shares): if self.check_reneging: if self._no_more_shares: self.finished_d.errback(unittest.FailTest("The node was told by the share finder that it is destined to remain hungry, then was given another share.")) return self.got += len(shares) log.msg("yyy 3 %s.got_shares(%s) got: %s" % (self, shares, self.got)) if self.got == 3: self.finished_d.callback(True) def get_desired_ciphertext_hashes(self, *args, **kwargs): return iter([]) def fetch_failed(self, *args, **kwargs): if self.check_fetch_failed: if self.finished_d: self.finished_d.errback(unittest.FailTest("The node was told by the segment fetcher that the download failed.")) self.finished_d = None def want_more_shares(self): if self.on_want_more_shares: self.on_want_more_shares() def process_blocks(self, *args, **kwargs): if self.finished_d: self.finished_d.callback(None) class 
TestShareFinder(unittest.TestCase): def test_no_reneging_on_no_more_shares_ever(self): # ticket #1191 # Suppose that K=3 and you send two DYHB requests, the first # response offers two shares, and then the last offers one # share. If you tell your share consumer "no more shares, # ever", and then immediately tell them "oh, and here's # another share", then you lose. rcap = uri.CHKFileURI(b'a'*32, b'a'*32, 3, 99, 100) vcap = rcap.get_verify_cap() class MockBuckets(object): pass class MockServer(object): def __init__(self, buckets): self.version = { b'http://allmydata.org/tahoe/protocols/storage/v1': { b"tolerates-immutable-read-overrun": True } } self.buckets = buckets self.d = defer.Deferred() self.s = None def callRemote(self, methname, *args, **kwargs): d = defer.Deferred() # Even after the 3rd answer we're still hungry because # we're interested in finding a share on a 3rd server # so we don't have to download more than one share # from the first server. This is actually necessary to # trigger the bug. def _give_buckets_and_hunger_again(): d.callback(self.buckets) self.s.hungry() eventually(_give_buckets_and_hunger_again) return d class MockStorageBroker(object): def __init__(self, servers): self.servers = servers def get_servers_for_psi(self, si): return self.servers class MockDownloadStatus(object): def add_dyhb_request(self, server, when): return MockDYHBEvent() class MockDYHBEvent(object): def finished(self, shnums, when): pass mockserver1 = MockServer({1: MockBuckets(), 2: MockBuckets()}) mockserver2 = MockServer({}) mockserver3 = MockServer({3: MockBuckets()}) servers = [ NoNetworkServer(b"ms1", mockserver1), NoNetworkServer(b"ms2", mockserver2), NoNetworkServer(b"ms3", mockserver3), ] mockstoragebroker = MockStorageBroker(servers) mockdownloadstatus = MockDownloadStatus() mocknode = MockNode(check_reneging=True, check_fetch_failed=True) s = finder.ShareFinder(mockstoragebroker, vcap, mocknode, mockdownloadstatus) mockserver1.s = s mockserver2.s = s mockserver3.s = s s.hungry() return mocknode.when_finished() class Test(GridTestMixin, unittest.TestCase, common.ShouldFailMixin): def startup(self, basedir): self.basedir = basedir self.set_up_grid(num_clients=2, num_servers=5) c1 = self.g.clients[1] # We need multiple segments to test crypttext hash trees that are # non-trivial (i.e. they have more than just one hash in them). c1.encoding_params['max_segment_size'] = 12 # Tests that need to test servers of happiness using this should # set their own value for happy -- the default (7) breaks stuff. 
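# --- Editor's illustrative sketch (not part of the original test module) ---
# The "happy" parameter tweaked just below refers to servers-of-happiness:
# roughly, the size of a maximum matching between shares and distinct servers.
# The helper here is a minimal, self-contained illustration of that idea using
# a plain augmenting-path matching; it is NOT Tahoe's implementation, and the
# name `happiness_sketch` is purely illustrative.
def happiness_sketch(share_to_servers):
    """share_to_servers maps share number -> set of server ids holding it."""
    match = {}  # server id -> share currently matched to it

    def try_place(share, seen):
        for server in share_to_servers.get(share, ()):
            if server in seen:
                continue
            seen.add(server)
            # place here if the server is free, or its current share can move
            if server not in match or try_place(match[server], seen):
                match[server] = share
                return True
        return False

    return sum(1 for share in share_to_servers if try_place(share, set()))

# Example: four shares spread over three servers give a happiness of 3, which
# would fail an upload configured with happy=7 but satisfies happy=1 as above.
assert happiness_sketch({0: {"s1"}, 1: {"s1", "s2"}, 2: {"s2", "s3"}, 3: {"s3"}}) == 3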
c1.encoding_params['happy'] = 1 d = c1.upload(Data(TEST_DATA, convergence=b"")) def _after_upload(ur): self.uri = ur.get_uri() self.filenode = self.g.clients[0].create_node_from_uri(ur.get_uri()) return self.uri d.addCallback(_after_upload) return d def _stash_shares(self, shares): self.shares = shares def _download_and_check_plaintext(self, ign=None): num_reads = self._count_reads() d = download_to_data(self.filenode) def _after_download(result): self.failUnlessEqual(result, TEST_DATA) return self._count_reads() - num_reads d.addCallback(_after_download) return d def _shuffled(self, num_shnums): shnums = list(range(10)) random.shuffle(shnums) return shnums[:num_shnums] def _count_reads(self): return sum([s.stats_provider.get_stats() ['counters'].get('storage_server.read', 0) for s in self.g.servers_by_number.values()]) def _count_allocates(self): return sum([s.stats_provider.get_stats() ['counters'].get('storage_server.allocate', 0) for s in self.g.servers_by_number.values()]) def _count_writes(self): return sum([s.stats_provider.get_stats() ['counters'].get('storage_server.write', 0) for s in self.g.servers_by_number.values()]) def test_test_code(self): # The following process of stashing the shares, running # replace_shares, and asserting that the new set of shares equals the # old is more to test this test code than to test the Tahoe code... d = self.startup("immutable/Test/code") d.addCallback(self.copy_shares) d.addCallback(self._stash_shares) d.addCallback(self._download_and_check_plaintext) # The following process of deleting 8 of the shares and asserting # that you can't download it is more to test this test code than to # test the Tahoe code... def _then_delete_8(ign): self.restore_all_shares(self.shares) self.delete_shares_numbered(self.uri, range(8)) d.addCallback(_then_delete_8) d.addCallback(lambda ign: self.shouldFail(NotEnoughSharesError, "download-2", "ran out of shares", download_to_data, self.filenode)) return d def test_download(self): """ Basic download. (This functionality is more or less already tested by test code in other modules, but this module is also going to test some more specific things about immutable download.) 
""" d = self.startup("immutable/Test/download") d.addCallback(self._download_and_check_plaintext) def _after_download(ign): num_reads = self._count_reads() #print(num_reads) self.failIf(num_reads > 41, num_reads) d.addCallback(_after_download) return d def test_download_from_only_3_remaining_shares(self): """ Test download after 7 random shares (of the 10) have been removed.""" d = self.startup("immutable/Test/download_from_only_3_remaining_shares") d.addCallback(lambda ign: self.delete_shares_numbered(self.uri, range(7))) d.addCallback(self._download_and_check_plaintext) def _after_download(num_reads): #print(num_reads) self.failIf(num_reads > 41, num_reads) d.addCallback(_after_download) return d def test_download_from_only_3_shares_with_good_crypttext_hash(self): """ Test download after 7 random shares (of the 10) have had their crypttext hash tree corrupted.""" d = self.startup("download_from_only_3_shares_with_good_crypttext_hash") def _corrupt_7(ign): c = common._corrupt_offset_of_block_hashes_to_truncate_crypttext_hashes self.corrupt_shares_numbered(self.uri, self._shuffled(7), c) d.addCallback(_corrupt_7) d.addCallback(self._download_and_check_plaintext) return d def test_download_abort_if_too_many_missing_shares(self): """ Test that download gives up quickly when it realizes there aren't enough shares out there.""" d = self.startup("download_abort_if_too_many_missing_shares") d.addCallback(lambda ign: self.delete_shares_numbered(self.uri, range(8))) d.addCallback(lambda ign: self.shouldFail(NotEnoughSharesError, "delete 8", "Last failure: None", download_to_data, self.filenode)) # the new downloader pipelines a bunch of read requests in parallel, # so don't bother asserting anything about the number of reads return d def test_download_abort_if_too_many_corrupted_shares(self): """Test that download gives up quickly when it realizes there aren't enough uncorrupted shares out there. It should be able to tell because the corruption occurs in the sharedata version number, which it checks first.""" d = self.startup("download_abort_if_too_many_corrupted_shares") def _corrupt_8(ign): c = common._corrupt_sharedata_version_number self.corrupt_shares_numbered(self.uri, self._shuffled(8), c) d.addCallback(_corrupt_8) def _try_download(ign): start_reads = self._count_reads() d2 = self.shouldFail(NotEnoughSharesError, "corrupt 8", "LayoutInvalid", download_to_data, self.filenode) def _check_numreads(ign): num_reads = self._count_reads() - start_reads #print(num_reads) # To pass this test, you are required to give up before # reading all of the share data. Actually, we could give up # sooner than 45 reads, but currently our download code does # 45 reads. This test then serves as a "performance # regression detector" -- if you change download code so that # it takes *more* reads, then this test will fail. 
self.failIf(num_reads > 45, num_reads) d2.addCallback(_check_numreads) return d2 d.addCallback(_try_download) return d def test_download_to_data(self): d = self.startup("download_to_data") d.addCallback(lambda ign: self.filenode.download_to_data()) d.addCallback(lambda data: self.failUnlessEqual(data, common.TEST_DATA)) return d def test_download_best_version(self): d = self.startup("download_best_version") d.addCallback(lambda ign: self.filenode.download_best_version()) d.addCallback(lambda data: self.failUnlessEqual(data, common.TEST_DATA)) return d def test_get_best_readable_version(self): d = self.startup("get_best_readable_version") d.addCallback(lambda ign: self.filenode.get_best_readable_version()) d.addCallback(lambda n2: self.failUnlessEqual(n2, self.filenode)) return d def test_get_size_of_best_version(self): d = self.startup("get_size_of_best_version") d.addCallback(lambda ign: self.filenode.get_size_of_best_version()) d.addCallback(lambda size: self.failUnlessEqual(size, len(common.TEST_DATA))) return d class LiteralFileNodeTests(unittest.TestCase): """Tests for LiteralFileNode.""" def test_equality(self): """LiteralFileNodes are equal iff they have the same URI.""" uri1 = uri.LiteralFileURI(b"1") uri2 = uri.LiteralFileURI(b"2") lfn1 = LiteralFileNode(uri1) lfn1b = LiteralFileNode(uri1) lfn2 = LiteralFileNode(uri2) self.assertTrue(lfn1 == lfn1b) self.assertFalse(lfn1 != lfn1b) self.assertTrue(lfn1 != lfn2) self.assertFalse(lfn1 == lfn2) self.assertTrue(lfn1 != 300) self.assertFalse(lfn1 == 300) # XXX extend these tests to show bad behavior of various kinds from servers: # raising exception from each remove_foo() method, for example # XXX test disconnect DeadReferenceError from get_buckets and get_block_whatsit # TODO: delete this whole file tahoe_lafs-1.20.0/src/allmydata/test/test_introducer.py0000644000000000000000000013234413615410400020133 0ustar00""" Ported to Python 3. 
""" from six import ensure_binary, ensure_text import os, re, itertools from base64 import b32decode import json from operator import ( setitem, ) from functools import ( partial, ) from testtools.matchers import ( Is, ) from twisted.internet import defer, address from twisted.python import log from twisted.python.filepath import FilePath from twisted.web.template import flattenString from foolscap.api import Tub, Referenceable, fireEventually, flushEventualQueue from twisted.application import service from allmydata.crypto import ed25519 from allmydata.crypto.util import remove_prefix from allmydata.crypto.error import BadSignature from allmydata.interfaces import InsufficientVersionError from allmydata.introducer.client import IntroducerClient from allmydata.introducer.server import IntroducerService, FurlFileConflictError from allmydata.introducer.common import get_tubid_string_from_ann, \ get_tubid_string, sign_to_foolscap, unsign_from_foolscap, \ UnknownKeyError from allmydata.node import ( create_node_dir, read_config, ) # the "new way" to create introducer node instance from allmydata.introducer.server import create_introducer from allmydata.web import introweb from allmydata.client import ( create_client, create_introducer_clients, ) from allmydata.util import pollmixin, idlib, fileutil, yamlutil from allmydata.util.iputil import ( listenOnUnused, ) from allmydata.scripts.common import ( write_introducer, ) import allmydata.test.common_util as testutil from .common import ( SyncTestCase, AsyncTestCase, AsyncBrokenTestCase, ) class LoggingMultiService(service.MultiService): def log(self, msg, **kw): log.msg(msg, **kw) class Node(testutil.SignalMixin, testutil.ReallyEqualMixin, AsyncTestCase): def test_backwards_compat_import(self): # for old introducer .tac files from allmydata.introducer import IntroducerNode IntroducerNode # pyflakes @defer.inlineCallbacks def test_create(self): """ A brand new introducer creates its config dir """ basedir = "introducer.IntroducerNode.test_create" yield create_introducer(basedir) self.assertTrue(os.path.exists(basedir)) def test_introducer_clients_unloadable(self): """ ``create_introducer_clients`` raises ``EnvironmentError`` if ``introducers.yaml`` exists but we can't read it. 
""" basedir = u"introducer.IntroducerNode.test_introducer_clients_unloadable" os.mkdir(basedir) os.mkdir(os.path.join(basedir, u"private")) yaml_fname = os.path.join(basedir, u"private", u"introducers.yaml") with open(yaml_fname, 'w') as f: f.write(u'---\n') os.chmod(yaml_fname, 0o000) self.addCleanup(lambda: os.chmod(yaml_fname, 0o700)) config = read_config(basedir, "portnum") with self.assertRaises(EnvironmentError): create_introducer_clients(config, Tub()) @defer.inlineCallbacks def test_furl(self): basedir = "introducer.IntroducerNode.test_furl" create_node_dir(basedir, "testing") public_fn = os.path.join(basedir, "introducer.furl") private_fn = os.path.join(basedir, "private", "introducer.furl") q1 = yield create_introducer(basedir) del q1 # new nodes create unguessable furls in private/introducer.furl ifurl = fileutil.read(private_fn, mode="r") self.failUnless(ifurl) ifurl = ifurl.strip() self.failIf(ifurl.endswith("/introducer"), ifurl) # old nodes created guessable furls in BASEDIR/introducer.furl guessable = ifurl[:ifurl.rfind("/")] + "/introducer" fileutil.write(public_fn, guessable+"\n", mode="w") # text # if we see both files, throw an error with self.assertRaises(FurlFileConflictError): yield create_introducer(basedir) # when we see only the public one, move it to private/ and use # the existing furl instead of creating a new one os.unlink(private_fn) q2 = yield create_introducer(basedir) del q2 self.failIf(os.path.exists(public_fn)) ifurl2 = fileutil.read(private_fn, mode="r") self.failUnless(ifurl2) self.failUnlessEqual(ifurl2.strip(), guessable) @defer.inlineCallbacks def test_web_static(self): basedir = u"introducer.Node.test_web_static" create_node_dir(basedir, "testing") fileutil.write(os.path.join(basedir, "tahoe.cfg"), "[node]\n" + "web.port = tcp:0:interface=127.0.0.1\n" + "web.static = relative\n") c = yield create_introducer(basedir) w = c.getServiceNamed("webish") abs_basedir = fileutil.abspath_expanduser_unicode(basedir) expected = fileutil.abspath_expanduser_unicode(u"relative", abs_basedir) self.failUnlessReallyEqual(w.staticdir, expected) class ServiceMixin(object): def setUp(self): self.parent = LoggingMultiService() self.parent.startService() return super(ServiceMixin, self).setUp() def tearDown(self): log.msg("TestIntroducer.tearDown") d = defer.maybeDeferred(super(ServiceMixin, self).tearDown) d.addCallback(lambda res: self.parent.stopService()) d.addCallback(flushEventualQueue) return d class Introducer(ServiceMixin, AsyncTestCase): def test_create(self): ic = IntroducerClient(None, "introducer.furl", u"my_nickname", "my_version", "oldest_version", fakeseq, FilePath(self.mktemp())) self.failUnless(isinstance(ic, IntroducerClient)) def test_listen(self): i = IntroducerService() i.setServiceParent(self.parent) def fakeseq(): return 1, "nonce" seqnum_counter = itertools.count(1) def realseq(): return next(seqnum_counter), str(os.randint(1,100000)) def make_ann(furl): ann = { "anonymous-storage-FURL": furl, "permutation-seed-base32": get_tubid_string(furl) } return ann def make_ann_t(ic, furl, privkey, seqnum): assert privkey ann_d = ic.create_announcement_dict("storage", make_ann(furl)) ann_d["seqnum"] = seqnum ann_d["nonce"] = "nonce" ann_t = sign_to_foolscap(ann_d, privkey) return ann_t class Client(AsyncTestCase): def test_duplicate_receive_v2(self): ic1 = IntroducerClient(None, "introducer.furl", u"my_nickname", "ver23", "oldest_version", fakeseq, FilePath(self.mktemp())) # we use a second client just to create a different-looking # announcement ic2 = 
IntroducerClient(None, "introducer.furl", u"my_nickname", "ver24","oldest_version",fakeseq, FilePath(self.mktemp())) announcements = [] def _received(key_s, ann): announcements.append( (key_s, ann) ) ic1.subscribe_to("storage", _received) furl1 = "pb://62ubehyunnyhzs7r6vdonnm2hpi52w6y@127.0.0.1:36106/gydnp" furl1a = "pb://62ubehyunnyhzs7r6vdonnm2hpi52w6y@127.0.0.1:7777/gydnp" furl2 = "pb://ttwwooyunnyhzs7r6vdonnm2hpi52w6y@127.0.0.1:36106/ttwwoo" private_key, public_key = ed25519.create_signing_keypair() public_key_str = ed25519.string_from_verifying_key(public_key) pubkey_s = remove_prefix(public_key_str, b"pub-") # ann1: ic1, furl1 # ann1a: ic1, furl1a (same SturdyRef, different connection hints) # ann1b: ic2, furl1 # ann2: ic2, furl2 self.ann1 = make_ann_t(ic1, furl1, private_key, seqnum=10) self.ann1old = make_ann_t(ic1, furl1, private_key, seqnum=9) self.ann1noseqnum = make_ann_t(ic1, furl1, private_key, seqnum=None) self.ann1b = make_ann_t(ic2, furl1, private_key, seqnum=11) self.ann1a = make_ann_t(ic1, furl1a, private_key, seqnum=12) self.ann2 = make_ann_t(ic2, furl2, private_key, seqnum=13) ic1.remote_announce_v2([self.ann1]) # queues eventual-send d = fireEventually() def _then1(ign): self.failUnlessEqual(len(announcements), 1) key_s,ann = announcements[0] self.failUnlessEqual(key_s, pubkey_s) self.failUnlessEqual(ann["anonymous-storage-FURL"], furl1) self.failUnlessEqual(ann["my-version"], "ver23") d.addCallback(_then1) # now send a duplicate announcement. This should not fire the # subscriber d.addCallback(lambda ign: ic1.remote_announce_v2([self.ann1])) d.addCallback(fireEventually) def _then2(ign): self.failUnlessEqual(len(announcements), 1) d.addCallback(_then2) # an older announcement shouldn't fire the subscriber either d.addCallback(lambda ign: ic1.remote_announce_v2([self.ann1old])) d.addCallback(fireEventually) def _then2a(ign): self.failUnlessEqual(len(announcements), 1) d.addCallback(_then2a) # announcement with no seqnum cannot replace one with-seqnum d.addCallback(lambda ign: ic1.remote_announce_v2([self.ann1noseqnum])) d.addCallback(fireEventually) def _then2b(ign): self.failUnlessEqual(len(announcements), 1) d.addCallback(_then2b) # and a replacement announcement: same FURL, new other stuff. The # subscriber *should* be fired. d.addCallback(lambda ign: ic1.remote_announce_v2([self.ann1b])) d.addCallback(fireEventually) def _then3(ign): self.failUnlessEqual(len(announcements), 2) key_s,ann = announcements[-1] self.failUnlessEqual(key_s, pubkey_s) self.failUnlessEqual(ann["anonymous-storage-FURL"], furl1) self.failUnlessEqual(ann["my-version"], "ver24") d.addCallback(_then3) # and a replacement announcement with a different FURL (it uses # different connection hints) d.addCallback(lambda ign: ic1.remote_announce_v2([self.ann1a])) d.addCallback(fireEventually) def _then4(ign): self.failUnlessEqual(len(announcements), 3) key_s,ann = announcements[-1] self.failUnlessEqual(key_s, pubkey_s) self.failUnlessEqual(ann["anonymous-storage-FURL"], furl1a) self.failUnlessEqual(ann["my-version"], "ver23") d.addCallback(_then4) # now add a new subscription, which should be called with the # backlog. The introducer only records one announcement per index, so # the backlog will only have the latest message. 
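# --- Editor's illustrative sketch (not part of the original test module) ---
# The behaviour relied on above -- the introducer keeps only the latest
# announcement per index and replays that record to late subscribers -- can be
# modelled in a few lines.  This is a toy stand-in, not the real
# IntroducerService/IntroducerClient API; all names here are illustrative.
class TinyIntroducerModel(object):
    def __init__(self):
        self._latest = {}       # (service_name, key) -> announcement
        self._subscribers = {}  # service_name -> list of callbacks

    def publish(self, service_name, key, ann):
        self._latest[(service_name, key)] = ann   # one record per index
        for cb in self._subscribers.get(service_name, []):
            cb(key, ann)

    def subscribe(self, service_name, cb):
        self._subscribers.setdefault(service_name, []).append(cb)
        # replay the backlog: only the most recent announcement per index
        for (svc, key), ann in list(self._latest.items()):
            if svc == service_name:
                cb(key, ann)

# A late subscriber sees one entry per key, reflecting only the last publish.
model, seen = TinyIntroducerModel(), []
model.publish("storage", "k1", {"furl": "old"})
model.publish("storage", "k1", {"furl": "new"})
model.subscribe("storage", lambda key, ann: seen.append((key, ann["furl"])))
assert seen == [("k1", "new")]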
announcements2 = [] def _received2(key_s, ann): announcements2.append( (key_s, ann) ) d.addCallback(lambda ign: ic1.subscribe_to("storage", _received2)) d.addCallback(fireEventually) def _then5(ign): self.failUnlessEqual(len(announcements2), 1) key_s,ann = announcements2[-1] self.failUnlessEqual(key_s, pubkey_s) self.failUnlessEqual(ann["anonymous-storage-FURL"], furl1a) self.failUnlessEqual(ann["my-version"], "ver23") d.addCallback(_then5) return d class Server(AsyncTestCase): def test_duplicate(self): i = IntroducerService() ic1 = IntroducerClient(None, "introducer.furl", u"my_nickname", "ver23", "oldest_version", realseq, FilePath(self.mktemp())) furl1 = "pb://62ubehyunnyhzs7r6vdonnm2hpi52w6y@127.0.0.1:36106/gydnp" private_key, _ = ed25519.create_signing_keypair() ann1 = make_ann_t(ic1, furl1, private_key, seqnum=10) ann1_old = make_ann_t(ic1, furl1, private_key, seqnum=9) ann1_new = make_ann_t(ic1, furl1, private_key, seqnum=11) ann1_noseqnum = make_ann_t(ic1, furl1, private_key, seqnum=None) ann1_badseqnum = make_ann_t(ic1, furl1, private_key, seqnum="not an int") i.remote_publish_v2(ann1, None) all = i.get_announcements() self.failUnlessEqual(len(all), 1) self.failUnlessEqual(all[0].announcement["seqnum"], 10) self.failUnlessEqual(i._debug_counts["inbound_message"], 1) self.failUnlessEqual(i._debug_counts["inbound_duplicate"], 0) self.failUnlessEqual(i._debug_counts["inbound_no_seqnum"], 0) self.failUnlessEqual(i._debug_counts["inbound_old_replay"], 0) self.failUnlessEqual(i._debug_counts["inbound_update"], 0) i.remote_publish_v2(ann1, None) all = i.get_announcements() self.failUnlessEqual(len(all), 1) self.failUnlessEqual(all[0].announcement["seqnum"], 10) self.failUnlessEqual(i._debug_counts["inbound_message"], 2) self.failUnlessEqual(i._debug_counts["inbound_duplicate"], 1) self.failUnlessEqual(i._debug_counts["inbound_no_seqnum"], 0) self.failUnlessEqual(i._debug_counts["inbound_old_replay"], 0) self.failUnlessEqual(i._debug_counts["inbound_update"], 0) i.remote_publish_v2(ann1_old, None) all = i.get_announcements() self.failUnlessEqual(len(all), 1) self.failUnlessEqual(all[0].announcement["seqnum"], 10) self.failUnlessEqual(i._debug_counts["inbound_message"], 3) self.failUnlessEqual(i._debug_counts["inbound_duplicate"], 1) self.failUnlessEqual(i._debug_counts["inbound_no_seqnum"], 0) self.failUnlessEqual(i._debug_counts["inbound_old_replay"], 1) self.failUnlessEqual(i._debug_counts["inbound_update"], 0) i.remote_publish_v2(ann1_new, None) all = i.get_announcements() self.failUnlessEqual(len(all), 1) self.failUnlessEqual(all[0].announcement["seqnum"], 11) self.failUnlessEqual(i._debug_counts["inbound_message"], 4) self.failUnlessEqual(i._debug_counts["inbound_duplicate"], 1) self.failUnlessEqual(i._debug_counts["inbound_no_seqnum"], 0) self.failUnlessEqual(i._debug_counts["inbound_old_replay"], 1) self.failUnlessEqual(i._debug_counts["inbound_update"], 1) i.remote_publish_v2(ann1_noseqnum, None) all = i.get_announcements() self.failUnlessEqual(len(all), 1) self.failUnlessEqual(all[0].announcement["seqnum"], 11) self.failUnlessEqual(i._debug_counts["inbound_message"], 5) self.failUnlessEqual(i._debug_counts["inbound_duplicate"], 1) self.failUnlessEqual(i._debug_counts["inbound_no_seqnum"], 1) self.failUnlessEqual(i._debug_counts["inbound_old_replay"], 1) self.failUnlessEqual(i._debug_counts["inbound_update"], 1) i.remote_publish_v2(ann1_badseqnum, None) all = i.get_announcements() self.failUnlessEqual(len(all), 1) self.failUnlessEqual(all[0].announcement["seqnum"], 11) 
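# --- Editor's illustrative sketch (not part of the original test module) ---
# The assertions in this test exercise the introducer's sequence-number rules:
# the same seqnum is a duplicate, a lower seqnum is an old replay, a missing
# or non-integer seqnum is rejected, and only a higher seqnum updates the
# cached record.  A hedged restatement of that decision (not the real server
# code; the returned labels simply mirror the _debug_counts keys checked here):
def classify_announcement(cached_seqnum, new_seqnum):
    if not isinstance(new_seqnum, int):
        return "inbound_no_seqnum"      # covers None and "not an int"
    if cached_seqnum is None:
        return "new"                    # first announcement for this index
    if new_seqnum == cached_seqnum:
        return "inbound_duplicate"
    if new_seqnum < cached_seqnum:
        return "inbound_old_replay"
    return "inbound_update"

assert classify_announcement(None, 10) == "new"
assert classify_announcement(10, 10) == "inbound_duplicate"
assert classify_announcement(10, 9) == "inbound_old_replay"
assert classify_announcement(10, 11) == "inbound_update"
assert classify_announcement(11, None) == "inbound_no_seqnum"
assert classify_announcement(11, "not an int") == "inbound_no_seqnum"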
self.failUnlessEqual(i._debug_counts["inbound_message"], 6) self.failUnlessEqual(i._debug_counts["inbound_duplicate"], 1) self.failUnlessEqual(i._debug_counts["inbound_no_seqnum"], 2) self.failUnlessEqual(i._debug_counts["inbound_old_replay"], 1) self.failUnlessEqual(i._debug_counts["inbound_update"], 1) NICKNAME = u"n\u00EDickname-%s" # LATIN SMALL LETTER I WITH ACUTE class SystemTestMixin(ServiceMixin, pollmixin.PollMixin): def create_tub(self, portnum=None): tubfile = os.path.join(self.basedir, "tub.pem") self.central_tub = tub = Tub(certFile=tubfile) #tub.setOption("logLocalFailures", True) #tub.setOption("logRemoteFailures", True) tub.setOption("expose-remote-exception-types", False) tub.setServiceParent(self.parent) self.central_portnum = listenOnUnused(tub, portnum) class Queue(SystemTestMixin, AsyncTestCase): def test_queue_until_connected(self): self.basedir = "introducer/QueueUntilConnected/queued" os.makedirs(self.basedir) self.create_tub() introducer = IntroducerService() introducer.setServiceParent(self.parent) iff = os.path.join(self.basedir, "introducer.furl") ifurl = self.central_tub.registerReference(introducer, furlFile=iff) tub2 = Tub() tub2.setServiceParent(self.parent) c = IntroducerClient(tub2, ifurl, u"nickname", "version", "oldest", fakeseq, FilePath(self.mktemp())) furl1 = "pb://onug64tu@127.0.0.1:123/short" # base32("short") private_key, _ = ed25519.create_signing_keypair() d = introducer.disownServiceParent() def _offline(ign): # now that the introducer server is offline, create a client and # publish some messages c.setServiceParent(self.parent) # this starts the reconnector c.publish("storage", make_ann(furl1), private_key) introducer.setServiceParent(self.parent) # restart the server # now wait for the messages to be delivered def _got_announcement(): return bool(introducer.get_announcements()) return self.poll(_got_announcement) d.addCallback(_offline) def _done(ign): v = introducer.get_announcements()[0] furl = v.announcement["anonymous-storage-FURL"] self.failUnlessEqual(furl, furl1) d.addCallback(_done) # now let the ack get back def _wait_until_idle(ign): def _idle(): if c._debug_outstanding: return False if introducer._debug_outstanding: return False return True return self.poll(_idle) d.addCallback(_wait_until_idle) return d class SystemTest(SystemTestMixin, AsyncTestCase): def do_system_test(self): self.create_tub() introducer = IntroducerService() introducer.setServiceParent(self.parent) iff = os.path.join(self.basedir, "introducer.furl") tub = self.central_tub ifurl = self.central_tub.registerReference(introducer, furlFile=iff) self.introducer_furl = ifurl # we have 5 clients who publish themselves as storage servers, and a # sixth which does which not. All 6 clients subscriber to hear about # storage. When the connections are fully established, all six nodes # should have 5 connections each. 
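# --- Editor's illustrative sketch (not part of the original test module) ---
# The topology described above leads directly to the totals asserted later in
# this test: every subscriber hears about every storage server, one client
# additionally publishes a "boring" service, and the introducer therefore
# emits NUM_STORAGE * NUM_CLIENTS individual storage announcements.  A tiny
# sanity check of that arithmetic (the *_EXAMPLE names are illustrative):
NUM_STORAGE_EXAMPLE, NUM_CLIENTS_EXAMPLE = 5, 6
inbound_messages = NUM_STORAGE_EXAMPLE + 1                   # +1 for "boring"
per_subscriber = NUM_STORAGE_EXAMPLE                         # announcements each client sees
total_outbound = NUM_STORAGE_EXAMPLE * NUM_CLIENTS_EXAMPLE   # outbound_announcements
assert (inbound_messages, per_subscriber, total_outbound) == (6, 5, 30)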
NUM_STORAGE = 5 NUM_CLIENTS = 6 clients = [] tubs = {} received_announcements = {} subscribing_clients = [] publishing_clients = [] printable_serverids = {} self.the_introducer = introducer privkeys = {} pubkeys = {} expected_announcements = [0 for c in range(NUM_CLIENTS)] for i in range(NUM_CLIENTS): tub = Tub() #tub.setOption("logLocalFailures", True) #tub.setOption("logRemoteFailures", True) tub.setOption("expose-remote-exception-types", False) tub.setServiceParent(self.parent) listenOnUnused(tub) log.msg("creating client %d: %s" % (i, tub.getShortTubID())) c = IntroducerClient(tub, self.introducer_furl, NICKNAME % str(i), "version", "oldest", fakeseq, FilePath(self.mktemp())) received_announcements[c] = {} def got(key_s_or_tubid, ann, announcements): index = key_s_or_tubid or get_tubid_string_from_ann(ann) announcements[index] = ann c.subscribe_to("storage", got, received_announcements[c]) subscribing_clients.append(c) expected_announcements[i] += 1 # all expect a 'storage' announcement node_furl = tub.registerReference(Referenceable()) private_key, public_key = ed25519.create_signing_keypair() public_key_str = ed25519.string_from_verifying_key(public_key) privkeys[i] = private_key pubkeys[i] = public_key_str if i < NUM_STORAGE: # sign all announcements c.publish("storage", make_ann(node_furl), private_key) printable_serverids[i] = remove_prefix(public_key_str, b"pub-") publishing_clients.append(c) else: # the last one does not publish anything pass if i == 2: # also publish something that nobody cares about boring_furl = tub.registerReference(Referenceable()) c.publish("boring", make_ann(boring_furl), private_key) c.setServiceParent(self.parent) clients.append(c) tubs[c] = tub def _wait_for_connected(ign): def _connected(): for c in clients: if not c.connected_to_introducer(): return False return True return self.poll(_connected) # we watch the clients to determine when the system has settled down. # Then we can look inside the server to assert things about its # state. def _wait_for_expected_announcements(ign): def _got_expected_announcements(): for i,c in enumerate(subscribing_clients): if len(received_announcements[c]) < expected_announcements[i]: return False return True return self.poll(_got_expected_announcements) # before shutting down any Tub, we'd like to know that there are no # messages outstanding def _wait_until_idle(ign): def _idle(): for c in subscribing_clients + publishing_clients: if c._debug_outstanding: return False if self.the_introducer._debug_outstanding: return False return True return self.poll(_idle) d = defer.succeed(None) d.addCallback(_wait_for_connected) d.addCallback(_wait_for_expected_announcements) d.addCallback(_wait_until_idle) def _check1(res): log.msg("doing _check1") dc = self.the_introducer._debug_counts # each storage server publishes a record. There is also one # "boring" self.failUnlessEqual(dc["inbound_message"], NUM_STORAGE+1) self.failUnlessEqual(dc["inbound_duplicate"], 0) self.failUnlessEqual(dc["inbound_update"], 0) self.failUnlessEqual(dc["inbound_subscribe"], NUM_CLIENTS) # the number of outbound messages is tricky.. I think it depends # upon a race between the publish and the subscribe messages. 
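# --- Editor's illustrative sketch (not part of the original test module) ---
# The "tricky" outbound_message count mentioned above comes from batching: the
# introducer can pack several announcements into a single outbound message,
# and how many land in each batch depends on the publish/subscribe race.  So
# the checks below pin the deterministic quantity (total announcements) and
# only require that the message count is positive.  A toy illustration of why
# the message count varies while the announcement count does not:
def batch(announcements, batch_size):
    """Group announcements into messages of up to batch_size each."""
    return [announcements[i:i + batch_size]
            for i in range(0, len(announcements), batch_size)]

anns = ["ann%d" % i for i in range(5)]
assert sum(len(m) for m in batch(anns, 1)) == sum(len(m) for m in batch(anns, 5)) == 5
assert len(batch(anns, 1)) != len(batch(anns, 5))   # 5 messages vs. 1 message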
self.failUnless(dc["outbound_message"] > 0) # each client subscribes to "storage", and each server publishes self.failUnlessEqual(dc["outbound_announcements"], NUM_STORAGE*NUM_CLIENTS) for c in subscribing_clients: cdc = c._debug_counts self.failUnless(cdc["inbound_message"]) self.failUnlessEqual(cdc["inbound_announcement"], NUM_STORAGE) self.failUnlessEqual(cdc["wrong_service"], 0) self.failUnlessEqual(cdc["duplicate_announcement"], 0) self.failUnlessEqual(cdc["update"], 0) self.failUnlessEqual(cdc["new_announcement"], NUM_STORAGE) anns = received_announcements[c] self.failUnlessEqual(len(anns), NUM_STORAGE) serverid0 = printable_serverids[0] ann = anns[serverid0] nick = ann["nickname"] self.assertIsInstance(nick, str) self.failUnlessEqual(nick, NICKNAME % "0") for c in publishing_clients: cdc = c._debug_counts expected = 1 if c in [clients[2], # boring ]: expected = 2 self.failUnlessEqual(cdc["outbound_message"], expected) # now check the web status, make sure it renders without error ir = introweb.IntroducerRoot(self.parent) self.parent.nodeid = b"NODEID" log.msg("_check1 done") return flattenString(None, ir._create_element()) d.addCallback(_check1) def _check2(flattened_bytes): text = flattened_bytes.decode("utf-8") self.assertIn(NICKNAME % "0", text) # a v2 client self.assertIn(NICKNAME % "1", text) # another v2 client for i in range(NUM_STORAGE): self.assertIn(ensure_text(printable_serverids[i]), text, (i,printable_serverids[i],text)) # make sure there isn't a double-base32ed string too self.assertNotIn(idlib.nodeid_b2a(printable_serverids[i]), text, (i,printable_serverids[i],text)) log.msg("_check2 done") d.addCallback(_check2) # force an introducer reconnect, by shutting down the Tub it's using # and starting a new Tub (with the old introducer). Everybody should # reconnect and republish, but the introducer should ignore the # republishes as duplicates. However, because the server doesn't know # what each client does and does not know, it will send them a copy # of the current announcement table anyway. 
d.addCallback(lambda _ign: log.msg("shutting down introducer's Tub")) d.addCallback(lambda _ign: self.central_tub.disownServiceParent()) def _wait_for_introducer_loss(ign): def _introducer_lost(): for c in clients: if c.connected_to_introducer(): return False return True return self.poll(_introducer_lost) d.addCallback(_wait_for_introducer_loss) def _restart_introducer_tub(_ign): log.msg("restarting introducer's Tub") # reset counters for i in range(NUM_CLIENTS): c = subscribing_clients[i] for k in c._debug_counts: c._debug_counts[k] = 0 for k in self.the_introducer._debug_counts: self.the_introducer._debug_counts[k] = 0 expected_announcements[i] += 1 # new 'storage' for everyone self.create_tub(self.central_portnum) newfurl = self.central_tub.registerReference(self.the_introducer, furlFile=iff) assert newfurl == self.introducer_furl d.addCallback(_restart_introducer_tub) d.addCallback(_wait_for_connected) d.addCallback(_wait_for_expected_announcements) d.addCallback(_wait_until_idle) d.addCallback(lambda _ign: log.msg(" reconnected")) # TODO: publish something while the introducer is offline, then # confirm it gets delivered when the connection is reestablished def _check2(res): log.msg("doing _check2") # assert that the introducer sent out new messages, one per # subscriber dc = self.the_introducer._debug_counts self.failUnlessEqual(dc["outbound_announcements"], NUM_STORAGE*NUM_CLIENTS) self.failUnless(dc["outbound_message"] > 0) self.failUnlessEqual(dc["inbound_subscribe"], NUM_CLIENTS) for c in subscribing_clients: cdc = c._debug_counts self.failUnlessEqual(cdc["inbound_message"], 1) self.failUnlessEqual(cdc["inbound_announcement"], NUM_STORAGE) self.failUnlessEqual(cdc["new_announcement"], 0) self.failUnlessEqual(cdc["wrong_service"], 0) self.failUnlessEqual(cdc["duplicate_announcement"], NUM_STORAGE) d.addCallback(_check2) # Then force an introducer restart, by shutting down the Tub, # destroying the old introducer, and starting a new Tub+Introducer. # Everybody should reconnect and republish, and the (new) introducer # will distribute the new announcements, but the clients should # ignore the republishes as duplicates. 
d.addCallback(lambda _ign: log.msg("shutting down introducer")) d.addCallback(lambda _ign: self.central_tub.disownServiceParent()) d.addCallback(_wait_for_introducer_loss) d.addCallback(lambda _ign: log.msg("introducer lost")) def _restart_introducer(_ign): log.msg("restarting introducer") self.create_tub(self.central_portnum) # reset counters for i in range(NUM_CLIENTS): c = subscribing_clients[i] for k in c._debug_counts: c._debug_counts[k] = 0 expected_announcements[i] += 1 # new 'storage' for everyone introducer = IntroducerService() self.the_introducer = introducer newfurl = self.central_tub.registerReference(self.the_introducer, furlFile=iff) assert newfurl == self.introducer_furl d.addCallback(_restart_introducer) d.addCallback(_wait_for_connected) d.addCallback(_wait_for_expected_announcements) d.addCallback(_wait_until_idle) def _check3(res): log.msg("doing _check3") dc = self.the_introducer._debug_counts self.failUnlessEqual(dc["outbound_announcements"], NUM_STORAGE*NUM_CLIENTS) self.failUnless(dc["outbound_message"] > 0) self.failUnlessEqual(dc["inbound_subscribe"], NUM_CLIENTS) for c in subscribing_clients: cdc = c._debug_counts self.failUnless(cdc["inbound_message"] > 0) self.failUnlessEqual(cdc["inbound_announcement"], NUM_STORAGE) self.failUnlessEqual(cdc["new_announcement"], 0) self.failUnlessEqual(cdc["wrong_service"], 0) self.failUnlessEqual(cdc["duplicate_announcement"], NUM_STORAGE) d.addCallback(_check3) return d def test_system_v2_server(self): self.basedir = "introducer/SystemTest/system_v2_server" os.makedirs(self.basedir) return self.do_system_test() class FakeRemoteReference(object): def notifyOnDisconnect(self, *args, **kwargs): pass def getRemoteTubID(self): return "62ubehyunnyhzs7r6vdonnm2hpi52w6y" def getPeer(self): return address.IPv4Address("TCP", "remote.example.com", 3456) class ClientInfo(AsyncTestCase): def test_client_v2(self): introducer = IntroducerService() tub = introducer_furl = None client_v2 = IntroducerClient(tub, introducer_furl, NICKNAME % u"v2", "my_version", "oldest", fakeseq, FilePath(self.mktemp())) #furl1 = "pb://62ubehyunnyhzs7r6vdonnm2hpi52w6y@127.0.0.1:0/swissnum" #ann_s = make_ann_t(client_v2, furl1, None, 10) #introducer.remote_publish_v2(ann_s, Referenceable()) subscriber = FakeRemoteReference() introducer.remote_subscribe_v2(subscriber, "storage", client_v2._my_subscriber_info) subs = introducer.get_subscribers() self.failUnlessEqual(len(subs), 1) s0 = subs[0] self.failUnlessEqual(s0.service_name, "storage") self.failUnlessEqual(s0.nickname, NICKNAME % u"v2") self.failUnlessEqual(s0.version, "my_version") class Announcements(AsyncTestCase): def test_client_v2_signed(self): introducer = IntroducerService() tub = introducer_furl = None client_v2 = IntroducerClient(tub, introducer_furl, u"nick-v2", "my_version", "oldest", fakeseq, FilePath(self.mktemp())) furl1 = "pb://62ubehyunnyhzs7r6vdonnm2hpi52w6y@127.0.0.1:0/swissnum" private_key, public_key = ed25519.create_signing_keypair() public_key_str = remove_prefix(ed25519.string_from_verifying_key(public_key), b"pub-") ann_t0 = make_ann_t(client_v2, furl1, private_key, 10) canary0 = Referenceable() introducer.remote_publish_v2(ann_t0, canary0) a = introducer.get_announcements() self.failUnlessEqual(len(a), 1) self.assertThat(a[0].canary, Is(canary0)) self.failUnlessEqual(a[0].index, ("storage", public_key_str)) self.failUnlessEqual(a[0].nickname, u"nick-v2") self.failUnlessEqual(a[0].service_name, "storage") self.failUnlessEqual(a[0].version, "my_version") 
self.failUnlessEqual(a[0].announcement["anonymous-storage-FURL"], furl1) def _load_cache(self, cache_filepath): with cache_filepath.open() as f: return yamlutil.safe_load(f) @defer.inlineCallbacks def test_client_cache(self): """ Announcements received by an introducer client are written to that introducer client's cache file. """ basedir = FilePath("introducer/ClientSeqnums/test_client_cache_1") private = basedir.child("private") private.makedirs() write_introducer(basedir, "default", "nope") cache_filepath = basedir.descendant([ "private", "introducer_default_cache.yaml", ]) # if storage is enabled, the Client will publish its storage server # during startup (although the announcement will wait in a queue # until the introducer connection is established). To avoid getting # confused by this, disable storage. with basedir.child("tahoe.cfg").open("w") as f: f.write(b"[storage]\n") f.write(b"enabled = false\n") c = yield create_client(basedir.path) ic = c.introducer_clients[0] private_key, public_key = ed25519.create_signing_keypair() public_key_str = remove_prefix(ed25519.string_from_verifying_key(public_key), b"pub-") furl1 = "pb://onug64tu@127.0.0.1:123/short" # base32("short") ann_t = make_ann_t(ic, furl1, private_key, 1) ic.got_announcements([ann_t]) yield flushEventualQueue() # check the cache for the announcement announcements = self._load_cache(cache_filepath) self.failUnlessEqual(len(announcements), 1) self.failUnlessEqual(ensure_binary(announcements[0]['key_s']), public_key_str) ann = announcements[0]["ann"] self.failUnlessEqual(ann["anonymous-storage-FURL"], furl1) self.failUnlessEqual(ann["seqnum"], 1) # a new announcement that replaces the first should replace the # cached entry, not duplicate it furl2 = furl1 + "er" ann_t2 = make_ann_t(ic, furl2, private_key, 2) ic.got_announcements([ann_t2]) yield flushEventualQueue() announcements = self._load_cache(cache_filepath) self.failUnlessEqual(len(announcements), 1) self.failUnlessEqual(ensure_binary(announcements[0]['key_s']), public_key_str) ann = announcements[0]["ann"] self.failUnlessEqual(ann["anonymous-storage-FURL"], furl2) self.failUnlessEqual(ann["seqnum"], 2) # but a third announcement with a different key should add to the # cache private_key2, public_key2 = ed25519.create_signing_keypair() public_key_str2 = remove_prefix(ed25519.string_from_verifying_key(public_key2), b"pub-") furl3 = "pb://onug64tu@127.0.0.1:456/short" ann_t3 = make_ann_t(ic, furl3, private_key2, 1) ic.got_announcements([ann_t3]) yield flushEventualQueue() announcements = self._load_cache(cache_filepath) self.failUnlessEqual(len(announcements), 2) self.failUnlessEqual(set([public_key_str, public_key_str2]), set([ensure_binary(a["key_s"]) for a in announcements])) self.failUnlessEqual(set([furl2, furl3]), set([a["ann"]["anonymous-storage-FURL"] for a in announcements])) # test loading yield flushEventualQueue() ic2 = IntroducerClient(None, "introducer.furl", u"my_nickname", "my_version", "oldest_version", fakeseq, ic._cache_filepath) announcements = {} def got(key_s, ann): announcements[key_s] = ann ic2.subscribe_to("storage", got) ic2._load_announcements() # normally happens when connection fails yield flushEventualQueue() self.failUnless(public_key_str in announcements) self.failUnlessEqual(announcements[public_key_str]["anonymous-storage-FURL"], furl2) self.failUnlessEqual(announcements[public_key_str2]["anonymous-storage-FURL"], furl3) c2 = yield create_client(basedir.path) c2.introducer_clients[0]._load_announcements() yield flushEventualQueue() 
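# --- Editor's illustrative sketch (not part of the original test module) ---
# test_client_cache expects the on-disk announcement cache to hold at most one
# entry per server key: a newer announcement under the same key replaces the
# cached one, while an announcement under a different key is appended.  A
# minimal model of that merge rule; the list-of-dicts layout mirrors the YAML
# entries read back above, but the function itself is illustrative.
def merge_into_cache(cache, key_s, ann):
    for entry in cache:
        if entry["key_s"] == key_s:
            entry["ann"] = ann                      # same key: replace in place
            return cache
    cache.append({"key_s": key_s, "ann": ann})      # new key: add an entry
    return cache

cache_model = []
merge_into_cache(cache_model, "pub1", {"seqnum": 1})
merge_into_cache(cache_model, "pub1", {"seqnum": 2})   # replaces, no duplicate
merge_into_cache(cache_model, "pub2", {"seqnum": 1})   # different key: new entry
assert len(cache_model) == 2 and cache_model[0]["ann"]["seqnum"] == 2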
self.assertEqual(c2.storage_broker.get_all_serverids(), frozenset([public_key_str, public_key_str2])) class ClientSeqnums(AsyncBrokenTestCase): @defer.inlineCallbacks def test_client(self): basedir = FilePath("introducer/ClientSeqnums/test_client") private = basedir.child("private") private.makedirs() write_introducer(basedir, "default", "nope") # if storage is enabled, the Client will publish its storage server # during startup (although the announcement will wait in a queue # until the introducer connection is established). To avoid getting # confused by this, disable storage. with basedir.child("tahoe.cfg").open("w") as f: f.write(b"[storage]\n") f.write(b"enabled = false\n") c = yield create_client(basedir.path) ic = c.introducer_clients[0] outbound = ic._outbound_announcements published = ic._published_announcements def read_seqnum(): seqnum = basedir.child("announcement-seqnum").getContent() return int(seqnum) ic.publish("sA", {"key": "value1"}, c._node_private_key) self.failUnlessEqual(read_seqnum(), 1) self.failUnless("sA" in outbound) self.failUnlessEqual(outbound["sA"]["seqnum"], 1) nonce1 = outbound["sA"]["nonce"] self.failUnless(isinstance(nonce1, bytes)) # Make nonce unicode, to match JSON: outbound["sA"]["nonce"] = str(nonce1, "utf-8") self.failUnlessEqual(json.loads(published["sA"][0]), outbound["sA"]) # [1] is the signature, [2] is the pubkey # publishing a second service causes both services to be # re-published, with the next higher sequence number ic.publish("sB", {"key": "value2"}, c._node_private_key) self.failUnlessEqual(read_seqnum(), 2) self.failUnless("sB" in outbound) self.failUnlessEqual(outbound["sB"]["seqnum"], 2) self.failUnless("sA" in outbound) self.failUnlessEqual(outbound["sA"]["seqnum"], 2) nonce2 = outbound["sA"]["nonce"] self.failUnless(isinstance(nonce2, bytes)) self.failIfEqual(nonce1, nonce2) # Make nonce unicode, to match JSON: outbound["sA"]["nonce"] = str(nonce2, "utf-8") outbound["sB"]["nonce"] = str(outbound["sB"]["nonce"], "utf-8") self.failUnlessEqual(json.loads(published["sA"][0]), outbound["sA"]) self.failUnlessEqual(json.loads(published["sB"][0]), outbound["sB"]) class TooNewServer(IntroducerService): VERSION = { "http://allmydata.org/tahoe/protocols/introducer/v999": { }, "application-version": "greetings from the crazy future", } class NonV1Server(SystemTestMixin, AsyncTestCase): # if the client connects to a server that doesn't provide the 'v2' # protocol, it is supposed to provide a useful error instead of a weird # exception. 
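# The "useful error" is surfaced as c._introducer_error, wrapping an InsufficientVersionError.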
def test_failure(self): self.basedir = "introducer/NonV1Server/failure" os.makedirs(self.basedir) self.create_tub() i = TooNewServer() i.setServiceParent(self.parent) self.introducer_furl = self.central_tub.registerReference(i) tub = Tub() tub.setOption("expose-remote-exception-types", False) tub.setServiceParent(self.parent) listenOnUnused(tub) c = IntroducerClient(tub, self.introducer_furl, u"nickname-client", "version", "oldest", fakeseq, FilePath(self.mktemp())) announcements = {} def got(key_s, ann): announcements[key_s] = ann c.subscribe_to("storage", got) c.setServiceParent(self.parent) # now we wait for it to connect and notice the bad version def _got_bad(): return bool(c._introducer_error) or bool(c._publisher) d = self.poll(_got_bad) def _done(res): self.failUnless(c._introducer_error) self.failUnless(c._introducer_error.check(InsufficientVersionError), c._introducer_error) d.addCallback(_done) return d class DecodeFurl(SyncTestCase): def test_decode(self): # make sure we have a working base64.b32decode. The one in # python2.4.[01] was broken. furl = 'pb://t5g7egomnnktbpydbuijt6zgtmw4oqi5@127.0.0.1:51857/hfzv36i' m = re.match(r'pb://(\w+)@', furl) assert m nodeid = b32decode(m.group(1).upper().encode("ascii")) self.failUnlessEqual(nodeid, b"\x9fM\xf2\x19\xcckU0\xbf\x03\r\x10\x99\xfb&\x9b-\xc7A\x1d") class Signatures(SyncTestCase): def test_sign(self): ann = {"key1": "value1"} private_key, public_key = ed25519.create_signing_keypair() public_key_str = ed25519.string_from_verifying_key(public_key) ann_t = sign_to_foolscap(ann, private_key) (msg, sig, key) = ann_t self.failUnlessEqual(type(msg), type("".encode("utf-8"))) # bytes self.failUnlessEqual(json.loads(msg.decode("utf-8")), ann) self.failUnless(sig.startswith(b"v0-")) self.failUnless(key.startswith(b"v0-")) (ann2,key2) = unsign_from_foolscap(ann_t) self.failUnlessEqual(ann2, ann) self.failUnlessEqual(b"pub-" + key2, public_key_str) # not signed self.failUnlessRaises(UnknownKeyError, unsign_from_foolscap, (msg, None, key)) self.failUnlessRaises(UnknownKeyError, unsign_from_foolscap, (msg, sig, None)) # bad signature bad_ann = {"key1": "value2"} bad_msg = json.dumps(bad_ann).encode("utf-8") self.failUnlessRaises(BadSignature, unsign_from_foolscap, (bad_msg, sig, key)) # unrecognized signatures self.failUnlessRaises(UnknownKeyError, unsign_from_foolscap, (bad_msg, b"v999-sig", key)) self.failUnlessRaises(UnknownKeyError, unsign_from_foolscap, (bad_msg, sig, b"v999-key")) def test_unsigned_announcement(self): """ An incorrectly signed announcement is not delivered to subscribers. """ private_key, public_key = ed25519.create_signing_keypair() public_key_str = ed25519.string_from_verifying_key(public_key) ic = IntroducerClient( Tub(), "pb://", u"fake_nick", "0.0.0", "1.2.3", (0, u"i am a nonce"), FilePath(self.mktemp()), ) received = {} ic.subscribe_to("good-stuff", partial(setitem, received)) # Deliver a good message to prove our test code is valid. ann = {"service-name": "good-stuff", "payload": "hello"} ann_t = sign_to_foolscap(ann, private_key) ic.got_announcements([ann_t]) self.assertEqual( {public_key_str[len("pub-"):]: ann}, received, ) received.clear() # Now deliver one without a valid signature and observe that it isn't # delivered to the subscriber. ann = {"service-name": "good-stuff", "payload": "bad stuff"} (msg, sig, key) = sign_to_foolscap(ann, private_key) # Drop a base32 word from the middle of the key to invalidate the # signature. 
sig_a = bytearray(sig) sig_a[20:22] = [] sig = bytes(sig_a) ann_t = (msg, sig, key) ic.got_announcements([ann_t]) # The received announcements dict should remain empty because we # should not receive the announcement with the invalid signature. self.assertEqual( {}, received, ) # add tests of StorageFarmBroker: if it receives duplicate announcements, it # should leave the Reconnector in place, also if it receives # same-FURL-different-misc, but if it receives same-nodeid-different-FURL, it # should tear down the Reconnector and make a new one. This behavior used to # live in the IntroducerClient, and thus used to be tested by test_introducer # copying more tests from old branch: # then also add Upgrade test tahoe_lafs-1.20.0/src/allmydata/test/test_iputil.py0000644000000000000000000001151513615410400017257 0ustar00""" Tests for allmydata.util.iputil. Ported to Python 3. """ from __future__ import annotations import os, socket import gc from functools import wraps from typing import TypeVar, Callable from testtools.matchers import ( MatchesAll, IsInstance, AllMatch, MatchesPredicate, ) from twisted.trial import unittest from foolscap.api import Tub from allmydata.util import iputil, gcutil from ..util.iputil import ( get_local_addresses_sync, ) from .common import ( SyncTestCase, ) T = TypeVar("T", contravariant=True) U = TypeVar("U", covariant=True) def retry(stop: Callable[[], bool]) -> Callable[[Callable[[T], U]], Callable[[T], U]]: """ Call a function until the predicate says to stop or the function stops raising an exception. :param stop: A callable to call after the decorated function raises an exception. The decorated function will be called again if ``stop`` returns ``False``. :return: A decorator function. """ def decorate(f: Callable[[T], U]) -> Callable[[T], U]: @wraps(f) def decorator(self: T) -> U: while True: try: return f(self) except Exception: if stop(): raise return decorator return decorate def stop_after_attempt(limit: int) -> Callable[[], bool]: """ Stop after ``limit`` calls. 
""" counter = 0 def check(): nonlocal counter counter += 1 return counter < limit return check class ListenOnUsed(unittest.TestCase): """Tests for listenOnUnused.""" def create_tub(self, basedir): os.makedirs(basedir) tubfile = os.path.join(basedir, "tub.pem") tub = Tub(certFile=tubfile) tub.setOption("expose-remote-exception-types", False) tub.startService() self.addCleanup(tub.stopService) return tub @retry(stop=stop_after_attempt(7)) def test_random_port(self): """A random port is selected if none is given.""" tub = self.create_tub("utils/ListenOnUsed/test_randomport") self.assertEqual(len(tub.getListeners()), 0) portnum = iputil.listenOnUnused(tub) # We can connect to this port: s = socket.socket() s.connect(("127.0.0.1", portnum)) s.close() self.assertEqual(len(tub.getListeners()), 1) # Listen on another port: tub2 = self.create_tub("utils/ListenOnUsed/test_randomport_2") portnum2 = iputil.listenOnUnused(tub2) self.assertNotEqual(portnum, portnum2) @retry(stop=stop_after_attempt(7)) def test_specific_port(self): """The given port is used.""" tub = self.create_tub("utils/ListenOnUsed/test_givenport") s = socket.socket() s.bind(("127.0.0.1", 0)) port = s.getsockname()[1] s.close() port2 = iputil.listenOnUnused(tub, port) self.assertEqual(port, port2) class GcUtil(unittest.TestCase): """Tests for allmydata.util.gcutil, which is used only by listenOnUnused.""" def test_gc_after_allocations(self): """The resource tracker triggers allocations every 26 allocations.""" tracker = gcutil._ResourceTracker() collections = [] self.patch(gc, "collect", lambda: collections.append(1)) for _ in range(2): for _ in range(25): tracker.allocate() self.assertEqual(len(collections), 0) tracker.allocate() self.assertEqual(len(collections), 1) del collections[:] def test_release_delays_gc(self): """Releasing a file descriptor resource delays GC collection.""" tracker = gcutil._ResourceTracker() collections = [] self.patch(gc, "collect", lambda: collections.append(1)) for _ in range(2): tracker.allocate() for _ in range(3): tracker.release() for _ in range(25): tracker.allocate() self.assertEqual(len(collections), 0) tracker.allocate() self.assertEqual(len(collections), 1) class GetLocalAddressesSyncTests(SyncTestCase): """ Tests for ``get_local_addresses_sync``. """ def test_some_ipv4_addresses(self): """ ``get_local_addresses_sync`` returns a list of IPv4 addresses as native strings. """ self.assertThat( get_local_addresses_sync(), MatchesAll( IsInstance(list), AllMatch( MatchesAll( IsInstance(str), MatchesPredicate( lambda addr: socket.inet_pton(socket.AF_INET, addr), "%r is not an IPv4 address.", ), ), ), ), ) tahoe_lafs-1.20.0/src/allmydata/test/test_istorageserver.py0000644000000000000000000011376113615410400021023 0ustar00""" Tests for the ``IStorageServer`` interface. Keep in mind that ``IStorageServer`` is actually the storage _client_ interface. Note that for performance, in the future we might want the same node to be reused across tests, so each test should be careful to generate unique storage indexes. """ from __future__ import annotations from future.utils import bchr from random import Random from unittest import SkipTest from twisted.internet.defer import inlineCallbacks, returnValue from twisted.internet.task import Clock from foolscap.api import Referenceable, RemoteException # A better name for this would be IStorageClient... 
from allmydata.interfaces import IStorageServer from .common_system import SystemTestMixin from .common import AsyncTestCase from allmydata.storage.server import StorageServer # not a IStorageServer!! # Use random generator with known seed, so results are reproducible if tests # are run in the same order. _RANDOM = Random(0) def _randbytes(length): # type: (int) -> bytes """Return random bytes string of given length.""" return b"".join([bchr(_RANDOM.randrange(0, 256)) for _ in range(length)]) def new_storage_index(): # type: () -> bytes """Return a new random storage index.""" return _randbytes(16) def new_secret(): # type: () -> bytes """Return a new random secret (for lease renewal or cancellation).""" return _randbytes(32) class IStorageServerSharedAPIsTestsMixin(object): """ Tests for ``IStorageServer``'s shared APIs. ``self.storage_client`` is expected to provide ``IStorageServer``. """ @inlineCallbacks def test_version(self): """ ``IStorageServer`` returns a dictionary where the key is an expected protocol version. """ result = yield self.storage_client.get_version() self.assertIsInstance(result, dict) self.assertIn(b"http://allmydata.org/tahoe/protocols/storage/v1", result) class IStorageServerImmutableAPIsTestsMixin(object): """ Tests for ``IStorageServer``'s immutable APIs. ``self.storage_client`` is expected to provide ``IStorageServer``. ``self.disconnect()`` should disconnect and then reconnect, creating a new ``self.storage_client``. Some implementations may wish to skip tests using this; HTTP has no notion of disconnection. ``self.server`` is expected to be the corresponding ``allmydata.storage.server.StorageServer`` instance. Time should be instrumented, such that ``self.fake_time()`` and ``self.fake_sleep()`` return and advance the server time, respectively. """ @inlineCallbacks def test_allocate_buckets_new(self): """ allocate_buckets() with a new storage index returns the matching shares. """ (already_got, allocated) = yield self.storage_client.allocate_buckets( new_storage_index(), renew_secret=new_secret(), cancel_secret=new_secret(), sharenums=set(range(5)), allocated_size=1024, canary=Referenceable(), ) self.assertEqual(already_got, set()) self.assertEqual(set(allocated.keys()), set(range(5))) # We validate the bucket objects' interface in a later test. @inlineCallbacks def test_allocate_buckets_repeat(self): """ ``IStorageServer.allocate_buckets()`` with the same storage index does not return work-in-progress buckets, but will add any newly added buckets. """ storage_index, renew_secret, cancel_secret = ( new_storage_index(), new_secret(), new_secret(), ) (already_got, allocated) = yield self.storage_client.allocate_buckets( storage_index, renew_secret, cancel_secret, sharenums=set(range(4)), allocated_size=1024, canary=Referenceable(), ) (already_got2, allocated2) = yield self.storage_client.allocate_buckets( storage_index, renew_secret, cancel_secret, set(range(5)), 1024, Referenceable(), ) self.assertEqual(already_got, already_got2) self.assertEqual(set(allocated2.keys()), {4}) @inlineCallbacks def abort_or_disconnect_half_way(self, abort_or_disconnect): """ If we disconnect/abort in the middle of writing to a bucket, all data is wiped, and it's even possible to write different data to the bucket. (In the real world one shouldn't do that, but writing different data is a good way to test that the original data really was wiped.) ``abort_or_disconnect`` is a callback that takes a bucket and aborts up load, or perhaps disconnects the whole connection. 
""" storage_index, renew_secret, cancel_secret = ( new_storage_index(), new_secret(), new_secret(), ) (_, allocated) = yield self.storage_client.allocate_buckets( storage_index, renew_secret, cancel_secret, sharenums={0}, allocated_size=1024, canary=Referenceable(), ) # Bucket 1 get some data written (but not all, or HTTP implicitly # finishes the upload) yield allocated[0].callRemote("write", 0, b"1" * 1023) # Disconnect or abort, depending on the test: yield abort_or_disconnect(allocated[0]) # Write different data with no complaint: (_, allocated) = yield self.storage_client.allocate_buckets( storage_index, renew_secret, cancel_secret, sharenums={0}, allocated_size=1024, canary=Referenceable(), ) yield allocated[0].callRemote("write", 0, b"2" * 1024) @inlineCallbacks def test_written_shares_are_allocated(self): """ Shares that are fully written to show up as allocated in result from ``IStorageServer.allocate_buckets()``. Partially-written or empty shares don't. """ storage_index, renew_secret, cancel_secret = ( new_storage_index(), new_secret(), new_secret(), ) (_, allocated) = yield self.storage_client.allocate_buckets( storage_index, renew_secret, cancel_secret, sharenums=set(range(5)), allocated_size=1024, canary=Referenceable(), ) # Bucket 1 is fully written in one go. yield allocated[1].callRemote("write", 0, b"1" * 1024) yield allocated[1].callRemote("close") # Bucket 2 is fully written in two steps. yield allocated[2].callRemote("write", 0, b"1" * 512) yield allocated[2].callRemote("write", 512, b"2" * 512) yield allocated[2].callRemote("close") # Bucket 0 has partial write. yield allocated[0].callRemote("write", 0, b"1" * 512) (already_got, _) = yield self.storage_client.allocate_buckets( storage_index, renew_secret, cancel_secret, sharenums=set(range(5)), allocated_size=1024, canary=Referenceable(), ) self.assertEqual(already_got, {1, 2}) @inlineCallbacks def test_written_shares_are_readable(self): """ Shares that are fully written to can be read. The result is not affected by the order in which writes happened, only by their offsets. """ storage_index, renew_secret, cancel_secret = ( new_storage_index(), new_secret(), new_secret(), ) (_, allocated) = yield self.storage_client.allocate_buckets( storage_index, renew_secret, cancel_secret, sharenums=set(range(5)), allocated_size=1024, canary=Referenceable(), ) # Bucket 1 is fully written in order yield allocated[1].callRemote("write", 0, b"1" * 512) yield allocated[1].callRemote("write", 512, b"2" * 512) yield allocated[1].callRemote("close") # Bucket 2 is fully written in reverse. yield allocated[2].callRemote("write", 512, b"4" * 512) yield allocated[2].callRemote("write", 0, b"3" * 512) yield allocated[2].callRemote("close") buckets = yield self.storage_client.get_buckets(storage_index) self.assertEqual(set(buckets.keys()), {1, 2}) self.assertEqual( (yield buckets[1].callRemote("read", 0, 1024)), b"1" * 512 + b"2" * 512 ) self.assertEqual( (yield buckets[2].callRemote("read", 0, 1024)), b"3" * 512 + b"4" * 512 ) @inlineCallbacks def test_non_matching_overlapping_writes(self): """ When doing overlapping writes in immutable uploads, non-matching writes fail. 
""" storage_index, renew_secret, cancel_secret = ( new_storage_index(), new_secret(), new_secret(), ) (_, allocated) = yield self.storage_client.allocate_buckets( storage_index, renew_secret, cancel_secret, sharenums={0}, allocated_size=30, canary=Referenceable(), ) yield allocated[0].callRemote("write", 0, b"1" * 25) # Overlapping write that doesn't match: with self.assertRaises(RemoteException): yield allocated[0].callRemote("write", 20, b"2" * 10) @inlineCallbacks def test_matching_overlapping_writes(self): """ When doing overlapping writes in immutable uploads, matching writes succeed. """ storage_index, renew_secret, cancel_secret = ( new_storage_index(), new_secret(), new_secret(), ) (_, allocated) = yield self.storage_client.allocate_buckets( storage_index, renew_secret, cancel_secret, sharenums={0}, allocated_size=25, canary=Referenceable(), ) yield allocated[0].callRemote("write", 0, b"1" * 10) # Overlapping write that matches: yield allocated[0].callRemote("write", 5, b"1" * 20) yield allocated[0].callRemote("close") buckets = yield self.storage_client.get_buckets(storage_index) self.assertEqual(set(buckets.keys()), {0}) self.assertEqual((yield buckets[0].callRemote("read", 0, 25)), b"1" * 25) def test_abort(self): """ If we call ``abort`` on the ``RIBucketWriter`` to disconnect in the middle of writing to a bucket, all data is wiped, and it's even possible to write different data to the bucket. (In the real world one probably wouldn't do that, but writing different data is a good way to test that the original data really was wiped.) """ return self.abort_or_disconnect_half_way( lambda bucket: bucket.callRemote("abort") ) @inlineCallbacks def test_get_buckets_skips_unfinished_buckets(self): """ Buckets that are not fully written are not returned by ``IStorageServer.get_buckets()`` implementations. """ storage_index = new_storage_index() (_, allocated) = yield self.storage_client.allocate_buckets( storage_index, renew_secret=new_secret(), cancel_secret=new_secret(), sharenums=set(range(5)), allocated_size=10, canary=Referenceable(), ) # Bucket 1 is fully written yield allocated[1].callRemote("write", 0, b"1" * 10) yield allocated[1].callRemote("close") # Bucket 2 is partially written yield allocated[2].callRemote("write", 0, b"1" * 5) buckets = yield self.storage_client.get_buckets(storage_index) self.assertEqual(set(buckets.keys()), {1}) @inlineCallbacks def test_read_bucket_at_offset(self): """ Given a read bucket returned from ``IStorageServer.get_buckets()``, it is possible to read at different offsets and lengths, with reads past the end resulting in empty bytes. 
""" length = 256 * 17 storage_index = new_storage_index() (_, allocated) = yield self.storage_client.allocate_buckets( storage_index, renew_secret=new_secret(), cancel_secret=new_secret(), sharenums=set(range(1)), allocated_size=length, canary=Referenceable(), ) total_data = _randbytes(256 * 17) yield allocated[0].callRemote("write", 0, total_data) yield allocated[0].callRemote("close") buckets = yield self.storage_client.get_buckets(storage_index) bucket = buckets[0] for start, to_read in [ (0, 250), # fraction (0, length), # whole thing (100, 1024), # offset fraction (length + 1, 100), # completely out of bounds (length - 100, 200), # partially out of bounds ]: data = yield bucket.callRemote("read", start, to_read) self.assertEqual( data, total_data[start : start + to_read], "Didn't match for start {}, length {}".format(start, to_read), ) @inlineCallbacks def create_share(self): """Create a share, return the storage index.""" storage_index = new_storage_index() renew_secret = new_secret() cancel_secret = new_secret() (_, allocated) = yield self.storage_client.allocate_buckets( storage_index, renew_secret=renew_secret, cancel_secret=cancel_secret, sharenums=set(range(1)), allocated_size=10, canary=Referenceable(), ) yield allocated[0].callRemote("write", 0, b"0123456789") yield allocated[0].callRemote("close") returnValue((storage_index, renew_secret, cancel_secret)) @inlineCallbacks def test_bucket_advise_corrupt_share(self): """ Calling ``advise_corrupt_share()`` on a bucket returned by ``IStorageServer.get_buckets()`` does not result in error (other behavior is opaque at this level of abstraction). """ storage_index, _, _ = yield self.create_share() buckets = yield self.storage_client.get_buckets(storage_index) yield buckets[0].callRemote("advise_corrupt_share", b"OH NO") @inlineCallbacks def test_advise_corrupt_share(self): """ Calling ``advise_corrupt_share()`` on an immutable share does not result in error (other behavior is opaque at this level of abstraction). """ storage_index, _, _ = yield self.create_share() yield self.storage_client.advise_corrupt_share( b"immutable", storage_index, 0, b"ono" ) @inlineCallbacks def test_advise_corrupt_share_unknown_share_number(self): """ Calling ``advise_corrupt_share()`` on an immutable share, with an unknown share number, does not result in error. """ storage_index, _, _ = yield self.create_share() yield self.storage_client.advise_corrupt_share( b"immutable", storage_index, 999, b"ono" ) @inlineCallbacks def test_allocate_buckets_creates_lease(self): """ When buckets are created using ``allocate_buckets()``, a lease is created once writing is done. """ storage_index, _, _ = yield self.create_share() [lease] = self.server.get_leases(storage_index) # Lease expires in 31 days. self.assertTrue( lease.get_expiration_time() - self.fake_time() > (31 * 24 * 60 * 60 - 10) ) @inlineCallbacks def test_add_lease_non_existent(self): """ If the storage index doesn't exist, adding the lease silently does nothing. """ storage_index = new_storage_index() self.assertEqual(list(self.server.get_leases(storage_index)), []) renew_secret = new_secret() cancel_secret = new_secret() # Add a lease: yield self.storage_client.add_lease(storage_index, renew_secret, cancel_secret) self.assertEqual(list(self.server.get_leases(storage_index)), []) @inlineCallbacks def test_add_lease_renewal(self): """ If the lease secret is reused, ``add_lease()`` extends the existing lease. 
""" storage_index, renew_secret, cancel_secret = yield self.create_share() [lease] = self.server.get_leases(storage_index) initial_expiration_time = lease.get_expiration_time() # Time passes: self.fake_sleep(178) # We renew the lease: yield self.storage_client.add_lease(storage_index, renew_secret, cancel_secret) [lease] = self.server.get_leases(storage_index) new_expiration_time = lease.get_expiration_time() self.assertEqual(new_expiration_time - initial_expiration_time, 178) @inlineCallbacks def test_add_new_lease(self): """ If a new lease secret is used, ``add_lease()`` creates a new lease. """ storage_index, _, _ = yield self.create_share() [lease] = self.server.get_leases(storage_index) initial_expiration_time = lease.get_expiration_time() # Time passes: self.fake_sleep(167) # We create a new lease: renew_secret = new_secret() cancel_secret = new_secret() yield self.storage_client.add_lease(storage_index, renew_secret, cancel_secret) [lease1, lease2] = self.server.get_leases(storage_index) self.assertEqual(lease1.get_expiration_time(), initial_expiration_time) self.assertEqual(lease2.get_expiration_time() - initial_expiration_time, 167) class IStorageServerMutableAPIsTestsMixin(object): """ Tests for ``IStorageServer``'s mutable APIs. ``self.storage_client`` is expected to provide ``IStorageServer``. ``self.server`` is expected to be the corresponding ``allmydata.storage.server.StorageServer`` instance. ``STARAW`` is short for ``slot_testv_and_readv_and_writev``. """ def new_secrets(self): """Return a 3-tuple of secrets for STARAW calls.""" return (new_secret(), new_secret(), new_secret()) def staraw(self, *args, **kwargs): """Like ``slot_testv_and_readv_and_writev``, but less typing.""" return self.storage_client.slot_testv_and_readv_and_writev(*args, **kwargs) @inlineCallbacks def test_STARAW_reads_after_write(self): """ When data is written with ``IStorageServer.slot_testv_and_readv_and_writev``, it can then be read by a separate call using that API. """ secrets = self.new_secrets() storage_index = new_storage_index() (written, _) = yield self.staraw( storage_index, secrets, tw_vectors={ 0: ([], [(0, b"abcdefg")], 7), 1: ([], [(0, b"0123"), (4, b"456")], 7), }, r_vector=[], ) self.assertEqual(written, True) (_, reads) = yield self.staraw( storage_index, secrets, tw_vectors={}, # Whole thing, partial, going beyond the edge, completely outside # range: r_vector=[(0, 7), (2, 3), (6, 8), (100, 10)], ) self.assertEqual( reads, {0: [b"abcdefg", b"cde", b"g", b""], 1: [b"0123456", b"234", b"6", b""]}, ) @inlineCallbacks def test_SATRAW_reads_happen_before_writes_in_single_query(self): """ If a ``IStorageServer.slot_testv_and_readv_and_writev`` command contains both reads and writes, the read returns results that precede the write. 
""" secrets = self.new_secrets() storage_index = new_storage_index() (written, _) = yield self.staraw( storage_index, secrets, tw_vectors={ 0: ([], [(0, b"abcdefg")], 7), }, r_vector=[], ) self.assertEqual(written, True) # Read and write in same command; read happens before write: (written, reads) = yield self.staraw( storage_index, secrets, tw_vectors={ 0: ([], [(0, b"X" * 7)], 7), }, r_vector=[(0, 7)], ) self.assertEqual(written, True) self.assertEqual(reads, {0: [b"abcdefg"]}) # The write is available in next read: (_, reads) = yield self.staraw( storage_index, secrets, tw_vectors={}, r_vector=[(0, 7)], ) self.assertEqual(reads, {0: [b"X" * 7]}) @inlineCallbacks def test_SATRAW_writes_happens_only_if_test_matches(self): """ If a ``IStorageServer.slot_testv_and_readv_and_writev`` includes both a test and a write, the write succeeds if the test matches, and fails if the test does not match. """ secrets = self.new_secrets() storage_index = new_storage_index() (written, _) = yield self.staraw( storage_index, secrets, tw_vectors={ 0: ([], [(0, b"1" * 7)], 7), }, r_vector=[], ) self.assertEqual(written, True) # Test matches, so write happens: (written, _) = yield self.staraw( storage_index, secrets, tw_vectors={ 0: ( [(0, 3, b"1" * 3), (3, 4, b"1" * 4)], [(0, b"2" * 7)], 7, ), }, r_vector=[], ) self.assertEqual(written, True) (_, reads) = yield self.staraw( storage_index, secrets, tw_vectors={}, r_vector=[(0, 7)], ) self.assertEqual(reads, {0: [b"2" * 7]}) # Test does not match, so write does not happen: (written, _) = yield self.staraw( storage_index, secrets, tw_vectors={ 0: ([(0, 7, b"1" * 7)], [(0, b"3" * 7)], 7), }, r_vector=[], ) self.assertEqual(written, False) (_, reads) = yield self.staraw( storage_index, secrets, tw_vectors={}, r_vector=[(0, 7)], ) self.assertEqual(reads, {0: [b"2" * 7]}) @inlineCallbacks def test_SATRAW_tests_past_end_of_data(self): """ If a ``IStorageServer.slot_testv_and_readv_and_writev`` includes a test vector that reads past the end of the data, the result is limited to actual available data. """ secrets = self.new_secrets() storage_index = new_storage_index() # Since there is no data on server, the test vector will return empty # string, which matches expected result, so write will succeed. (written, _) = yield self.staraw( storage_index, secrets, tw_vectors={ 0: ([(0, 10, b"")], [(0, b"1" * 7)], 7), }, r_vector=[], ) self.assertEqual(written, True) # Now the test vector is a 10-read off of a 7-byte value, but expected # value is still 7 bytes, so the write will again succeed. (written, _) = yield self.staraw( storage_index, secrets, tw_vectors={ 0: ([(0, 10, b"1" * 7)], [(0, b"2" * 7)], 7), }, r_vector=[], ) self.assertEqual(written, True) @inlineCallbacks def test_SATRAW_reads_past_end_of_data(self): """ If a ``IStorageServer.slot_testv_and_readv_and_writev`` reads past the end of the data, the result is limited to actual available data. """ secrets = self.new_secrets() storage_index = new_storage_index() # Write some data (written, _) = yield self.staraw( storage_index, secrets, tw_vectors={ 0: ([], [(0, b"12345")], 5), }, r_vector=[], ) self.assertEqual(written, True) # Reads past end. (_, reads) = yield self.staraw( storage_index, secrets, tw_vectors={}, r_vector=[(0, 100), (2, 50)], ) self.assertEqual(reads, {0: [b"12345", b"345"]}) @inlineCallbacks def test_STARAW_write_enabler_must_match(self): """ If the write enabler secret passed to ``IStorageServer.slot_testv_and_readv_and_writev`` doesn't match previous writes, the write fails. 
""" secrets = self.new_secrets() storage_index = new_storage_index() (written, _) = yield self.staraw( storage_index, secrets, tw_vectors={ 0: ([], [(0, b"1" * 7)], 7), }, r_vector=[], ) self.assertEqual(written, True) # Write enabler secret does not match, so write does not happen: bad_secrets = (new_secret(),) + secrets[1:] with self.assertRaises(RemoteException): yield self.staraw( storage_index, bad_secrets, tw_vectors={ 0: ([], [(0, b"2" * 7)], 7), }, r_vector=[], ) (_, reads) = yield self.staraw( storage_index, secrets, tw_vectors={}, r_vector=[(0, 7)], ) self.assertEqual(reads, {0: [b"1" * 7]}) @inlineCallbacks def test_STARAW_zero_new_length_deletes(self): """ A zero new length passed to ``IStorageServer.slot_testv_and_readv_and_writev`` deletes the share. """ secrets = self.new_secrets() storage_index = new_storage_index() (written, _) = yield self.staraw( storage_index, secrets, tw_vectors={ 0: ([], [(0, b"1" * 7)], 7), }, r_vector=[], ) self.assertEqual(written, True) # Write with new length of 0: (written, _) = yield self.staraw( storage_index, secrets, tw_vectors={ 0: ([], [(0, b"1" * 7)], 0), }, r_vector=[], ) self.assertEqual(written, True) # It's gone! (_, reads) = yield self.staraw( storage_index, secrets, tw_vectors={}, r_vector=[(0, 7)], ) self.assertEqual(reads, {}) @inlineCallbacks def test_slot_readv(self): """ Data written with ``IStorageServer.slot_testv_and_readv_and_writev()`` can be read using ``IStorageServer.slot_readv()``. Reads can't go past the end of the data. """ secrets = self.new_secrets() storage_index = new_storage_index() (written, _) = yield self.staraw( storage_index, secrets, tw_vectors={ 0: ([], [(0, b"abcdefg")], 7), 1: ([], [(0, b"0123"), (4, b"456")], 7), # This will never get read from, just here to show we only read # from shares explicitly requested by slot_readv: 2: ([], [(0, b"XYZW")], 4), }, r_vector=[], ) self.assertEqual(written, True) reads = yield self.storage_client.slot_readv( storage_index, shares=[0, 1], # Whole thing, partial, going beyond the edge, completely outside # range: readv=[(0, 7), (2, 3), (6, 8), (100, 10)], ) self.assertEqual( reads, {0: [b"abcdefg", b"cde", b"g", b""], 1: [b"0123456", b"234", b"6", b""]}, ) @inlineCallbacks def test_slot_readv_no_shares(self): """ With no shares given, ``IStorageServer.slot_readv()`` reads from all shares. """ secrets = self.new_secrets() storage_index = new_storage_index() (written, _) = yield self.staraw( storage_index, secrets, tw_vectors={ 0: ([], [(0, b"abcdefg")], 7), 1: ([], [(0, b"0123456")], 7), 2: ([], [(0, b"9876543")], 7), }, r_vector=[], ) self.assertEqual(written, True) reads = yield self.storage_client.slot_readv( storage_index, shares=[], readv=[(0, 7)], ) self.assertEqual( reads, {0: [b"abcdefg"], 1: [b"0123456"], 2: [b"9876543"]}, ) @inlineCallbacks def test_slot_readv_unknown_storage_index(self): """ With unknown storage index, ``IStorageServer.slot_readv()`` returns empty dict. 
""" storage_index = new_storage_index() reads = yield self.storage_client.slot_readv( storage_index, shares=[], readv=[(0, 7)], ) self.assertEqual( reads, {}, ) @inlineCallbacks def create_slot(self): """Create a slot with sharenum 0.""" secrets = self.new_secrets() storage_index = new_storage_index() (written, _) = yield self.staraw( storage_index, secrets, tw_vectors={ 0: ([], [(0, b"abcdefg")], 7), }, r_vector=[], ) self.assertEqual(written, True) returnValue((secrets, storage_index)) @inlineCallbacks def test_advise_corrupt_share(self): """ Calling ``advise_corrupt_share()`` on a mutable share does not result in error (other behavior is opaque at this level of abstraction). """ secrets, storage_index = yield self.create_slot() yield self.storage_client.advise_corrupt_share( b"mutable", storage_index, 0, b"ono" ) @inlineCallbacks def test_advise_corrupt_share_unknown_share_number(self): """ Calling ``advise_corrupt_share()`` on a mutable share with an unknown share number does not result in error (other behavior is opaque at this level of abstraction). """ secrets, storage_index = yield self.create_slot() yield self.storage_client.advise_corrupt_share( b"mutable", storage_index, 999, b"ono" ) @inlineCallbacks def test_STARAW_create_lease(self): """ When STARAW creates a new slot, it also creates a lease. """ _, storage_index = yield self.create_slot() [lease] = self.server.get_slot_leases(storage_index) # Lease expires in 31 days. self.assertTrue( lease.get_expiration_time() - self.fake_time() > (31 * 24 * 60 * 60 - 10) ) @inlineCallbacks def test_STARAW_renews_lease(self): """ When STARAW is run on an existing slot with same renewal secret, it renews the lease. """ secrets, storage_index = yield self.create_slot() [lease] = self.server.get_slot_leases(storage_index) initial_expire = lease.get_expiration_time() # Time passes... self.fake_sleep(17) # We do another write: (written, _) = yield self.staraw( storage_index, secrets, tw_vectors={ 0: ([], [(0, b"1234567")], 7), }, r_vector=[], ) self.assertEqual(written, True) # The lease has been renewed: [lease] = self.server.get_slot_leases(storage_index) self.assertEqual(lease.get_expiration_time() - initial_expire, 17) @inlineCallbacks def test_STARAW_new_lease(self): """ When STARAW is run with a new renewal secret on an existing slot, it adds a new lease. """ secrets, storage_index = yield self.create_slot() [lease] = self.server.get_slot_leases(storage_index) initial_expire = lease.get_expiration_time() # Time passes... self.fake_sleep(19) # We do another write: (written, _) = yield self.staraw( storage_index, (secrets[0], new_secret(), new_secret()), tw_vectors={ 0: ([], [(0, b"1234567")], 7), }, r_vector=[], ) self.assertEqual(written, True) # A new lease was added: [lease1, lease2] = self.server.get_slot_leases(storage_index) self.assertEqual(lease1.get_expiration_time(), initial_expire) self.assertEqual(lease2.get_expiration_time() - initial_expire, 19) @inlineCallbacks def test_add_lease_renewal(self): """ If the lease secret is reused, ``add_lease()`` extends the existing lease. 
""" secrets, storage_index = yield self.create_slot() [lease] = self.server.get_slot_leases(storage_index) initial_expiration_time = lease.get_expiration_time() # Time passes: self.fake_sleep(178) # We renew the lease: yield self.storage_client.add_lease(storage_index, secrets[1], secrets[2]) [lease] = self.server.get_slot_leases(storage_index) new_expiration_time = lease.get_expiration_time() self.assertEqual(new_expiration_time - initial_expiration_time, 178) @inlineCallbacks def test_add_new_lease(self): """ If a new lease secret is used, ``add_lease()`` creates a new lease. """ secrets, storage_index = yield self.create_slot() [lease] = self.server.get_slot_leases(storage_index) initial_expiration_time = lease.get_expiration_time() # Time passes: self.fake_sleep(167) # We create a new lease: renew_secret = new_secret() cancel_secret = new_secret() yield self.storage_client.add_lease(storage_index, renew_secret, cancel_secret) [lease1, lease2] = self.server.get_slot_leases(storage_index) self.assertEqual(lease1.get_expiration_time(), initial_expiration_time) self.assertEqual(lease2.get_expiration_time() - initial_expiration_time, 167) class _SharedMixin(SystemTestMixin): """Base class for Foolscap and HTTP mixins.""" SKIP_TESTS : set[str] = set() def _get_istorage_server(self): native_server = next(iter(self.clients[0].storage_broker.get_known_servers())) client = native_server.get_storage_server() self.assertTrue(IStorageServer.providedBy(client)) return client @inlineCallbacks def setUp(self): if self._testMethodName in self.SKIP_TESTS: raise SkipTest( "Test {} is still not supported".format(self._testMethodName) ) AsyncTestCase.setUp(self) self.basedir = "test_istorageserver/" + self.id() yield SystemTestMixin.setUp(self) yield self.set_up_nodes(1) self.server = None for s in self.clients[0].services: if isinstance(s, StorageServer): self.server = s break assert self.server is not None, "Couldn't find StorageServer" self._clock = Clock() self._clock.advance(123456) self.server._clock = self._clock self.storage_client = self._get_istorage_server() def fake_time(self): """Return the current fake, test-controlled, time.""" return self._clock.seconds() def fake_sleep(self, seconds): """Advance the fake time by the given number of seconds.""" self._clock.advance(seconds) @inlineCallbacks def tearDown(self): AsyncTestCase.tearDown(self) yield SystemTestMixin.tearDown(self) class FoolscapSharedAPIsTests( _SharedMixin, IStorageServerSharedAPIsTestsMixin, AsyncTestCase ): """Foolscap-specific tests for shared ``IStorageServer`` APIs.""" FORCE_FOOLSCAP_FOR_STORAGE = True class HTTPSharedAPIsTests( _SharedMixin, IStorageServerSharedAPIsTestsMixin, AsyncTestCase ): """HTTP-specific tests for shared ``IStorageServer`` APIs.""" FORCE_FOOLSCAP_FOR_STORAGE = False class FoolscapImmutableAPIsTests( _SharedMixin, IStorageServerImmutableAPIsTestsMixin, AsyncTestCase ): """Foolscap-specific tests for immutable ``IStorageServer`` APIs.""" FORCE_FOOLSCAP_FOR_STORAGE = True def test_disconnection(self): """ If we disconnect in the middle of writing to a bucket, all data is wiped, and it's even possible to write different data to the bucket. (In the real world one shouldn't do that, but writing different data is a good way to test that the original data really was wiped.) HTTP protocol doesn't need this test, since disconnection is a meaningless concept; this is more about testing the implicit contract the Foolscap implementation depends on doesn't change as we refactor things. 
""" return self.abort_or_disconnect_half_way(lambda _: self.disconnect()) @inlineCallbacks def disconnect(self): """ Disconnect and then reconnect with a new ``IStorageServer``. """ current = self.storage_client yield self.bounce_client(0) self.storage_client = self._get_istorage_server() assert self.storage_client is not current class HTTPImmutableAPIsTests( _SharedMixin, IStorageServerImmutableAPIsTestsMixin, AsyncTestCase ): """HTTP-specific tests for immutable ``IStorageServer`` APIs.""" FORCE_FOOLSCAP_FOR_STORAGE = False class FoolscapMutableAPIsTests( _SharedMixin, IStorageServerMutableAPIsTestsMixin, AsyncTestCase ): """Foolscap-specific tests for mutable ``IStorageServer`` APIs.""" FORCE_FOOLSCAP_FOR_STORAGE = True class HTTPMutableAPIsTests( _SharedMixin, IStorageServerMutableAPIsTestsMixin, AsyncTestCase ): """HTTP-specific tests for mutable ``IStorageServer`` APIs.""" FORCE_FOOLSCAP_FOR_STORAGE = False tahoe_lafs-1.20.0/src/allmydata/test/test_json_metadata.py0000644000000000000000000000354113615410400020562 0ustar00""" Ported to Python 3. """ from twisted.trial.unittest import TestCase from allmydata.web.common import get_filenode_metadata, SDMF_VERSION, MDMF_VERSION class MockFileNode(object): def __init__(self, size, mutable_version=None): self.size = size self.mutable_version = mutable_version def get_size(self): return self.size def is_mutable(self): return self.mutable_version is not None def get_version(self): if self.mutable_version is None: raise AttributeError() return self.mutable_version class CommonFixture(object): def test_size_is_0(self): """If get_size doesn't return None the returned metadata must contain "size".""" mockfilenode = MockFileNode(0, mutable_version=self.mutable_version) metadata = get_filenode_metadata(mockfilenode) self.failUnlessEqual(metadata['size'], 0) def test_size_is_1000(self): """1000 is sufficiently large to guarantee the cap is not a literal.""" mockfilenode = MockFileNode(1000, mutable_version=self.mutable_version) metadata = get_filenode_metadata(mockfilenode) self.failUnlessEqual(metadata['size'], 1000) def test_size_is_None(self): """If get_size returns None the returned metadata must not contain "size".""" mockfilenode = MockFileNode(None, mutable_version=self.mutable_version) metadata = get_filenode_metadata(mockfilenode) self.failIfIn('size', metadata) class Test_GetFileNodeMetaData_Immutable(CommonFixture, TestCase): def setUp(self): self.mutable_version = None class Test_GetFileNodeMetaData_SDMF(CommonFixture, TestCase): def setUp(self): self.mutable_version = SDMF_VERSION class Test_GetFileNodeMetaData_MDMF(CommonFixture, TestCase): def setUp(self): self.mutable_version = MDMF_VERSION tahoe_lafs-1.20.0/src/allmydata/test/test_log.py0000644000000000000000000001212513615410400016530 0ustar00""" Tests for allmydata.util.log. Ported to Python 3. 
""" from twisted.trial import unittest from twisted.python.failure import Failure from foolscap.logging import log from allmydata.util import log as tahoe_log class SampleError(Exception): pass class Log(unittest.TestCase): def setUp(self): self.messages = [] def msg(msg, facility, parent, *args, **kwargs): self.messages.append((msg, facility, parent, args, kwargs)) return "msg{}".format(len(self.messages)) self.patch(log, "msg", msg) def test_err(self): """Logging with log.err() causes tests to fail.""" try: raise SampleError("simple sample") except: f = Failure() tahoe_log.err(format="intentional sample error", failure=f, level=tahoe_log.OPERATIONAL, umid="wO9UoQ") result = self.flushLoggedErrors(SampleError) self.assertEqual(len(result), 1) def test_default_facility(self): """ If facility is passed to PrefixingLogMixin.__init__, it is used as default facility. """ class LoggingObject1(tahoe_log.PrefixingLogMixin): pass obj = LoggingObject1(facility="defaultfac") obj.log("hello") obj.log("world", facility="override") self.assertEqual(self.messages[-2][1], "defaultfac") self.assertEqual(self.messages[-1][1], "override") def test_with_prefix(self): """ If prefix is passed to PrefixingLogMixin.__init__, it is used in message rendering. """ class LoggingObject4(tahoe_log.PrefixingLogMixin): pass obj = LoggingObject4("fac", prefix="pre1") obj.log("hello") obj.log("world") self.assertEqual(self.messages[-2][0], '(pre1): hello') self.assertEqual(self.messages[-1][0], '(pre1): world') def test_with_bytes_prefix(self): """ If bytes prefix is passed to PrefixingLogMixin.__init__, it is used in message rendering. """ class LoggingObject5(tahoe_log.PrefixingLogMixin): pass obj = LoggingObject5("fac", prefix=b"pre1") obj.log("hello") obj.log("world") self.assertEqual(self.messages[-2][0], '(pre1): hello') self.assertEqual(self.messages[-1][0], '(pre1): world') def test_no_prefix(self): """ If no prefix is passed to PrefixingLogMixin.__init__, it is not used in message rendering. """ class LoggingObject2(tahoe_log.PrefixingLogMixin): pass obj = LoggingObject2() obj.log("hello") obj.log("world") self.assertEqual(self.messages[-2][0], ': hello') self.assertEqual(self.messages[-1][0], ': world') def test_numming(self): """ Objects inheriting from PrefixingLogMixin get a unique number from a class-specific counter. """ class LoggingObject3(tahoe_log.PrefixingLogMixin): pass obj = LoggingObject3() obj2 = LoggingObject3() obj.log("hello") obj2.log("world") self.assertEqual(self.messages[-2][0], ': hello') self.assertEqual(self.messages[-1][0], ': world') def test_parent_id(self): """ The parent message id can be passed in, otherwise the first message's id is used as the parent. This logic is pretty bogus, but that's what the code does. """ class LoggingObject1(tahoe_log.PrefixingLogMixin): pass obj = LoggingObject1() result = obj.log("zero") self.assertEqual(result, "msg1") obj.log("one", parent="par1") obj.log("two", parent="par2") obj.log("three") obj.log("four") self.assertEqual([m[2] for m in self.messages], [None, "par1", "par2", "msg1", "msg1"]) def test_grandparent_id(self): """ If grandparent message id is given, it's used as parent id of the first message. 
""" class LoggingObject1(tahoe_log.PrefixingLogMixin): pass obj = LoggingObject1(grandparentmsgid="grand") result = obj.log("zero") self.assertEqual(result, "msg1") obj.log("one", parent="par1") obj.log("two", parent="par2") obj.log("three") obj.log("four") self.assertEqual([m[2] for m in self.messages], ["grand", "par1", "par2", "msg1", "msg1"]) def test_native_string_keys(self): """Keyword argument keys are all native strings.""" class LoggingObject17(tahoe_log.PrefixingLogMixin): pass obj = LoggingObject17() # Native string by default: obj.log(hello="world") # Will be Unicode on Python 2: obj.log(**{"my": "message"}) for message in self.messages: for k in message[-1].keys(): self.assertIsInstance(k, str) tahoe_lafs-1.20.0/src/allmydata/test/test_monitor.py0000644000000000000000000000225313615410400017437 0ustar00""" Tests for allmydata.monitor. """ from twisted.trial import unittest from allmydata.monitor import Monitor, OperationCancelledError class MonitorTests(unittest.TestCase): """Tests for the Monitor class.""" def test_cancellation(self): """The monitor can be cancelled.""" m = Monitor() self.assertFalse(m.is_cancelled()) m.raise_if_cancelled() m.cancel() self.assertTrue(m.is_cancelled()) with self.assertRaises(OperationCancelledError): m.raise_if_cancelled() def test_status(self): """The monitor can have its status set.""" m = Monitor() self.assertEqual(m.get_status(), None) m.set_status("discombobulated") self.assertEqual(m.get_status(), "discombobulated") def test_finish(self): """The monitor can finish.""" m = Monitor() self.assertFalse(m.is_finished()) d = m.when_done() self.assertNoResult(d) result = m.finish(300) self.assertEqual(result, 300) self.assertEqual(m.get_status(), 300) self.assertTrue(m.is_finished()) d.addBoth(self.assertEqual, 300) return d tahoe_lafs-1.20.0/src/allmydata/test/test_multi_introducers.py0000644000000000000000000001453013615410400021524 0ustar00""" Ported to Python 3. """ from six import ensure_binary import os from twisted.python.filepath import FilePath from twisted.trial import unittest from twisted.internet import defer from allmydata.util import yamlutil from allmydata.client import create_client from allmydata.scripts.create_node import write_node_config INTRODUCERS_CFG_FURLS=['furl1', 'furl2'] INTRODUCERS_CFG_FURLS_COMMENTED="""introducers: 'intro1': {furl: furl1} # 'intro2': {furl: furl4} """ class MultiIntroTests(unittest.TestCase): async def setUp(self): # setup tahoe.cfg and basedir/private/introducers # create a custom tahoe.cfg self.basedir = os.path.dirname(self.mktemp()) c = open(os.path.join(self.basedir, "tahoe.cfg"), "w") config = {'hide-ip':False, 'listen': 'tcp', 'port': None, 'location': None, 'hostname': 'example.net'} await write_node_config(c, config) c.write("[storage]\n") c.write("enabled = false\n") c.close() os.mkdir(os.path.join(self.basedir,"private")) self.yaml_path = FilePath(os.path.join(self.basedir, "private", "introducers.yaml")) @defer.inlineCallbacks def test_introducer_count(self): """ If there are two introducers configured in ``introducers.yaml`` then ``Client`` creates two introducer clients. 
""" connections = { 'introducers': { u'intro1':{ 'furl': 'furl1' }, u'intro2':{ 'furl': 'furl4' }, }, } self.yaml_path.setContent(ensure_binary(yamlutil.safe_dump(connections))) # get a client and count of introducer_clients myclient = yield create_client(self.basedir) ic_count = len(myclient.introducer_clients) # assertions self.failUnlessEqual(ic_count, len(connections["introducers"])) async def test_read_introducer_furl_from_tahoecfg(self): """ The deprecated [client]introducer.furl item is still read and respected. """ # create a custom tahoe.cfg c = open(os.path.join(self.basedir, "tahoe.cfg"), "w") config = {'hide-ip':False, 'listen': 'tcp', 'port': None, 'location': None, 'hostname': 'example.net'} await write_node_config(c, config) fake_furl = "furl1" c.write("[client]\n") c.write("introducer.furl = %s\n" % fake_furl) c.write("[storage]\n") c.write("enabled = false\n") c.close() # get a client and first introducer_furl myclient = yield create_client(self.basedir) tahoe_cfg_furl = myclient.introducer_clients[0].introducer_furl # assertions self.failUnlessEqual(fake_furl, str(tahoe_cfg_furl, "utf-8")) self.assertEqual( list( warning["message"] for warning in self.flushWarnings() if warning["category"] is DeprecationWarning ), ["tahoe.cfg [client]introducer.furl is deprecated; " "use private/introducers.yaml instead."], ) @defer.inlineCallbacks def test_reject_default_in_yaml(self): """ If an introducer is configured in tahoe.cfg with the deprecated [client]introducer.furl then a "default" introducer in introducers.yaml is rejected. """ connections = { 'introducers': { u'default': { 'furl': 'furl1' }, }, } self.yaml_path.setContent(ensure_binary(yamlutil.safe_dump(connections))) FilePath(self.basedir).child("tahoe.cfg").setContent( b"[client]\n" b"introducer.furl = furl1\n" ) with self.assertRaises(ValueError) as ctx: yield create_client(self.basedir) self.assertEquals( str(ctx.exception), "'default' introducer furl cannot be specified in tahoe.cfg and introducers.yaml; " "please fix impossible configuration.", ) SIMPLE_YAML = b""" introducers: one: furl: furl1 """ # this format was recommended in docs/configuration.rst in 1.12.0, but it # isn't correct (the "furl = furl1" line is recorded as the string value of # the ["one"] key, instead of being parsed as a single-key dictionary). 
EQUALS_YAML = b""" introducers: one: furl = furl1 """ class NoDefault(unittest.TestCase): async def setUp(self): # setup tahoe.cfg and basedir/private/introducers # create a custom tahoe.cfg self.basedir = os.path.dirname(self.mktemp()) c = open(os.path.join(self.basedir, "tahoe.cfg"), "w") config = {'hide-ip':False, 'listen': 'tcp', 'port': None, 'location': None, 'hostname': 'example.net'} await write_node_config(c, config) c.write("[storage]\n") c.write("enabled = false\n") c.close() os.mkdir(os.path.join(self.basedir,"private")) self.yaml_path = FilePath(os.path.join(self.basedir, "private", "introducers.yaml")) @defer.inlineCallbacks def test_ok(self): connections = {'introducers': { u'one': { 'furl': 'furl1' }, }} self.yaml_path.setContent(ensure_binary(yamlutil.safe_dump(connections))) myclient = yield create_client(self.basedir) tahoe_cfg_furl = myclient.introducer_clients[0].introducer_furl self.assertEquals(tahoe_cfg_furl, b'furl1') @defer.inlineCallbacks def test_real_yaml(self): self.yaml_path.setContent(SIMPLE_YAML) myclient = yield create_client(self.basedir) tahoe_cfg_furl = myclient.introducer_clients[0].introducer_furl self.assertEquals(tahoe_cfg_furl, b'furl1') @defer.inlineCallbacks def test_invalid_equals_yaml(self): self.yaml_path.setContent(EQUALS_YAML) with self.assertRaises(TypeError) as ctx: yield create_client(self.basedir) self.assertIsInstance( ctx.exception, TypeError, ) @defer.inlineCallbacks def test_introducerless(self): connections = {'introducers': {} } self.yaml_path.setContent(ensure_binary(yamlutil.safe_dump(connections))) myclient = yield create_client(self.basedir) self.assertEquals(len(myclient.introducer_clients), 0) tahoe_lafs-1.20.0/src/allmydata/test/test_netstring.py0000644000000000000000000000431413615410400017765 0ustar00""" Tests for allmydata.util.netstring. Ported to Python 3. 
""" from twisted.trial import unittest from allmydata.util.netstring import netstring, split_netstring class Netstring(unittest.TestCase): def test_encode(self): """netstring() correctly encodes the given bytes.""" result = netstring(b"abc") self.assertEqual(result, b"3:abc,") self.assertIsInstance(result, bytes) def test_split(self): a = netstring(b"hello") + netstring(b"world") for s in split_netstring(a, 2)[0]: self.assertIsInstance(s, bytes) self.failUnlessEqual(split_netstring(a, 2), ([b"hello", b"world"], len(a))) self.failUnlessEqual(split_netstring(a, 2, required_trailer=b""), ([b"hello", b"world"], len(a))) self.failUnlessRaises(ValueError, split_netstring, a, 3) self.failUnlessRaises(ValueError, split_netstring, a+b" extra", 2, required_trailer=b"") self.failUnlessEqual(split_netstring(a+b" extra", 2), ([b"hello", b"world"], len(a))) self.failUnlessEqual(split_netstring(a+b"++", 2, required_trailer=b"++"), ([b"hello", b"world"], len(a)+2)) self.failUnlessRaises(ValueError, split_netstring, a+b"+", 2, required_trailer=b"not") def test_extra(self): a = netstring(b"hello") self.failUnlessEqual(split_netstring(a, 1), ([b"hello"], len(a))) b = netstring(b"hello") + b"extra stuff" self.failUnlessEqual(split_netstring(b, 1), ([b"hello"], len(a))) def test_nested(self): a = netstring(b"hello") + netstring(b"world") + b"extra stuff" b = netstring(b"a") + netstring(b"is") + netstring(a) + netstring(b".") (top, pos) = split_netstring(b, 4) self.failUnlessEqual(len(top), 4) self.failUnlessEqual(top[0], b"a") self.failUnlessEqual(top[1], b"is") self.failUnlessEqual(top[2], a) self.failUnlessEqual(top[3], b".") self.failUnlessRaises(ValueError, split_netstring, a, 2, required_trailer=b"") bottom = split_netstring(a, 2) self.failUnlessEqual(bottom, ([b"hello", b"world"], len(netstring(b"hello")+netstring(b"world")))) tahoe_lafs-1.20.0/src/allmydata/test/test_no_network.py0000644000000000000000000000310413615410400020131 0ustar00""" Test the NoNetworkGrid test harness. Ported to Python 3. 
""" from twisted.trial import unittest from twisted.application import service from allmydata.test.no_network import NoNetworkGrid from allmydata.immutable.upload import Data from allmydata.util.consumer import download_to_data from .common import ( SameProcessStreamEndpointAssigner, ) class Harness(unittest.TestCase): def setUp(self): self.s = service.MultiService() self.s.startService() self.addCleanup(self.s.stopService) self.port_assigner = SameProcessStreamEndpointAssigner() self.port_assigner.setUp() self.addCleanup(self.port_assigner.tearDown) def grid(self, basedir): return NoNetworkGrid( basedir, num_clients=1, num_servers=10, client_config_hooks={}, port_assigner=self.port_assigner, ) def test_create(self): basedir = "no_network/Harness/create" g = self.grid(basedir) g.startService() return g.stopService() def test_upload(self): basedir = "no_network/Harness/upload" g = self.grid(basedir) g.setServiceParent(self.s) c0 = g.clients[0] DATA = b"Data to upload" * 100 data = Data(DATA, b"") d = c0.upload(data) def _uploaded(res): n = c0.create_node_from_uri(res.get_uri()) return download_to_data(n) d.addCallback(_uploaded) def _check(res): self.failUnlessEqual(res, DATA) d.addCallback(_check) return d tahoe_lafs-1.20.0/src/allmydata/test/test_node.py0000644000000000000000000010267213615410400016703 0ustar00from __future__ import annotations import base64 import os import stat import sys import time from textwrap import dedent import configparser from hypothesis import ( given, ) from hypothesis.strategies import ( integers, sets, ) from unittest import skipIf from twisted.python.filepath import ( FilePath, ) from twisted.python.runtime import platform from twisted.trial import unittest from twisted.internet import defer import foolscap.logging.log from twisted.application import service from allmydata.node import ( PortAssignmentRequired, PrivacyError, tub_listen_on, create_tub_options, create_main_tub, create_node_dir, create_default_connection_handlers, create_connection_handlers, config_from_string, read_config, MissingConfigEntry, _tub_portlocation, formatTimeTahoeStyle, UnescapedHashError, ) from allmydata.introducer.server import create_introducer from allmydata import client from allmydata.util import fileutil, iputil from allmydata.util.namespace import Namespace from allmydata.util.configutil import ( ValidConfiguration, UnknownConfigError, ) from allmydata.util.i2p_provider import create as create_i2p_provider from allmydata.util.tor_provider import create as create_tor_provider import allmydata.test.common_util as testutil from .common import ( ConstantAddresses, SameProcessStreamEndpointAssigner, UseNode, superuser, ) def port_numbers(): return integers(min_value=1, max_value=2 ** 16 - 1) class LoggingMultiService(service.MultiService): def log(self, msg, **kw): pass # see https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2946 def testing_tub(reactor, config_data=''): """ Creates a 'main' Tub for testing purposes, from config data """ basedir = 'dummy_basedir' config = config_from_string(basedir, 'DEFAULT_PORTNUMFILE_BLANK', config_data) fileutil.make_dirs(os.path.join(basedir, 'private')) i2p_provider = create_i2p_provider(reactor, config) tor_provider = create_tor_provider(reactor, config) handlers = create_connection_handlers(config, i2p_provider, tor_provider) default_connection_handlers, foolscap_connection_handlers = handlers tub_options = create_tub_options(config) main_tub = create_main_tub( config, tub_options, default_connection_handlers, 
foolscap_connection_handlers, i2p_provider, tor_provider, cert_filename='DEFAULT_CERTFILE_BLANK' ) return main_tub class TestCase(testutil.SignalMixin, unittest.TestCase): def setUp(self): testutil.SignalMixin.setUp(self) self.parent = LoggingMultiService() # We can use a made-up port number because these tests never actually # try to bind the port. We'll use a low-numbered one that's likely to # conflict with another service to prove it. self._available_port = 22 self.port_assigner = SameProcessStreamEndpointAssigner() self.port_assigner.setUp() self.addCleanup(self.port_assigner.tearDown) def _test_location( self, expected_addresses, tub_port=None, tub_location=None, local_addresses=None, ): """ Verify that a Tub configured with the given *tub.port* and *tub.location* values generates fURLs with the given addresses in its location hints. :param [str] expected_addresses: The addresses which must appear in the generated fURL for the test to pass. All addresses must appear. :param tub_port: If not ``None`` then a value for the *tub.port* configuration item. :param tub_location: If not ``None`` then a value for the *tub.location* configuration item. :param local_addresses: If not ``None`` then a list of addresses to supply to the system under test as local addresses. """ from twisted.internet import reactor basedir = self.mktemp() create_node_dir(basedir, "testing") if tub_port is None: # Always configure a usable tub.port address instead of relying on # the automatic port assignment. The automatic port assignment is # prone to collisions and spurious test failures. _, tub_port = self.port_assigner.assign(reactor) config_data = "[node]\n" config_data += "tub.port = {}\n".format(tub_port) # If they wanted a certain location, go for it. This probably won't # agree with the tub.port value we set but that only matters if # anything tries to use this to establish a connection ... which # nothing in this test suite will. 
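# As a purely illustrative sketch (the values below are hypothetical, not
# ones this test actually assigns), the config_data string assembled here
# ends up looking roughly like:
#
#   [node]
#   tub.port = tcp:1234
#   tub.location = 192.0.2.0:1234
#
# with the tub.location line only appended when tub_location was given.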
if tub_location is not None: config_data += "tub.location = {}\n".format(tub_location) if local_addresses is not None: self.patch(iputil, 'get_local_addresses_sync', lambda: local_addresses) tub = testing_tub(reactor, config_data) class Foo(object): pass furl = tub.registerReference(Foo()) for address in expected_addresses: self.assertIn(address, furl) def test_location1(self): return self._test_location(expected_addresses=["192.0.2.0:1234"], tub_location="192.0.2.0:1234") def test_location2(self): return self._test_location(expected_addresses=["192.0.2.0:1234", "example.org:8091"], tub_location="192.0.2.0:1234,example.org:8091") def test_location_not_set(self): """Checks the autogenerated furl when tub.location is not set.""" return self._test_location( expected_addresses=[ "127.0.0.1:{}".format(self._available_port), "192.0.2.0:{}".format(self._available_port), ], tub_port=self._available_port, local_addresses=["127.0.0.1", "192.0.2.0"], ) def test_location_auto_and_explicit(self): """Checks the autogenerated furl when tub.location contains 'AUTO'.""" return self._test_location( expected_addresses=[ "127.0.0.1:{}".format(self._available_port), "192.0.2.0:{}".format(self._available_port), "example.com:4321", ], tub_port=self._available_port, tub_location="AUTO,example.com:{}".format(self._available_port), local_addresses=["127.0.0.1", "192.0.2.0", "example.com:4321"], ) def test_tahoe_cfg_utf8(self): basedir = "test_node/test_tahoe_cfg_utf8" fileutil.make_dirs(basedir) f = open(os.path.join(basedir, 'tahoe.cfg'), 'wb') f.write(u"\uFEFF[node]\n".encode('utf-8')) f.write(u"nickname = \u2621\n".encode('utf-8')) f.close() config = read_config(basedir, "") self.failUnlessEqual(config.get_config("node", "nickname"), u"\u2621") def test_tahoe_cfg_hash_in_name(self): basedir = "test_node/test_cfg_hash_in_name" nickname = "Hash#Bang!" # a clever nickname containing a hash fileutil.make_dirs(basedir) f = open(os.path.join(basedir, 'tahoe.cfg'), 'wt') f.write("[node]\n") f.write("nickname = %s\n" % (nickname,)) f.close() config = read_config(basedir, "") self.failUnless(config.nickname == nickname) def test_hash_in_furl(self): """ Hashes in furl options are not allowed, resulting in exception. """ basedir = self.mktemp() fileutil.make_dirs(basedir) with open(os.path.join(basedir, 'tahoe.cfg'), 'wt') as f: f.write("[node]\n") f.write("log_gatherer.furl = lalal#onohash\n") config = read_config(basedir, "") with self.assertRaises(UnescapedHashError): config.get_config("node", "log_gatherer.furl") def test_missing_config_item(self): """ If a config item is missing: 1. Given a default, return default. 2. Otherwise, raise MissingConfigEntry. 
""" basedir = self.mktemp() fileutil.make_dirs(basedir) with open(os.path.join(basedir, 'tahoe.cfg'), 'wt') as f: f.write("[node]\n") config = read_config(basedir, "") self.assertEquals(config.get_config("node", "log_gatherer.furl", "def"), "def") with self.assertRaises(MissingConfigEntry): config.get_config("node", "log_gatherer.furl") def test_missing_config_section(self): """ Enumerating a missing section returns empty dict """ basedir = self.mktemp() fileutil.make_dirs(basedir) with open(os.path.join(basedir, 'tahoe.cfg'), 'w'): pass config = read_config(basedir, "") self.assertEquals( config.enumerate_section("not-a-section"), {} ) def test_config_required(self): """ Asking for missing (but required) configuration is an error """ basedir = u"test_node/test_config_required" config = read_config(basedir, "portnum") with self.assertRaises(Exception): config.get_config_from_file("it_does_not_exist", required=True) def test_config_items(self): """ All items in a config section can be retrieved. """ basedir = u"test_node/test_config_items" create_node_dir(basedir, "testing") with open(os.path.join(basedir, 'tahoe.cfg'), 'wt') as f: f.write(dedent( """ [node] nickname = foo timeout.disconnect = 12 """ )) config = read_config(basedir, "portnum") self.assertEqual( config.items("node"), [("nickname", "foo"), ("timeout.disconnect", "12"), ], ) self.assertEqual( config.items("node", [("unnecessary", "default")]), [("nickname", "foo"), ("timeout.disconnect", "12"), ], ) def test_config_items_missing_section(self): """ If a default is given for a missing section, the default is used. Lacking both default and section, an error is raised. """ basedir = self.mktemp() create_node_dir(basedir, "testing") with open(os.path.join(basedir, 'tahoe.cfg'), 'wt') as f: f.write("") config = read_config(basedir, "portnum") with self.assertRaises(configparser.NoSectionError): config.items("nosuch") default = [("hello", "world")] self.assertEqual(config.items("nosuch", default), default) @skipIf(platform.isWindows(), "We don't know how to set permissions on Windows.") @skipIf(superuser, "cannot test as superuser with all permissions") def test_private_config_unreadable(self): """ Asking for inaccessible private config is an error """ basedir = u"test_node/test_private_config_unreadable" create_node_dir(basedir, "testing") config = read_config(basedir, "portnum") config.get_or_create_private_config("foo", "contents") fname = os.path.join(basedir, "private", "foo") os.chmod(fname, 0) with self.assertRaises(Exception): config.get_or_create_private_config("foo") @skipIf(platform.isWindows(), "We don't know how to set permissions on Windows.") @skipIf(superuser, "cannot test as superuser with all permissions") def test_private_config_unreadable_preexisting(self): """ error if reading private config data fails """ basedir = u"test_node/test_private_config_unreadable_preexisting" create_node_dir(basedir, "testing") config = read_config(basedir, "portnum") fname = os.path.join(basedir, "private", "foo") with open(fname, "w") as f: f.write("stuff") os.chmod(fname, 0) with self.assertRaises(Exception): config.get_private_config("foo") def test_private_config_missing(self): """ a missing config with no default is an error """ basedir = u"test_node/test_private_config_missing" create_node_dir(basedir, "testing") config = read_config(basedir, "portnum") with self.assertRaises(MissingConfigEntry): config.get_or_create_private_config("foo") def test_private_config(self): basedir = u"test_node/test_private_config" privdir = 
os.path.join(basedir, "private") fileutil.make_dirs(privdir) f = open(os.path.join(privdir, 'already'), 'wt') f.write("secret") f.close() basedir = fileutil.abspath_expanduser_unicode(basedir) config = config_from_string(basedir, "", "") self.assertEqual(config.get_private_config("already"), "secret") self.assertEqual(config.get_private_config("not", "default"), "default") self.assertRaises(MissingConfigEntry, config.get_private_config, "not") value = config.get_or_create_private_config("new", "start") self.assertEqual(value, "start") self.assertEqual(config.get_private_config("new"), "start") counter = [] def make_newer(): counter.append("called") return "newer" value = config.get_or_create_private_config("newer", make_newer) self.assertEqual(len(counter), 1) self.assertEqual(value, "newer") self.assertEqual(config.get_private_config("newer"), "newer") value = config.get_or_create_private_config("newer", make_newer) self.assertEqual(len(counter), 1) # don't call unless necessary self.assertEqual(value, "newer") @skipIf(superuser, "cannot test as superuser with all permissions") def test_write_config_unwritable_file(self): """ Existing behavior merely logs any errors upon writing configuration files; this bad behavior should probably be fixed to do something better (like fail entirely). See #2905 """ basedir = "test_node/configdir" fileutil.make_dirs(basedir) config = config_from_string(basedir, "", "") with open(os.path.join(basedir, "bad"), "w") as f: f.write("bad") os.chmod(os.path.join(basedir, "bad"), 0o000) config.write_config_file("bad", "some value") errs = self.flushLoggedErrors(IOError) self.assertEqual(1, len(errs)) def test_timestamp(self): # this modified logger doesn't seem to get used during the tests, # probably because we don't modify the LogObserver that trial # installs (only the one that twistd installs). So manually exercise # it a little bit. t = formatTimeTahoeStyle("ignored", time.time()) self.failUnless("Z" in t) t2 = formatTimeTahoeStyle("ignored", int(time.time())) self.failUnless("Z" in t2) def test_secrets_dir(self): basedir = "test_node/test_secrets_dir" create_node_dir(basedir, "testing") self.failUnless(os.path.exists(os.path.join(basedir, "private"))) def test_secrets_dir_protected(self): if "win32" in sys.platform.lower() or "cygwin" in sys.platform.lower(): # We don't know how to test that unprivileged users can't read this # thing. (Also we don't know exactly how to set the permissions so # that unprivileged users can't read this thing.) 
raise unittest.SkipTest("We don't know how to set permissions on Windows.") basedir = "test_node/test_secrets_dir_protected" create_node_dir(basedir, "nothing to see here") # make sure private dir was created with correct modes privdir = os.path.join(basedir, "private") st = os.stat(privdir) bits = stat.S_IMODE(st[stat.ST_MODE]) self.failUnless(bits & 0o001 == 0, bits) @defer.inlineCallbacks def test_logdir_is_str(self): from twisted.internet import reactor basedir = FilePath(self.mktemp()) fixture = UseNode(None, None, basedir, "pb://introducer/furl", {}, reactor=reactor) fixture.setUp() self.addCleanup(fixture.cleanUp) ns = Namespace() ns.called = False def call_setLogDir(logdir): ns.called = True self.failUnless(isinstance(logdir, str), logdir) self.patch(foolscap.logging.log, 'setLogDir', call_setLogDir) yield fixture.create_node() self.failUnless(ns.called) def test_set_config_unescaped_furl_hash(self): """ ``_Config.set_config`` raises ``UnescapedHashError`` if the item being set is a furl and the value includes ``"#"`` and does not set the value. """ basedir = self.mktemp() new_config = config_from_string(basedir, "", "") with self.assertRaises(UnescapedHashError): new_config.set_config("foo", "bar.furl", "value#1") with self.assertRaises(MissingConfigEntry): new_config.get_config("foo", "bar.furl") def test_set_config_new_section(self): """ ``_Config.set_config`` can be called with the name of a section that does not already exist to create that section and set an item in it. """ basedir = self.mktemp() new_config = config_from_string(basedir, "", "", ValidConfiguration.everything()) new_config.set_config("foo", "bar", "value1") self.assertEqual( new_config.get_config("foo", "bar"), "value1" ) def test_set_config_replace(self): """ ``_Config.set_config`` can be called with a section and item that already exists to change an existing value to a new one. """ basedir = self.mktemp() new_config = config_from_string(basedir, "", "", ValidConfiguration.everything()) new_config.set_config("foo", "bar", "value1") new_config.set_config("foo", "bar", "value2") self.assertEqual( new_config.get_config("foo", "bar"), "value2" ) def test_set_config_write(self): """ ``_Config.set_config`` persists the configuration change so it can be re-loaded later. """ # Let our nonsense config through valid_config = ValidConfiguration.everything() basedir = FilePath(self.mktemp()) basedir.makedirs() cfg = basedir.child(b"tahoe.cfg") cfg.setContent(b"") new_config = read_config(basedir.path, "", [], valid_config) new_config.set_config("foo", "bar", "value1") loaded_config = read_config(basedir.path, "", [], valid_config) self.assertEqual( loaded_config.get_config("foo", "bar"), "value1", ) def test_set_config_rejects_invalid_config(self): """ ``_Config.set_config`` raises ``UnknownConfigError`` if the section or item is not recognized by the validation object and does not set the value. """ # Make everything invalid. valid_config = ValidConfiguration.nothing() new_config = config_from_string(self.mktemp(), "", "", valid_config) with self.assertRaises(UnknownConfigError): new_config.set_config("foo", "bar", "baz") with self.assertRaises(MissingConfigEntry): new_config.get_config("foo", "bar") def _stub_get_local_addresses_sync(): """ A function like ``allmydata.util.iputil.get_local_addresses_sync``. """ return ["LOCAL"] def _stub_allocate_tcp_port(): """ A function like ``allmydata.util.iputil.allocate_tcp_port``. 
""" return 999 def _stub_none(): """ A function like ``_stub_allocate_tcp`` or ``_stub_get_local_addresses_sync`` but that return an empty list since ``allmydata.node._tub_portlocation`` requires a callable for paramter 1 and 2 counting from 0. """ return [] class TestMissingPorts(unittest.TestCase): """ Test certain ``_tub_portlocation`` error cases for ports setup. """ def setUp(self): self.basedir = self.mktemp() create_node_dir(self.basedir, "testing") def test_listen_on_zero(self): """ ``_tub_portlocation`` raises ``PortAssignmentRequired`` called with a listen address including port 0 and no interface. """ config_data = ( "[node]\n" "tub.port = tcp:0\n" ) config = config_from_string(self.basedir, "portnum", config_data) with self.assertRaises(PortAssignmentRequired): _tub_portlocation(config, _stub_none, _stub_none) def test_listen_on_zero_with_host(self): """ ``_tub_portlocation`` raises ``PortAssignmentRequired`` called with a listen address including port 0 and an interface. """ config_data = ( "[node]\n" "tub.port = tcp:0:interface=127.0.0.1\n" ) config = config_from_string(self.basedir, "portnum", config_data) with self.assertRaises(PortAssignmentRequired): _tub_portlocation(config, _stub_none, _stub_none) def test_parsing_tcp(self): """ When ``tub.port`` is given and ``tub.location`` is **AUTO** the port number from ``tub.port`` is used as the port number for the value constructed for ``tub.location``. """ config_data = ( "[node]\n" "tub.port = tcp:777\n" "tub.location = AUTO\n" ) config = config_from_string(self.basedir, "portnum", config_data) tubport, tublocation = _tub_portlocation( config, _stub_get_local_addresses_sync, _stub_allocate_tcp_port, ) self.assertEqual(tubport, "tcp:777") self.assertEqual(tublocation, b"tcp:LOCAL:777") def test_parsing_defaults(self): """ parse empty config, check defaults """ config_data = ( "[node]\n" ) config = config_from_string(self.basedir, "portnum", config_data) tubport, tublocation = _tub_portlocation( config, _stub_get_local_addresses_sync, _stub_allocate_tcp_port, ) self.assertEqual(tubport, "tcp:999") self.assertEqual(tublocation, b"tcp:LOCAL:999") def test_parsing_location_complex(self): """ location with two options (including defaults) """ config_data = ( "[node]\n" "tub.location = tcp:HOST:888,AUTO\n" ) config = config_from_string(self.basedir, "portnum", config_data) tubport, tublocation = _tub_portlocation( config, _stub_get_local_addresses_sync, _stub_allocate_tcp_port, ) self.assertEqual(tubport, "tcp:999") self.assertEqual(tublocation, b"tcp:HOST:888,tcp:LOCAL:999") def test_parsing_all_disabled(self): """ parse config with both port + location disabled """ config_data = ( "[node]\n" "tub.port = disabled\n" "tub.location = disabled\n" ) config = config_from_string(self.basedir, "portnum", config_data) res = _tub_portlocation( config, _stub_get_local_addresses_sync, _stub_allocate_tcp_port, ) self.assertTrue(res is None) def test_empty_tub_port(self): """ port povided, but empty is an error """ config_data = ( "[node]\n" "tub.port = \n" ) config = config_from_string(self.basedir, "portnum", config_data) with self.assertRaises(ValueError) as ctx: _tub_portlocation( config, _stub_get_local_addresses_sync, _stub_allocate_tcp_port, ) self.assertIn( "tub.port must not be empty", str(ctx.exception) ) def test_empty_tub_location(self): """ location povided, but empty is an error """ config_data = ( "[node]\n" "tub.location = \n" ) config = config_from_string(self.basedir, "portnum", config_data) with self.assertRaises(ValueError) 
as ctx: _tub_portlocation( config, _stub_get_local_addresses_sync, _stub_allocate_tcp_port, ) self.assertIn( "tub.location must not be empty", str(ctx.exception) ) def test_disabled_port_not_tub(self): """ it is an error to disable tub.port but not tub.location """ config_data = ( "[node]\n" "tub.port = disabled\n" "tub.location = not_disabled\n" ) config = config_from_string(self.basedir, "portnum", config_data) with self.assertRaises(ValueError) as ctx: _tub_portlocation( config, _stub_get_local_addresses_sync, _stub_allocate_tcp_port, ) self.assertIn( "tub.port is disabled, but not tub.location", str(ctx.exception) ) def test_disabled_tub_not_port(self): """ it is an error to disable tub.location but not tub.port """ config_data = ( "[node]\n" "tub.port = not_disabled\n" "tub.location = disabled\n" ) config = config_from_string(self.basedir, "portnum", config_data) with self.assertRaises(ValueError) as ctx: _tub_portlocation( config, _stub_get_local_addresses_sync, _stub_allocate_tcp_port, ) self.assertIn( "tub.location is disabled, but not tub.port", str(ctx.exception) ) def test_tub_location_tcp(self): """ If ``reveal-IP-address`` is set to false and ``tub.location`` includes a **tcp** hint then ``_tub_portlocation`` raises ``PrivacyError`` because TCP leaks IP addresses. """ config = config_from_string( "fake.port", "no-basedir", "[node]\nreveal-IP-address = false\ntub.location=tcp:hostname:1234\n", ) with self.assertRaises(PrivacyError) as ctx: _tub_portlocation( config, _stub_get_local_addresses_sync, _stub_allocate_tcp_port, ) self.assertEqual( str(ctx.exception), "tub.location includes tcp: hint", ) def test_tub_location_legacy_tcp(self): """ If ``reveal-IP-address`` is set to false and ``tub.location`` includes a "legacy" hint with no explicit type (which means it is a **tcp** hint) then the behavior is the same as for an explicit **tcp** hint. """ config = config_from_string( "fake.port", "no-basedir", "[node]\nreveal-IP-address = false\ntub.location=hostname:1234\n", ) with self.assertRaises(PrivacyError) as ctx: _tub_portlocation( config, _stub_get_local_addresses_sync, _stub_allocate_tcp_port, ) self.assertEqual( str(ctx.exception), "tub.location includes tcp: hint", ) BASE_CONFIG = """ [tor] enabled = false [i2p] enabled = false """ NOLISTEN = """ [node] tub.port = disabled tub.location = disabled """ DISABLE_STORAGE = """ [storage] enabled = false """ ENABLE_STORAGE = """ [storage] enabled = true """ ENABLE_HELPER = """ [helper] enabled = true """ class FakeTub(object): def __init__(self): self.tubID = base64.b32encode(b"foo") self.listening_ports = [] def setOption(self, name, value): pass def removeAllConnectionHintHandlers(self): pass def addConnectionHintHandler(self, hint_type, handler): pass def listenOn(self, what): self.listening_ports.append(what) def setLocation(self, location): pass def setServiceParent(self, parent): pass class Listeners(unittest.TestCase): # Randomly allocate a couple distinct port numbers to try out. The test # never actually binds these port numbers so we don't care if they're "in # use" on the system or not. We just want a couple distinct values we can # check expected results against. @given(ports=sets(elements=port_numbers(), min_size=2, max_size=2)) def test_multiple_ports(self, ports): """ When there are multiple listen addresses suggested by the ``tub.port`` and ``tub.location`` configuration, the node's *main* port listens on all of them. 
""" port1, port2 = iter(ports) port = ("tcp:%d:interface=127.0.0.1,tcp:%d:interface=127.0.0.1" % (port1, port2)) location = "tcp:localhost:%d,tcp:localhost:%d" % (port1, port2) t = FakeTub() tub_listen_on(None, None, t, port, location) self.assertEqual(t.listening_ports, ["tcp:%d:interface=127.0.0.1" % port1, "tcp:%d:interface=127.0.0.1" % port2]) def test_tor_i2p_listeners(self): """ When configured to listen on an "i2p" or "tor" address, ``tub_listen_on`` tells the Tub to listen on endpoints supplied by the given Tor and I2P providers. """ t = FakeTub() i2p_listener = object() i2p_provider = ConstantAddresses(i2p_listener) tor_listener = object() tor_provider = ConstantAddresses(tor_listener) tub_listen_on( i2p_provider, tor_provider, t, "listen:i2p,listen:tor", "tcp:example.org:1234", ) self.assertEqual( t.listening_ports, [i2p_listener, tor_listener], ) class ClientNotListening(unittest.TestCase): @defer.inlineCallbacks def test_disabled(self): basedir = "test_node/test_disabled" create_node_dir(basedir, "testing") f = open(os.path.join(basedir, 'tahoe.cfg'), 'wt') f.write(BASE_CONFIG) f.write(NOLISTEN) f.write(DISABLE_STORAGE) f.close() n = yield client.create_client(basedir) self.assertEqual(n.tub.getListeners(), []) @defer.inlineCallbacks def test_disabled_but_storage(self): basedir = "test_node/test_disabled_but_storage" create_node_dir(basedir, "testing") f = open(os.path.join(basedir, 'tahoe.cfg'), 'wt') f.write(BASE_CONFIG) f.write(NOLISTEN) f.write(ENABLE_STORAGE) f.close() with self.assertRaises(ValueError) as ctx: yield client.create_client(basedir) self.assertIn( "storage is enabled, but tub is not listening", str(ctx.exception), ) @defer.inlineCallbacks def test_disabled_but_helper(self): basedir = "test_node/test_disabled_but_helper" create_node_dir(basedir, "testing") f = open(os.path.join(basedir, 'tahoe.cfg'), 'wt') f.write(BASE_CONFIG) f.write(NOLISTEN) f.write(DISABLE_STORAGE) f.write(ENABLE_HELPER) f.close() with self.assertRaises(ValueError) as ctx: yield client.create_client(basedir) self.assertIn( "helper is enabled, but tub is not listening", str(ctx.exception), ) class IntroducerNotListening(unittest.TestCase): @defer.inlineCallbacks def test_port_none_introducer(self): basedir = "test_node/test_port_none_introducer" create_node_dir(basedir, "testing") with open(os.path.join(basedir, 'tahoe.cfg'), 'wt') as f: f.write("[node]\n") f.write("tub.port = disabled\n") f.write("tub.location = disabled\n") with self.assertRaises(ValueError) as ctx: yield create_introducer(basedir) self.assertIn( "we are Introducer, but tub is not listening", str(ctx.exception), ) class Configuration(unittest.TestCase): def setUp(self): self.basedir = self.mktemp() fileutil.make_dirs(self.basedir) def test_read_invalid_config(self): with open(os.path.join(self.basedir, 'tahoe.cfg'), 'w') as f: f.write( '[invalid section]\n' 'foo = bar\n' ) with self.assertRaises(UnknownConfigError) as ctx: read_config( self.basedir, "client.port", ) self.assertIn( "invalid section", str(ctx.exception), ) @defer.inlineCallbacks def test_create_client_invalid_config(self): with open(os.path.join(self.basedir, 'tahoe.cfg'), 'w') as f: f.write( '[invalid section]\n' 'foo = bar\n' ) with self.assertRaises(UnknownConfigError) as ctx: yield client.create_client(self.basedir) self.assertIn( "invalid section", str(ctx.exception), ) class CreateDefaultConnectionHandlersTests(unittest.TestCase): """ Tests for create_default_connection_handlers(). 
""" def test_tcp_disabled(self): """ If tcp is set to disabled, no TCP handler is set. """ config = config_from_string("", "", dedent(""" [connections] tcp = disabled """)) default_handlers = create_default_connection_handlers( config, {}, ) self.assertIs(default_handlers["tcp"], None) tahoe_lafs-1.20.0/src/allmydata/test/test_observer.py0000644000000000000000000001021213615410400017571 0ustar00""" Tests for allmydata.util.observer. Ported to Python 3. """ from twisted.trial import unittest from twisted.internet import defer, reactor from allmydata.util import observer def nextTurn(res=None): d = defer.Deferred() reactor.callLater(1, d.callback, res) return d class Observer(unittest.TestCase): def test_oneshot(self): ol = observer.OneShotObserverList() rep = repr(ol) self.failUnlessEqual(rep, "") d1 = ol.when_fired() d2 = ol.when_fired() def _addmore(res): self.failUnlessEqual(res, "result") d3 = ol.when_fired() d3.addCallback(self.failUnlessEqual, "result") return d3 d1.addCallback(_addmore) ol.fire("result") rep = repr(ol) self.failUnlessEqual(rep, " result>") d4 = ol.when_fired() dl = defer.DeferredList([d1,d2,d4]) return dl def test_oneshot_fireagain(self): ol = observer.OneShotObserverList() d = ol.when_fired() def _addmore(res): self.failUnlessEqual(res, "result") ol.fire_if_not_fired("result3") # should be ignored d2 = ol.when_fired() d2.addCallback(self.failUnlessEqual, "result") return d2 d.addCallback(_addmore) ol.fire_if_not_fired("result") ol.fire_if_not_fired("result2") return d def test_lazy_oneshot(self): ol = observer.LazyOneShotObserverList() d1 = ol.when_fired() d2 = ol.when_fired() def _addmore(res): self.failUnlessEqual(res, "result") d3 = ol.when_fired() d3.addCallback(self.failUnlessEqual, "result") return d3 d1.addCallback(_addmore) def _get_result(): return "result" ol.fire(_get_result) d4 = ol.when_fired() dl = defer.DeferredList([d1,d2,d4]) return dl def test_observerlist(self): ol = observer.ObserverList() l1 = [] l2 = [] l3 = [] ol.subscribe(l1.append) ol.notify(1) ol.subscribe(l2.append) ol.notify(2) ol.unsubscribe(l1.append) ol.notify(3) def _check(res): self.failUnlessEqual(l1, [1,2]) self.failUnlessEqual(l2, [2,3]) d = nextTurn() d.addCallback(_check) def _step2(res): def _add(a, b, c=None): l3.append((a,b,c)) ol.unsubscribe(l2.append) ol.subscribe(_add) ol.notify(4, 5, c=6) return nextTurn() def _check2(res): self.failUnlessEqual(l3, [(4,5,6)]) d.addCallback(_step2) d.addCallback(_check2) return d def test_observer_list_reentrant(self): """ ``ObserverList`` is reentrant. """ observed = [] def observer_one(): obs.unsubscribe(observer_one) def observer_two(): observed.append(None) obs = observer.ObserverList() obs.subscribe(observer_one) obs.subscribe(observer_two) obs.notify() self.assertEqual([None], observed) def test_observer_list_observer_errors(self): """ An error in an earlier observer does not prevent notification from being delivered to a later observer. """ observed = [] def observer_one(): raise Exception("Some problem here") def observer_two(): observed.append(None) obs = observer.ObserverList() obs.subscribe(observer_one) obs.subscribe(observer_two) obs.notify() self.assertEqual([None], observed) self.assertEqual(1, len(self.flushLoggedErrors(Exception))) def test_observer_list_propagate_keyboardinterrupt(self): """ ``KeyboardInterrupt`` escapes ``ObserverList.notify``. 
""" def observer_one(): raise KeyboardInterrupt() obs = observer.ObserverList() obs.subscribe(observer_one) with self.assertRaises(KeyboardInterrupt): obs.notify() tahoe_lafs-1.20.0/src/allmydata/test/test_openmetrics.py0000644000000000000000000002640513615410400020305 0ustar00""" Tests for ``/statistics?t=openmetrics``. Ported to Python 3. """ from prometheus_client.openmetrics import parser from treq.testing import RequestTraversalAgent from twisted.web.http import OK from twisted.web.client import readBody from twisted.web.resource import Resource from testtools.twistedsupport import succeeded from testtools.matchers import ( AfterPreprocessing, Equals, MatchesAll, MatchesStructure, MatchesPredicate, ) from testtools.content import text_content from allmydata.web.status import Statistics from allmydata.test.common import SyncTestCase class FakeStatsProvider(object): """ A stats provider that hands backed a canned collection of performance statistics. """ def get_stats(self): # Parsed into a dict from a running tahoe's /statistics?t=json stats = { "stats": { "storage_server.latencies.get.99_9_percentile": None, "storage_server.latencies.close.10_0_percentile": 0.00021910667419433594, "storage_server.latencies.read.01_0_percentile": 2.8848648071289062e-05, "storage_server.latencies.writev.99_9_percentile": None, "storage_server.latencies.read.99_9_percentile": None, "storage_server.latencies.allocate.99_0_percentile": 0.000988006591796875, "storage_server.latencies.writev.mean": 0.00045332245070571654, "storage_server.latencies.close.99_9_percentile": None, "cpu_monitor.15min_avg": 0.00017592000079223033, "storage_server.disk_free_for_root": 103289454592, "storage_server.latencies.get.99_0_percentile": 0.000347137451171875, "storage_server.latencies.get.mean": 0.00021158285060171353, "storage_server.latencies.read.90_0_percentile": 8.893013000488281e-05, "storage_server.latencies.write.01_0_percentile": 3.600120544433594e-05, "storage_server.latencies.write.99_9_percentile": 0.00017690658569335938, "storage_server.latencies.close.90_0_percentile": 0.00033211708068847656, "storage_server.disk_total": 103497859072, "storage_server.latencies.close.95_0_percentile": 0.0003509521484375, "storage_server.latencies.readv.samplesize": 1000, "storage_server.disk_free_for_nonroot": 103289454592, "storage_server.latencies.close.mean": 0.0002715024480059103, "storage_server.latencies.writev.95_0_percentile": 0.0007410049438476562, "storage_server.latencies.readv.90_0_percentile": 0.0003781318664550781, "storage_server.latencies.readv.99_0_percentile": 0.0004050731658935547, "storage_server.latencies.allocate.mean": 0.0007128627429454784, "storage_server.latencies.close.samplesize": 326, "storage_server.latencies.get.50_0_percentile": 0.0001819133758544922, "storage_server.latencies.write.50_0_percentile": 4.482269287109375e-05, "storage_server.latencies.readv.01_0_percentile": 0.0002970695495605469, "storage_server.latencies.get.10_0_percentile": 0.00015687942504882812, "storage_server.latencies.allocate.90_0_percentile": 0.0008189678192138672, "storage_server.latencies.get.samplesize": 472, "storage_server.total_bucket_count": 393, "storage_server.latencies.read.mean": 5.936201880959903e-05, "storage_server.latencies.allocate.01_0_percentile": 0.0004208087921142578, "storage_server.latencies.allocate.99_9_percentile": None, "storage_server.latencies.readv.mean": 0.00034061360359191893, "storage_server.disk_used": 208404480, "storage_server.latencies.allocate.50_0_percentile": 0.0007410049438476562, 
"storage_server.latencies.read.99_0_percentile": 0.00011992454528808594, "node.uptime": 3805759.8545179367, "storage_server.latencies.writev.10_0_percentile": 0.00035190582275390625, "storage_server.latencies.writev.90_0_percentile": 0.0006821155548095703, "storage_server.latencies.close.01_0_percentile": 0.00021505355834960938, "storage_server.latencies.close.50_0_percentile": 0.0002579689025878906, "cpu_monitor.1min_avg": 0.0002130000000003444, "storage_server.latencies.writev.50_0_percentile": 0.0004138946533203125, "storage_server.latencies.read.95_0_percentile": 9.107589721679688e-05, "storage_server.latencies.readv.95_0_percentile": 0.0003859996795654297, "storage_server.latencies.write.10_0_percentile": 3.719329833984375e-05, "storage_server.accepting_immutable_shares": 1, "storage_server.latencies.writev.samplesize": 309, "storage_server.latencies.get.95_0_percentile": 0.0003190040588378906, "storage_server.latencies.readv.10_0_percentile": 0.00032210350036621094, "storage_server.latencies.get.90_0_percentile": 0.0002999305725097656, "storage_server.latencies.get.01_0_percentile": 0.0001239776611328125, "cpu_monitor.total": 641.4941180000001, "storage_server.latencies.write.samplesize": 1000, "storage_server.latencies.write.95_0_percentile": 9.489059448242188e-05, "storage_server.latencies.read.50_0_percentile": 6.890296936035156e-05, "storage_server.latencies.writev.01_0_percentile": 0.00033211708068847656, "storage_server.latencies.read.10_0_percentile": 3.0994415283203125e-05, "storage_server.latencies.allocate.10_0_percentile": 0.0004949569702148438, "storage_server.reserved_space": 0, "storage_server.disk_avail": 103289454592, "storage_server.latencies.write.99_0_percentile": 0.00011301040649414062, "storage_server.latencies.write.90_0_percentile": 9.083747863769531e-05, "cpu_monitor.5min_avg": 0.0002370666691157502, "storage_server.latencies.write.mean": 5.8008909225463864e-05, "storage_server.latencies.readv.50_0_percentile": 0.00033020973205566406, "storage_server.latencies.close.99_0_percentile": 0.0004038810729980469, "storage_server.allocated": 0, "storage_server.latencies.writev.99_0_percentile": 0.0007710456848144531, "storage_server.latencies.readv.99_9_percentile": 0.0004780292510986328, "storage_server.latencies.read.samplesize": 170, "storage_server.latencies.allocate.samplesize": 406, "storage_server.latencies.allocate.95_0_percentile": 0.0008411407470703125, }, "counters": { "storage_server.writev": 309, "storage_server.bytes_added": 197836146, "storage_server.close": 326, "storage_server.readv": 14299, "storage_server.allocate": 406, "storage_server.read": 170, "storage_server.write": 3775, "storage_server.get": 472, }, } return stats class HackItResource(Resource, object): """ A bridge between ``RequestTraversalAgent`` and ``MultiFormatResource`` (used by ``Statistics``). ``MultiFormatResource`` expects the request object to have a ``fields`` attribute but Twisted's ``IRequest`` has no such attribute. Create it here. """ def getChildWithDefault(self, path, request): request.fields = None return Resource.getChildWithDefault(self, path, request) class OpenMetrics(SyncTestCase): """ Tests for ``/statistics?t=openmetrics``. """ def test_spec_compliance(self): """ Does our output adhere to the `OpenMetrics ` spec? 
https://github.com/OpenObservability/OpenMetrics/ https://prometheus.io/docs/instrumenting/exposition_formats/ """ root = HackItResource() root.putChild(b"", Statistics(FakeStatsProvider())) rta = RequestTraversalAgent(root) d = rta.request(b"GET", b"http://localhost/?t=openmetrics") self.assertThat(d, succeeded(matches_stats(self))) def matches_stats(testcase): """ Create a matcher that matches a response that conforms to the OpenMetrics specification. * The ``Content-Type`` is **application/openmetrics-text; version=1.0.0; charset=utf-8**. * The status is **OK**. * The body can be parsed by an OpenMetrics parser. * The metric families in the body are grouped and sorted. * At least one of the expected families appears in the body. :param testtools.TestCase testcase: The case to which to add detail about the matching process. :return: A matcher. """ return MatchesAll( MatchesStructure( code=Equals(OK), # "The content type MUST be..." headers=has_header( "content-type", "application/openmetrics-text; version=1.0.0; charset=utf-8", ), ), AfterPreprocessing( readBodyText, succeeded( MatchesAll( MatchesPredicate(add_detail(testcase, "response body"), "%s dummy"), parses_as_openmetrics(), ) ), ), ) def add_detail(testcase, name): """ Create a matcher that always matches and, as a side-effect, adds the matched value as detail to the testcase. :param testtools.TestCase testcase: The case to which to add the detail. :return: A matcher. """ def predicate(value): testcase.addDetail(name, text_content(value)) return True return predicate def readBodyText(response): """ Read the response body and decode it using UTF-8. :param twisted.web.iweb.IResponse response: The response from which to read the body. :return: A ``Deferred`` that fires with the ``str`` body. """ d = readBody(response) d.addCallback(lambda body: body.decode("utf-8")) return d def has_header(name, value): """ Create a matcher that matches a response object that includes the given name / value pair. :param str name: The name of the item in the HTTP header to match. :param str value: The value of the item in the HTTP header to match by equality. :return: A matcher. """ return AfterPreprocessing( lambda headers: headers.getRawHeaders(name), Equals([value]), ) def parses_as_openmetrics(): """ Create a matcher that matches a ``str`` that can be parsed as an OpenMetrics response and includes a certain well-known value expected by the tests. :return: A matcher. """ # The parser throws if it does not like its input. # Wrapped in a list() to drain the generator. return AfterPreprocessing( lambda body: list(parser.text_string_to_metric_families(body)), AfterPreprocessing( lambda families: families[-1].name, Equals("tahoe_stats_storage_server_total_bucket_count"), ), ) tahoe_lafs-1.20.0/src/allmydata/test/test_protocol_switch.py0000644000000000000000000000244013615410400021170 0ustar00""" Unit tests for ``allmydata.protocol_switch``. By its nature, most of the testing needs to be end-to-end; essentially any test that uses real Foolscap (``test_system.py``, integration tests) ensures Foolscap still works. ``test_istorageserver.py`` tests the HTTP support. 
""" from foolscap.negotiate import Negotiation from .common import TestCase from ..protocol_switch import _PretendToBeNegotiation class UtilityTests(TestCase): """Tests for utilities in the protocol switch code.""" def test_metaclass(self): """ A class that has the ``_PretendToBeNegotiation`` metaclass will support ``isinstance()``'s normal semantics on its own instances, but will also indicate that ``Negotiation`` instances are its instances. """ class Parent(metaclass=_PretendToBeNegotiation): pass class Child(Parent): pass class Other: pass p = Parent() self.assertIsInstance(p, Parent) self.assertIsInstance(Negotiation(), Parent) self.assertNotIsInstance(Other(), Parent) c = Child() self.assertIsInstance(c, Child) self.assertIsInstance(c, Parent) self.assertIsInstance(Negotiation(), Child) self.assertNotIsInstance(Other(), Child) tahoe_lafs-1.20.0/src/allmydata/test/test_repairer.py0000644000000000000000000010712513615410400017565 0ustar00# -*- coding: utf-8 -*- """ Ported to Python 3. """ from allmydata.test import common from allmydata.monitor import Monitor from allmydata import check_results from allmydata.interfaces import NotEnoughSharesError from allmydata.immutable import upload from allmydata.util.consumer import download_to_data from twisted.internet import defer from twisted.trial import unittest import random from allmydata.test.no_network import GridTestMixin # We'll allow you to pass this test even if you trigger eighteen times as # many disk reads and block fetches as would be optimal. READ_LEEWAY = 18 MAX_DELTA_READS = 10 * READ_LEEWAY # N = 10 timeout=240 # François's ARM box timed out after 120 seconds of Verifier.test_corrupt_crypttext_hashtree class RepairTestMixin(object): def _count_reads(self): sum_of_read_counts = 0 for (i, ss, storedir) in self.iterate_servers(): counters = ss.stats_provider.get_stats()['counters'] sum_of_read_counts += counters.get('storage_server.read', 0) return sum_of_read_counts def _count_allocates(self): sum_of_allocate_counts = 0 for (i, ss, storedir) in self.iterate_servers(): counters = ss.stats_provider.get_stats()['counters'] sum_of_allocate_counts += counters.get('storage_server.allocate', 0) return sum_of_allocate_counts def _count_writes(self): sum_of_write_counts = 0 for (i, ss, storedir) in self.iterate_servers(): counters = ss.stats_provider.get_stats()['counters'] sum_of_write_counts += counters.get('storage_server.write', 0) return sum_of_write_counts def _stash_counts(self): self.before_repair_reads = self._count_reads() self.before_repair_allocates = self._count_allocates() self.before_repair_writes = self._count_writes() def _get_delta_counts(self): delta_reads = self._count_reads() - self.before_repair_reads delta_allocates = self._count_allocates() - self.before_repair_allocates delta_writes = self._count_writes() - self.before_repair_writes return (delta_reads, delta_allocates, delta_writes) def failIfBigger(self, x, y): self.failIf(x > y, "%s > %s" % (x, y)) def upload_and_stash(self): c0 = self.g.clients[0] c1 = self.g.clients[1] c0.encoding_params['max_segment_size'] = 12 d = c0.upload(upload.Data(common.TEST_DATA, convergence=b"")) def _stash_uri(ur): self.uri = ur.get_uri() self.c0_filenode = c0.create_node_from_uri(ur.get_uri()) self.c1_filenode = c1.create_node_from_uri(ur.get_uri()) d.addCallback(_stash_uri) return d class Verifier(GridTestMixin, unittest.TestCase, RepairTestMixin): def test_check_without_verify(self): """Check says the file is healthy when none of the shares have been touched. 
It says that the file is unhealthy when all of them have been removed. It doesn't use any reads. """ self.basedir = "repairer/Verifier/check_without_verify" self.set_up_grid(num_clients=2) d = self.upload_and_stash() d.addCallback(lambda ignored: self._stash_counts()) d.addCallback(lambda ignored: self.c0_filenode.check(Monitor(), verify=False)) def _check(cr): self.failUnless(cr.is_healthy()) delta_reads, delta_allocates, delta_writes = self._get_delta_counts() self.failIfBigger(delta_reads, 0) d.addCallback(_check) def _remove_all(ignored): for sh in self.find_uri_shares(self.uri): self.delete_share(sh) d.addCallback(_remove_all) d.addCallback(lambda ignored: self._stash_counts()) d.addCallback(lambda ignored: self.c0_filenode.check(Monitor(), verify=False)) def _check2(cr): self.failIf(cr.is_healthy()) delta_reads, delta_allocates, delta_writes = self._get_delta_counts() self.failIfBigger(delta_reads, 0) d.addCallback(_check2) return d def _help_test_verify(self, corruptor, judgement, shnum=0, debug=False): self.set_up_grid(num_clients=2) d = self.upload_and_stash() d.addCallback(lambda ignored: self._stash_counts()) d.addCallback(lambda ignored: self.corrupt_shares_numbered(self.uri, [shnum],corruptor,debug=debug)) d.addCallback(lambda ignored: self.c1_filenode.check(Monitor(), verify=True)) def _check(vr): delta_reads, delta_allocates, delta_writes = self._get_delta_counts() self.failIfBigger(delta_reads, MAX_DELTA_READS) try: judgement(vr) except unittest.FailTest as e: # FailTest just uses e.args[0] == str new_arg = str(e.args[0]) + "\nvr.data is: " + str(vr.as_dict()) e.args = (new_arg,) raise d.addCallback(_check) return d def judge_no_problem(self, vr): """ Verify says the file is healthy when none of the shares have been touched in a way that matters. It doesn't use more than seven times as many reads as it needs.""" self.failUnless(vr.is_healthy(), (vr, vr.is_healthy(), vr.as_dict())) self.failUnlessEqual(vr.get_share_counter_good(), 10) self.failUnlessEqual(len(vr.get_sharemap()), 10) self.failUnlessEqual(vr.get_encoding_needed(), 3) self.failUnlessEqual(vr.get_encoding_expected(), 10) self.failUnlessEqual(vr.get_host_counter_good_shares(), 10) self.failUnlessEqual(len(vr.get_servers_responding()), 10) self.failUnlessEqual(len(vr.get_corrupt_shares()), 0) def test_ok_no_corruption(self): self.basedir = "repairer/Verifier/ok_no_corruption" return self._help_test_verify(common._corrupt_nothing, self.judge_no_problem) def test_ok_filedata_size(self): self.basedir = "repairer/Verifier/ok_filedatasize" return self._help_test_verify(common._corrupt_size_of_file_data, self.judge_no_problem) def test_ok_sharedata_size(self): self.basedir = "repairer/Verifier/ok_sharedata_size" return self._help_test_verify(common._corrupt_size_of_sharedata, self.judge_no_problem) def test_ok_segment_size(self): self.basedir = "repairer/Verifier/test_ok_segment_size" return self._help_test_verify(common._corrupt_segment_size, self.judge_no_problem) def judge_visible_corruption(self, vr): """Corruption which is detected by the server means that the server will send you back a Failure in response to get_bucket instead of giving you the share data. Test that verifier handles these answers correctly. 
It doesn't use more than seven times as many reads as it needs.""" self.failIf(vr.is_healthy(), (vr, vr.is_healthy(), vr.as_dict())) self.failUnlessEqual(vr.get_share_counter_good(), 9) self.failUnlessEqual(len(vr.get_sharemap()), 9) self.failUnlessEqual(vr.get_encoding_needed(), 3) self.failUnlessEqual(vr.get_encoding_expected(), 10) self.failUnlessEqual(vr.get_host_counter_good_shares(), 9) self.failUnlessEqual(len(vr.get_servers_responding()), 9) self.failUnlessEqual(len(vr.get_corrupt_shares()), 0) def test_corrupt_file_verno(self): self.basedir = "repairer/Verifier/corrupt_file_verno" return self._help_test_verify(common._corrupt_file_version_number, self.judge_visible_corruption) def judge_share_version_incompatibility(self, vr): # corruption of the share version (inside the container, the 1/2 # value that determines whether we've got 4-byte offsets or 8-byte # offsets) to something larger than 2 will trigger a # ShareVersionIncompatible exception, which should be counted in # list-incompatible-shares, rather than list-corrupt-shares. self.failIf(vr.is_healthy(), (vr, vr.is_healthy(), vr.as_dict())) self.failUnlessEqual(vr.get_share_counter_good(), 9) self.failUnlessEqual(len(vr.get_sharemap()), 9) self.failUnlessEqual(vr.get_encoding_needed(), 3) self.failUnlessEqual(vr.get_encoding_expected(), 10) self.failUnlessEqual(vr.get_host_counter_good_shares(), 9) self.failUnlessEqual(len(vr.get_servers_responding()), 10) self.failUnlessEqual(len(vr.get_corrupt_shares()), 0) self.failUnlessEqual(len(vr.get_incompatible_shares()), 1) def test_corrupt_share_verno(self): self.basedir = "repairer/Verifier/corrupt_share_verno" return self._help_test_verify(common._corrupt_sharedata_version_number, self.judge_share_version_incompatibility) def judge_invisible_corruption(self, vr): # corruption of fields that the server does not check (which is most # of them), which will be detected by the client as it downloads # those shares. 
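# In other words (a descriptive summary mirroring the assertions below): a
# single invisibly-corrupted share leaves 9 good shares, all 10 servers
# still responding, exactly one entry in the corrupt-shares list, and no
# incompatible shares.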
self.failIf(vr.is_healthy(), (vr, vr.is_healthy(), vr.as_dict())) self.failUnlessEqual(vr.get_share_counter_good(), 9) self.failUnlessEqual(vr.get_encoding_needed(), 3) self.failUnlessEqual(vr.get_encoding_expected(), 10) self.failUnlessEqual(vr.get_host_counter_good_shares(), 9) self.failUnlessEqual(len(vr.get_corrupt_shares()), 1) self.failUnlessEqual(len(vr.get_incompatible_shares()), 0) self.failUnlessEqual(len(vr.get_servers_responding()), 10) self.failUnlessEqual(len(vr.get_sharemap()), 9) def test_corrupt_sharedata_offset(self): self.basedir = "repairer/Verifier/corrupt_sharedata_offset" return self._help_test_verify(common._corrupt_offset_of_sharedata, self.judge_invisible_corruption) def test_corrupt_ueb_offset(self): self.basedir = "repairer/Verifier/corrupt_ueb_offset" return self._help_test_verify(common._corrupt_offset_of_uri_extension, self.judge_invisible_corruption) def test_corrupt_ueb_offset_shortread(self): self.basedir = "repairer/Verifier/corrupt_ueb_offset_shortread" return self._help_test_verify(common._corrupt_offset_of_uri_extension_to_force_short_read, self.judge_invisible_corruption) def test_corrupt_sharedata(self): self.basedir = "repairer/Verifier/corrupt_sharedata" return self._help_test_verify(common._corrupt_share_data, self.judge_invisible_corruption) def test_corrupt_sharedata_last_byte(self): self.basedir = "repairer/Verifier/corrupt_sharedata_last_byte" return self._help_test_verify(common._corrupt_share_data_last_byte, self.judge_invisible_corruption) def test_corrupt_ueb_length(self): self.basedir = "repairer/Verifier/corrupt_ueb_length" return self._help_test_verify(common._corrupt_length_of_uri_extension, self.judge_invisible_corruption) def test_corrupt_ueb(self): # Note that in some rare situations this might fail, specifically if # the length of the UEB is corrupted to be a value that is bigger than # the size but less than 2000, it might not get caught... But that's # mostly because in that case it doesn't meaningfully corrupt it. See # _get_uri_extension_the_old_way() in layout.py for where the 2000 # number comes from. 
self.basedir = "repairer/Verifier/corrupt_ueb" return self._help_test_verify(common._corrupt_uri_extension, self.judge_invisible_corruption) def test_truncate_crypttext_hashtree(self): # change the start of the block hashtree, to truncate the preceding # crypttext hashtree self.basedir = "repairer/Verifier/truncate_crypttext_hashtree" return self._help_test_verify(common._corrupt_offset_of_block_hashes_to_truncate_crypttext_hashes, self.judge_invisible_corruption) def test_corrupt_block_hashtree_offset(self): self.basedir = "repairer/Verifier/corrupt_block_hashtree_offset" return self._help_test_verify(common._corrupt_offset_of_block_hashes, self.judge_invisible_corruption) def test_wrong_share_verno(self): self.basedir = "repairer/Verifier/wrong_share_verno" return self._help_test_verify(common._corrupt_sharedata_version_number_to_plausible_version, self.judge_invisible_corruption) def test_corrupt_share_hashtree_offset(self): self.basedir = "repairer/Verifier/corrupt_share_hashtree_offset" return self._help_test_verify(common._corrupt_offset_of_share_hashes, self.judge_invisible_corruption) def test_corrupt_crypttext_hashtree_offset(self): self.basedir = "repairer/Verifier/corrupt_crypttext_hashtree_offset" return self._help_test_verify(common._corrupt_offset_of_ciphertext_hash_tree, self.judge_invisible_corruption) def test_corrupt_crypttext_hashtree(self): self.basedir = "repairer/Verifier/corrupt_crypttext_hashtree" return self._help_test_verify(common._corrupt_crypttext_hash_tree, self.judge_invisible_corruption) def test_corrupt_crypttext_hashtree_byte_x221(self): self.basedir = "repairer/Verifier/corrupt_crypttext_hashtree_byte_9_bit_7" return self._help_test_verify(common._corrupt_crypttext_hash_tree_byte_x221, self.judge_invisible_corruption, debug=True) def test_corrupt_block_hashtree(self): self.basedir = "repairer/Verifier/corrupt_block_hashtree" return self._help_test_verify(common._corrupt_block_hashes, self.judge_invisible_corruption) def test_corrupt_share_hashtree(self): self.basedir = "repairer/Verifier/corrupt_share_hashtree" return self._help_test_verify(common._corrupt_share_hashes, self.judge_invisible_corruption) # TODO: the Verifier should decode to ciphertext and check it against the # crypttext-hash-tree. Check this by constructing a bogus file, in which # the crypttext-hash-tree is modified after encoding is done, but before # the UEB is finalized. The Verifier should see a valid # crypttext-hash-tree but then the ciphertext should show up as invalid. # Normally this could only be triggered by a bug in FEC decode. def OFF_test_each_byte(self): # this test takes 140s to run on my laptop, and doesn't have any # actual asserts, so it's commented out. It corrupts each byte of the # share in sequence, and checks to see which ones the Verifier # catches and which it misses. Ticket #819 contains details: there # are several portions of the share that are unused, for which # corruption is not supposed to be caught. # # If the test ran quickly, we could use the share size to compute the # offsets of these unused portions and assert that everything outside # of them was detected. We could then replace the rest of # Verifier.test_* (which takes 16s to run on my laptop) with this # one. 
self.basedir = "repairer/Verifier/each_byte" self.set_up_grid(num_clients=2) d = self.upload_and_stash() def _grab_sh0(res): self.sh0_file = [sharefile for (shnum, serverid, sharefile) in self.find_uri_shares(self.uri) if shnum == 0][0] self.sh0_orig = open(self.sh0_file, "rb").read() d.addCallback(_grab_sh0) def _fix_sh0(res): f = open(self.sh0_file, "wb") f.write(self.sh0_orig) f.close() def _corrupt(ign, which): def _corruptor(s, debug=False): return s[:which] + chr(ord(s[which])^0x01) + s[which+1:] self.corrupt_shares_numbered(self.uri, [0], _corruptor) results = {} def _did_check(vr, i): #print("corrupt %d: healthy=%s" % (i, vr.is_healthy())) results[i] = vr.is_healthy() def _start(ign): d = defer.succeed(None) for i in range(len(self.sh0_orig)): d.addCallback(_corrupt, i) d.addCallback(lambda ign: self.c1_filenode.check(Monitor(), verify=True)) d.addCallback(_did_check, i) d.addCallback(_fix_sh0) return d d.addCallback(_start) def _show_results(ign): f = open("test_each_byte_output", "w") for i in sorted(results.keys()): print("%d: %s" % (i, results[i]), file=f) f.close() print("Please look in _trial_temp/test_each_byte_output for results") d.addCallback(_show_results) return d # We'll allow you to pass this test even if you trigger thirty-five times as # many block sends and disk writes as would be optimal. WRITE_LEEWAY = 35 # Optimally, you could repair one of these (small) files in a single write. DELTA_WRITES_PER_SHARE = 1 * WRITE_LEEWAY class Repairer(GridTestMixin, unittest.TestCase, RepairTestMixin, common.ShouldFailMixin): def test_harness(self): # This test is actually to make sure our test harness works, rather # than testing anything about Tahoe code itself. self.basedir = "repairer/Repairer/test_code" self.set_up_grid(num_clients=2) d = self.upload_and_stash() d.addCallback(lambda ignored: self.find_uri_shares(self.uri)) def _stash_shares(oldshares): self.oldshares = oldshares d.addCallback(_stash_shares) d.addCallback(lambda ignored: self.find_uri_shares(self.uri)) def _compare(newshares): self.failUnlessEqual(newshares, self.oldshares) d.addCallback(_compare) def _delete_8(ignored): shnum = self.oldshares[0][0] self.delete_shares_numbered(self.uri, [shnum]) for sh in self.oldshares[1:8]: self.delete_share(sh) d.addCallback(_delete_8) d.addCallback(lambda ignored: self.find_uri_shares(self.uri)) d.addCallback(lambda shares: self.failUnlessEqual(len(shares), 2)) d.addCallback(lambda ignored: self.shouldFail(NotEnoughSharesError, "then_download", None, download_to_data, self.c1_filenode)) d.addCallback(lambda ignored: self.shouldFail(NotEnoughSharesError, "then_repair", None, self.c1_filenode.check_and_repair, Monitor(), verify=False)) # test share corruption def _test_corrupt(ignored): olddata = {} shares = self.find_uri_shares(self.uri) for (shnum, serverid, sharefile) in shares: olddata[ (shnum, serverid) ] = open(sharefile, "rb").read() for sh in shares: self.corrupt_share(sh, common._corrupt_uri_extension) for (shnum, serverid, sharefile) in shares: newdata = open(sharefile, "rb").read() self.failIfEqual(olddata[ (shnum, serverid) ], newdata) d.addCallback(_test_corrupt) def _remove_all(ignored): for sh in self.find_uri_shares(self.uri): self.delete_share(sh) d.addCallback(_remove_all) d.addCallback(lambda ignored: self.find_uri_shares(self.uri)) d.addCallback(lambda shares: self.failUnlessEqual(shares, [])) return d def test_repair_from_deletion_of_1(self): """ Repair replaces a share that got deleted. 
""" self.basedir = "repairer/Repairer/repair_from_deletion_of_1" self.set_up_grid(num_clients=2) d = self.upload_and_stash() d.addCallback(lambda ignored: self.delete_shares_numbered(self.uri, [2])) d.addCallback(lambda ignored: self._stash_counts()) d.addCallback(lambda ignored: self.c0_filenode.check_and_repair(Monitor(), verify=False)) def _check_results(crr): self.failUnlessIsInstance(crr, check_results.CheckAndRepairResults) pre = crr.get_pre_repair_results() self.failUnlessIsInstance(pre, check_results.CheckResults) post = crr.get_post_repair_results() self.failUnlessIsInstance(post, check_results.CheckResults) delta_reads, delta_allocates, delta_writes = self._get_delta_counts() self.failIfBigger(delta_reads, MAX_DELTA_READS) self.failIfBigger(delta_allocates, DELTA_WRITES_PER_SHARE) self.failIf(pre.is_healthy()) self.failUnless(post.is_healthy()) # Now we inspect the filesystem to make sure that it has 10 # shares. shares = self.find_uri_shares(self.uri) self.failIf(len(shares) < 10) d.addCallback(_check_results) d.addCallback(lambda ignored: self.c0_filenode.check(Monitor(), verify=True)) d.addCallback(lambda vr: self.failUnless(vr.is_healthy())) # Now we delete seven of the other shares, then try to download the # file and assert that it succeeds at downloading and has the right # contents. This can't work unless it has already repaired the # previously-deleted share #2. d.addCallback(lambda ignored: self.delete_shares_numbered(self.uri, list(range(3, 10+1)))) d.addCallback(lambda ignored: download_to_data(self.c1_filenode)) d.addCallback(lambda newdata: self.failUnlessEqual(newdata, common.TEST_DATA)) return d def test_repair_from_deletion_of_7(self): """ Repair replaces seven shares that got deleted. """ self.basedir = "repairer/Repairer/repair_from_deletion_of_7" self.set_up_grid(num_clients=2) d = self.upload_and_stash() d.addCallback(lambda ignored: self.delete_shares_numbered(self.uri, list(range(7)))) d.addCallback(lambda ignored: self._stash_counts()) d.addCallback(lambda ignored: self.c0_filenode.check_and_repair(Monitor(), verify=False)) def _check_results(crr): self.failUnlessIsInstance(crr, check_results.CheckAndRepairResults) pre = crr.get_pre_repair_results() self.failUnlessIsInstance(pre, check_results.CheckResults) post = crr.get_post_repair_results() self.failUnlessIsInstance(post, check_results.CheckResults) delta_reads, delta_allocates, delta_writes = self._get_delta_counts() self.failIfBigger(delta_reads, MAX_DELTA_READS) self.failIfBigger(delta_allocates, (DELTA_WRITES_PER_SHARE * 7)) self.failIf(pre.is_healthy()) self.failUnless(post.is_healthy(), post.as_dict()) # Make sure we really have 10 shares. shares = self.find_uri_shares(self.uri) self.failIf(len(shares) < 10) d.addCallback(_check_results) d.addCallback(lambda ignored: self.c0_filenode.check(Monitor(), verify=True)) d.addCallback(lambda vr: self.failUnless(vr.is_healthy())) # Now we delete seven of the other shares, then try to download the # file and assert that it succeeds at downloading and has the right # contents. This can't work unless it has already repaired the # previously-deleted share #2. 
d.addCallback(lambda ignored: self.delete_shares_numbered(self.uri, list(range(3, 10+1)))) d.addCallback(lambda ignored: download_to_data(self.c1_filenode)) d.addCallback(lambda newdata: self.failUnlessEqual(newdata, common.TEST_DATA)) return d def test_repairer_servers_of_happiness(self): # The repairer is supposed to generate and place as many of the # missing shares as possible without caring about how they are # distributed. self.basedir = "repairer/Repairer/repairer_servers_of_happiness" self.set_up_grid(num_clients=2, num_servers=10) d = self.upload_and_stash() # Now delete some servers. We want to leave 3 servers, which # will allow us to restore the file to a healthy state without # distributing the shares widely enough to satisfy the default # happiness setting. def _delete_some_servers(ignored): for i in range(7): self.g.remove_server(self.g.servers_by_number[i].my_nodeid) assert len(self.g.servers_by_number) == 3 d.addCallback(_delete_some_servers) # Now try to repair the file. d.addCallback(lambda ignored: self.c0_filenode.check_and_repair(Monitor(), verify=False)) def _check_results(crr): self.failUnlessIsInstance(crr, check_results.CheckAndRepairResults) pre = crr.get_pre_repair_results() post = crr.get_post_repair_results() for p in (pre, post): self.failUnlessIsInstance(p, check_results.CheckResults) self.failIf(pre.is_healthy()) self.failUnless(post.is_healthy()) d.addCallback(_check_results) return d # why is test_repair_from_corruption_of_1 disabled? Read on: # # As recently documented in NEWS.rst for the 1.3.0 release, the current # immutable repairer suffers from several limitations: # # * minimalistic verifier: it's just download without decryption, so we # don't look for corruption in N-k shares, and for many fields (those # which are the same in all shares) we only look for corruption in a # single share # # * some kinds of corruption cause download to fail (when it ought to # just switch to a different share), so repair will fail on these too # # * RIStorageServer doesn't offer a way to delete old corrupt immutable # shares (the authority model is not at all clear), so the best the # repairer can do is to put replacement shares on new servers, # unfortunately leaving the corrupt shares in place # # This test is pretty strenuous: it asserts that the repairer does the # ideal thing in 8 distinct situations, with randomized corruption in # each. Because of the aforementioned limitations, it is highly unlikely # to pass any of these. We're also concerned that the download-fails case # can provoke a lost-progress bug (one was fixed, but there might be more # lurking), which will cause the test to fail despite a ".todo" marker, # and will probably cause subsequent unrelated tests to fail too (due to # "unclean reactor" problems). # # In addition, I (warner) have recently refactored the rest of this class # to use the much-faster no_network.GridTestMixin, so this tests needs to # be updated before it will be able to run again. 
# # So we're turning this test off until we've done one or more of the # following: # * remove some of these limitations # * break the test up into smaller, more functionally-oriented pieces # * simplify the repairer enough to let us be confident that it is free # of lost-progress bugs def OFF_test_repair_from_corruption_of_1(self): d = defer.succeed(None) d.addCallback(self.find_all_shares) stash = [None] def _stash_it(res): stash[0] = res return res d.addCallback(_stash_it) def _put_it_all_back(ignored): self.replace_shares(stash[0], storage_index=self.uri.get_storage_index()) return ignored def _repair_from_corruption(shnum, corruptor_func): before_repair_reads = self._count_reads() before_repair_allocates = self._count_writes() d2 = self.filenode.check_and_repair(Monitor(), verify=True) def _after_repair(checkandrepairresults): prerepairres = checkandrepairresults.get_pre_repair_results() postrepairres = checkandrepairresults.get_post_repair_results() after_repair_reads = self._count_reads() after_repair_allocates = self._count_writes() # The "* 2" in reads is because you might read a whole share # before figuring out that it is corrupted. It might be # possible to make this delta reads number a little tighter. self.failIf(after_repair_reads - before_repair_reads > (MAX_DELTA_READS * 2), (after_repair_reads, before_repair_reads)) # The "* 2" in writes is because each server has two shares, # and it is reasonable for repairer to conclude that there # are two shares that it should upload, if the server fails # to serve the first share. self.failIf(after_repair_allocates - before_repair_allocates > (DELTA_WRITES_PER_SHARE * 2), (after_repair_allocates, before_repair_allocates)) self.failIf(prerepairres.is_healthy(), (prerepairres.data, corruptor_func)) self.failUnless(postrepairres.is_healthy(), (postrepairres.data, corruptor_func)) # Now we inspect the filesystem to make sure that it has 10 # shares. shares = self.find_all_shares() self.failIf(len(shares) < 10) # Now we assert that the verifier reports the file as healthy. d3 = self.filenode.check(Monitor(), verify=True) def _after_verify(verifyresults): self.failUnless(verifyresults.is_healthy()) d3.addCallback(_after_verify) # Now we delete seven of the other shares, then try to # download the file and assert that it succeeds at # downloading and has the right contents. This can't work # unless it has already repaired the previously-corrupted share. def _then_delete_7_and_try_a_download(unused=None): shnums = list(range(10)) shnums.remove(shnum) random.shuffle(shnums) for sharenum in shnums[:7]: self._delete_a_share(sharenum=sharenum) return self._download_and_check_plaintext() d3.addCallback(_then_delete_7_and_try_a_download) return d3 d2.addCallback(_after_repair) return d2 for corruptor_func in ( common._corrupt_file_version_number, common._corrupt_sharedata_version_number, common._corrupt_offset_of_sharedata, common._corrupt_offset_of_uri_extension, common._corrupt_offset_of_uri_extension_to_force_short_read, common._corrupt_share_data, common._corrupt_length_of_uri_extension, common._corrupt_uri_extension, ): # Now we corrupt a share... d.addCallback(self._corrupt_a_random_share, corruptor_func) # And repair... d.addCallback(_repair_from_corruption, corruptor_func) return d #test_repair_from_corruption_of_1.todo = "Repairer doesn't properly replace corrupted shares yet." 
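    # [Editor's note -- sketch, not part of the original suite] The
    # _corruptor helpers in this module flip a single bit with the
    # str-oriented idiom ``s[:which] + chr(ord(s[which])^0x01) + s[which+1:]``;
    # if the share data is handed to the corruptor as bytes rather than str,
    # indexing already yields an int, so a bytes-friendly version of the same
    # one-bit flip could look like the hypothetical helper below. Treat it as
    # a minimal sketch, assuming the corruptor receives and must return bytes.
    @staticmethod
    def _flip_bit_at(share_data, offset):
        """Return a copy of share_data (bytes) with one bit flipped at offset."""
        corrupted = bytearray(share_data)
        corrupted[offset] ^= 0x01   # bytes/bytearray indexing gives an int
        return bytes(corrupted)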
def test_tiny_reads(self): # ticket #1223 points out three problems: # repairer reads beyond end of input file # new-downloader does not tolerate overreads # uploader does lots of tiny reads, inefficient self.basedir = "repairer/Repairer/test_tiny_reads" self.set_up_grid() c0 = self.g.clients[0] DATA = b"a"*135 c0.encoding_params['k'] = 22 c0.encoding_params['n'] = 66 d = c0.upload(upload.Data(DATA, convergence=b"")) def _then(ur): self.uri = ur.get_uri() self.delete_shares_numbered(self.uri, [0]) self.c0_filenode = c0.create_node_from_uri(ur.get_uri()) self._stash_counts() return self.c0_filenode.check_and_repair(Monitor()) d.addCallback(_then) def _check(ign): (r,a,w) = self._get_delta_counts() # when the uploader (driven by the repairer) does full-segment # reads, this makes 44 server read calls (2*k). Before, when it # was doing input_chunk_size reads (7 bytes), it was doing over # 400. self.failIf(r > 100, "too many reads: %d>100" % r) d.addCallback(_check) return d def test_servers_responding(self): self.basedir = "repairer/Repairer/servers_responding" self.set_up_grid(num_clients=2) d = self.upload_and_stash() # now cause one of the servers to not respond during the pre-repair # filecheck, but then *do* respond to the post-repair filecheck def _then(ign): ss = self.g.servers_by_number[0] # we want to delete the share corresponding to the server # we're making not-respond share = next(ss.get_shares(self.c0_filenode.get_storage_index()))[0] self.delete_shares_numbered(self.uri, [share]) return self.c0_filenode.check_and_repair(Monitor()) d.addCallback(_then) def _check(rr): # this exercises a bug in which the servers-responding list did # not include servers that responded to the Repair, but which did # not respond to the pre-repair filecheck prr = rr.get_post_repair_results() expected = set(self.g.get_all_serverids()) responding_set = frozenset([s.get_serverid() for s in prr.get_servers_responding()]) self.failIf(expected - responding_set, expected - responding_set) self.failIf(responding_set - expected, responding_set - expected) self.failUnlessEqual(expected, set([s.get_serverid() for s in prr.get_servers_responding()])) d.addCallback(_check) return d # XXX extend these tests to show that the checker detects which specific # share on which specific server is broken -- this is necessary so that the # checker results can be passed to the repairer and the repairer can go ahead # and upload fixes without first doing what is effectively a check (/verify) # run # XXX extend these tests to show bad behavior of various kinds from servers: # raising exception from each remove_foo() method, for example # XXX test disconnect DeadReferenceError from get_buckets and get_block_whatsit # XXX test corruption that truncates other hash trees than just the crypttext # hash tree # XXX test the notify-someone-about-corruption feature (also implement that # feature) # XXX test whether repairer (downloader) correctly downloads a file even if # to do so it has to acquire shares from a server that has already tried to # serve it a corrupted share. (I don't think the current downloader would # pass this test, depending on the kind of corruption.) 
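# [Editor's sketch, not part of the original suite] One possible shape for the
# last wishlist item above (repair and download succeeding even though one
# server serves a corrupted share), built only from helpers this module
# already uses (upload_and_stash, corrupt_shares_numbered, check_and_repair,
# download_to_data). The class and method names are hypothetical; the method
# is prefixed OFF_ (this module's own convention for disabled tests) because,
# per the comments above, the current downloader/repairer may not pass it yet.
class OFFRepairDespiteCorruptShareSketch(GridTestMixin, unittest.TestCase,
                                         RepairTestMixin):
    def OFF_test_repair_despite_one_corrupt_share(self):
        self.basedir = "repairer/Sketch/repair_despite_one_corrupt_share"
        self.set_up_grid(num_clients=2)
        d = self.upload_and_stash()
        def _flip_first_byte(s, debug=False):
            # flip one bit so the server holding share 0 serves bad data
            data = bytearray(s)
            data[0] ^= 0x01
            return bytes(data)
        d.addCallback(lambda ign:
                      self.corrupt_shares_numbered(self.uri, [0], _flip_first_byte))
        d.addCallback(lambda ign:
                      self.c0_filenode.check_and_repair(Monitor(), verify=True))
        d.addCallback(lambda crr:
                      self.failUnless(crr.get_post_repair_results().is_healthy()))
        # the download below must tolerate the server that already served a
        # corrupted share -- exactly the behavior the XXX note asks about
        d.addCallback(lambda ign: download_to_data(self.c1_filenode))
        d.addCallback(lambda data:
                      self.failUnlessEqual(data, common.TEST_DATA))
        return d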
tahoe_lafs-1.20.0/src/allmydata/test/test_runner.py0000644000000000000000000005532013615410400017264 0ustar00""" Ported to Python 3 """ import os.path, re, sys from os import linesep import locale from testtools.matchers import ( MatchesListwise, MatchesAny, Contains, Equals, Always, ) from testtools.twistedsupport import ( succeeded, ) from eliot import ( log_call, ) from twisted.trial import unittest from twisted.internet import reactor from twisted.python import usage from twisted.python.runtime import platform from twisted.internet.defer import ( inlineCallbacks, DeferredList, ) from twisted.internet.testing import ( MemoryReactorClock, ) from twisted.python.filepath import FilePath from allmydata.util import fileutil, pollmixin from allmydata.util.encodingutil import unicode_to_argv from allmydata.util.pid import ( check_pid_process, _pidfile_to_lockpath, ProcessInTheWay, ) from allmydata.test import common_util import allmydata from allmydata.scripts.tahoe_run import ( on_stdin_close, ) from .common import ( PIPE, Popen, ) from .common_util import ( parse_cli, run_cli, run_cli_unicode, ) from .cli_node_api import ( CLINodeAPI, Expect, on_stdout, on_stdout_and_stderr, ) from ..util.eliotutil import ( inline_callbacks, ) from .common import ( SyncTestCase, ) def get_root_from_file(src): srcdir = os.path.dirname(os.path.dirname(os.path.normcase(os.path.realpath(src)))) root = os.path.dirname(srcdir) if os.path.basename(srcdir) == 'site-packages': if re.search(r'python.+\..+', os.path.basename(root)): root = os.path.dirname(root) root = os.path.dirname(root) elif os.path.basename(root) == 'src': root = os.path.dirname(root) return root srcfile = allmydata.__file__ rootdir = get_root_from_file(srcfile) class ParseOrExitTests(SyncTestCase): """ Tests for ``parse_or_exit``. """ def test_nonascii_error_content(self): """ ``parse_or_exit`` can report errors that include non-ascii content. """ tricky = u"\u00F6" self.assertThat( run_cli_unicode(tricky, [], encoding="utf-8"), succeeded( MatchesListwise([ # returncode Equals(1), # stdout MatchesAny( # Python 2 Contains(u"Unknown command: \\xf6"), # Python 3 Contains(u"Unknown command: \xf6"), ), # stderr, Always() ]), ), ) @log_call(action_type="run-bin-tahoe") def run_bintahoe(extra_argv, python_options=None): """ Run the main Tahoe entrypoint in a child process with the given additional arguments. :param [unicode] extra_argv: More arguments for the child process argv. :return: A three-tuple of stdout (unicode), stderr (unicode), and the child process "returncode" (int). """ argv = [sys.executable] if python_options is not None: argv.extend(python_options) argv.extend([u"-b", u"-m", u"allmydata.scripts.runner"]) argv.extend(extra_argv) argv = list(unicode_to_argv(arg) for arg in argv) p = Popen(argv, stdout=PIPE, stderr=PIPE) encoding = locale.getpreferredencoding(False) out = p.stdout.read().decode(encoding) err = p.stderr.read().decode(encoding) returncode = p.wait() return (out, err, returncode) class BinTahoe(common_util.SignalMixin, unittest.TestCase): def test_unicode_arguments_and_output(self): """ The runner script receives unmangled non-ASCII values in argv. 
""" tricky = u"\u00F6" out, err, returncode = run_bintahoe([tricky]) expected = u"Unknown command: \xf6" self.assertEqual(returncode, 1) self.assertIn( expected, out, "expected {!r} not found in {!r}\nstderr: {!r}".format(expected, out, err), ) def test_with_python_options(self): """ Additional options for the Python interpreter don't prevent the runner script from receiving the arguments meant for it. """ # This seems like a redundant test for someone else's functionality # but on Windows we parse the whole command line string ourselves so # we have to have our own implementation of skipping these options. # -B is a harmless option that prevents writing bytecode so we can add it # without impacting other behavior noticably. out, err, returncode = run_bintahoe([u"--version"], python_options=[u"-B"]) self.assertEqual(returncode, 0, f"Out:\n{out}\nErr:\n{err}") self.assertTrue(out.startswith(allmydata.__appname__ + '/')) def test_help_eliot_destinations(self): out, err, returncode = run_bintahoe([u"--help-eliot-destinations"]) self.assertIn(u"\tfile:", out) self.assertEqual(returncode, 0) def test_eliot_destination(self): out, err, returncode = run_bintahoe([ # Proves little but maybe more than nothing. u"--eliot-destination=file:-", # Throw in *some* command or the process exits with error, making # it difficult for us to see if the previous arg was accepted or # not. u"--help", ]) self.assertEqual(returncode, 0) def test_unknown_eliot_destination(self): out, err, returncode = run_bintahoe([ u"--eliot-destination=invalid:more", ]) self.assertEqual(1, returncode) self.assertIn(u"Unknown destination description", out) self.assertIn(u"invalid:more", out) def test_malformed_eliot_destination(self): out, err, returncode = run_bintahoe([ u"--eliot-destination=invalid", ]) self.assertEqual(1, returncode) self.assertIn(u"must be formatted like", out) def test_escape_in_eliot_destination(self): out, err, returncode = run_bintahoe([ u"--eliot-destination=file:@foo", ]) self.assertEqual(1, returncode) self.assertIn(u"Unsupported escape character", out) class CreateNode(unittest.TestCase): # exercise "tahoe create-node" and "tahoe create-introducer" by calling # the corresponding code as a subroutine. def workdir(self, name): basedir = os.path.join("test_runner", "CreateNode", name) fileutil.make_dirs(basedir) return basedir @inlineCallbacks def do_create(self, kind, *args): basedir = self.workdir("test_" + kind) command = "create-" + kind is_client = kind in ("node", "client") tac = is_client and "tahoe-client.tac" or ("tahoe-" + kind + ".tac") n1 = os.path.join(basedir, command + "-n1") argv = ["--quiet", command, "--basedir", n1] + list(args) rc, out, err = yield run_cli(*map(unicode_to_argv, argv)) self.failUnlessEqual(err, "") self.failUnlessEqual(out, "") self.failUnlessEqual(rc, 0) self.failUnless(os.path.exists(n1)) self.failUnless(os.path.exists(os.path.join(n1, tac))) if is_client: # tahoe.cfg should exist, and should have storage enabled for # 'create-node', and disabled for 'create-client'. 
tahoe_cfg = os.path.join(n1, "tahoe.cfg") self.failUnless(os.path.exists(tahoe_cfg)) content = fileutil.read(tahoe_cfg).decode('utf-8').replace('\r\n', '\n') if kind == "client": self.failUnless(re.search(r"\n\[storage\]\n#.*\nenabled = false\n", content), content) else: self.failUnless(re.search(r"\n\[storage\]\n#.*\nenabled = true\n", content), content) self.failUnless("\nreserved_space = 1G\n" in content) # creating the node a second time should be rejected rc, out, err = yield run_cli(*map(unicode_to_argv, argv)) self.failIfEqual(rc, 0, str((out, err, rc))) self.failUnlessEqual(out, "") self.failUnless("is not empty." in err) # Fail if there is a non-empty line that doesn't end with a # punctuation mark. for line in err.splitlines(): self.failIf(re.search("[\S][^\.!?]$", line), (line,)) # test that the non --basedir form works too n2 = os.path.join(basedir, command + "-n2") argv = ["--quiet", command] + list(args) + [n2] rc, out, err = yield run_cli(*map(unicode_to_argv, argv)) self.failUnlessEqual(err, "") self.failUnlessEqual(out, "") self.failUnlessEqual(rc, 0) self.failUnless(os.path.exists(n2)) self.failUnless(os.path.exists(os.path.join(n2, tac))) # test the --node-directory form n3 = os.path.join(basedir, command + "-n3") argv = ["--quiet", "--node-directory", n3, command] + list(args) rc, out, err = yield run_cli(*map(unicode_to_argv, argv)) self.failUnlessEqual(err, "") self.failUnlessEqual(out, "") self.failUnlessEqual(rc, 0) self.failUnless(os.path.exists(n3)) self.failUnless(os.path.exists(os.path.join(n3, tac))) if kind in ("client", "node", "introducer"): # test that the output (without --quiet) includes the base directory n4 = os.path.join(basedir, command + "-n4") argv = [command] + list(args) + [n4] rc, out, err = yield run_cli(*map(unicode_to_argv, argv)) self.failUnlessEqual(err, "") self.failUnlessIn(" created in ", out) self.failUnlessIn(n4, out) self.failIfIn("\\\\?\\", out) self.failUnlessEqual(rc, 0) self.failUnless(os.path.exists(n4)) self.failUnless(os.path.exists(os.path.join(n4, tac))) # make sure it rejects too many arguments self.failUnlessRaises(usage.UsageError, parse_cli, command, "basedir", "extraarg") # when creating a non-client, there is no default for the basedir if not is_client: argv = [command] self.failUnlessRaises(usage.UsageError, parse_cli, command) def test_node(self): self.do_create("node", "--hostname=127.0.0.1") def test_client(self): # create-client should behave like create-node --no-storage. self.do_create("client") def test_introducer(self): self.do_create("introducer", "--hostname=127.0.0.1") def test_subcommands(self): # no arguments should trigger a command listing, via UsageError self.failUnlessRaises(usage.UsageError, parse_cli, ) class RunNode(common_util.SignalMixin, unittest.TestCase, pollmixin.PollMixin): """ exercise "tahoe run" for both introducer and client node, by spawning "tahoe run" as a subprocess. This doesn't get us line-level coverage, but it does a better job of confirming that the user can actually run "./bin/tahoe run" and expect it to work. This verifies that bin/tahoe sets up PYTHONPATH and the like correctly. """ def workdir(self, name): basedir = os.path.join("test_runner", "RunNode", name) fileutil.make_dirs(basedir) return basedir @inline_callbacks def test_introducer(self): """ The introducer furl is stable across restarts. 
""" basedir = self.workdir("test_introducer") c1 = os.path.join(basedir, u"c1") tahoe = CLINodeAPI(reactor, FilePath(c1)) self.addCleanup(tahoe.stop_and_wait) out, err, returncode = run_bintahoe([ u"--quiet", u"create-introducer", u"--basedir", c1, u"--hostname", u"127.0.0.1", ]) self.assertEqual( returncode, 0, "stdout: {!r}\n" "stderr: {!r}\n", ) # This makes sure that node.url is written, which allows us to # detect when the introducer restarts in _node_has_restarted below. config = fileutil.read(tahoe.config_file.path).decode('utf-8') self.assertIn('{}web.port = {}'.format(linesep, linesep), config) fileutil.write( tahoe.config_file.path, config.replace( '{}web.port = {}'.format(linesep, linesep), '{}web.port = 0{}'.format(linesep, linesep), ) ) p = Expect() tahoe.run(on_stdout(p)) yield p.expect(b"introducer running") tahoe.active() yield self.poll(tahoe.introducer_furl_file.exists) # read the introducer.furl file so we can check that the contents # don't change on restart furl = fileutil.read(tahoe.introducer_furl_file.path) tahoe.active() self.assertTrue(tahoe.twistd_pid_file.exists()) self.assertTrue(tahoe.node_url_file.exists()) # rm this so we can detect when the second incarnation is ready tahoe.node_url_file.remove() yield tahoe.stop_and_wait() p = Expect() tahoe.run(on_stdout(p)) yield p.expect(b"introducer running") # Again, the second incarnation of the node might not be ready yet, so # poll until it is. This time introducer_furl_file already exists, so # we check for the existence of node_url_file instead. yield self.poll(tahoe.node_url_file.exists) # The point of this test! After starting the second time the # introducer furl file must exist and contain the same contents as it # did before. self.assertTrue(tahoe.introducer_furl_file.exists()) self.assertEqual(furl, fileutil.read(tahoe.introducer_furl_file.path)) @inline_callbacks def test_client(self): """ Test too many things. 0) Verify that "tahoe create-node" takes a --webport option and writes the value to the configuration file. 1) Verify that "tahoe run" writes a pid file and a node url file (on POSIX). 2) Verify that the storage furl file has a stable value across a "tahoe run" / stop / "tahoe run" sequence. 3) Verify that the pid file is removed after SIGTERM (on POSIX). """ basedir = self.workdir("test_client") c1 = os.path.join(basedir, u"c1") tahoe = CLINodeAPI(reactor, FilePath(c1)) # Set this up right now so we don't forget later. self.addCleanup(tahoe.cleanup) out, err, returncode = run_bintahoe([ u"--quiet", u"create-node", u"--basedir", c1, u"--webport", u"0", u"--hostname", u"localhost", ]) self.failUnlessEqual(returncode, 0) # Check that the --webport option worked. config = fileutil.read(tahoe.config_file.path).decode('utf-8') self.assertIn( '{}web.port = 0{}'.format(linesep, linesep), config, ) # After this it's safe to start the node tahoe.active() p = Expect() # This will run until we stop it. tahoe.run(on_stdout(p)) # Wait for startup to have proceeded to a reasonable point. yield p.expect(b"client running") tahoe.active() # read the storage.furl file so we can check that its contents don't # change on restart storage_furl = fileutil.read(tahoe.storage_furl_file.path) self.assertTrue(tahoe.twistd_pid_file.exists()) # rm this so we can detect when the second incarnation is ready tahoe.node_url_file.remove() yield tahoe.stop_and_wait() p = Expect() # We don't have to add another cleanup for this one, the one from # above is still registered. 
tahoe.run(on_stdout(p)) yield p.expect(b"client running") tahoe.active() self.assertEqual( storage_furl, fileutil.read(tahoe.storage_furl_file.path), ) self.assertTrue( tahoe.twistd_pid_file.exists(), "PID file ({}) didn't exist when we expected it to. " "These exist: {}".format( tahoe.twistd_pid_file, tahoe.twistd_pid_file.parent().listdir(), ), ) yield tahoe.stop_and_wait() # twistd.pid should be gone by now -- except on Windows, where # killing a subprocess immediately exits with no chance for # any shutdown code (that is, no Twisted shutdown hooks can # run). if not platform.isWindows(): self.assertFalse(tahoe.twistd_pid_file.exists()) def _remove(self, res, file): fileutil.remove(file) return res def test_run_bad_directory(self): """ If ``tahoe run`` is pointed at a non-node directory, it reports an error and exits. """ return self._bad_directory_test( u"test_run_bad_directory", "tahoe run", lambda tahoe, p: tahoe.run(p), "is not a recognizable node directory", ) def test_run_bogus_directory(self): """ If ``tahoe run`` is pointed at a non-directory, it reports an error and exits. """ return self._bad_directory_test( u"test_run_bogus_directory", "tahoe run", lambda tahoe, p: CLINodeAPI( tahoe.reactor, tahoe.basedir.sibling(u"bogus"), ).run(p), "does not look like a directory at all" ) @inline_callbacks def _bad_directory_test(self, workdir, description, operation, expected_message): """ Verify that a certain ``tahoe`` CLI operation produces a certain expected message and then exits. :param unicode workdir: A distinct path name for this test to operate on. :param unicode description: A description of the operation being performed. :param operation: A two-argument callable implementing the operation. The first argument is a ``CLINodeAPI`` instance to use to perform the operation. The second argument is an ``IProcessProtocol`` to which the operations output must be delivered. :param unicode expected_message: Some text that is expected in the stdout or stderr of the operation in the successful case. :return: A ``Deferred`` that fires when the assertions have been made. """ basedir = self.workdir(workdir) fileutil.make_dirs(basedir) tahoe = CLINodeAPI(reactor, FilePath(basedir)) # If tahoe ends up thinking it should keep running, make sure it stops # promptly when the test is done. self.addCleanup(tahoe.cleanup) p = Expect() operation(tahoe, on_stdout_and_stderr(p)) client_running = p.expect(b"client running") result, index = yield DeferredList([ p.expect(expected_message.encode('utf-8')), client_running, ], fireOnOneCallback=True, consumeErrors=True, ) self.assertEqual( index, 0, "Expected error message from '{}', got something else: {}".format( description, str(p.get_buffered_output(), "utf-8"), ), ) # It should not be running (but windows shutdown can't run # code so the PID file still exists there). if not platform.isWindows(): self.assertFalse(tahoe.twistd_pid_file.exists()) # Wait for the operation to *complete*. If we got this far it's # because we got the expected message so we can expect the "tahoe ..." # child process to exit very soon. This other Deferred will fail when # it eventually does but DeferredList above will consume the error. # What's left is a perfect indicator that the process has exited and # we won't get blamed for leaving the reactor dirty. yield client_running def _simulate_windows_stdin_close(stdio): """ on Unix we can just close all the readers, correctly "simulating" a stdin close .. 
of course, Windows has to be difficult """ stdio.writeConnectionLost() stdio.readConnectionLost() class OnStdinCloseTests(SyncTestCase): """ Tests for on_stdin_close """ def test_close_called(self): """ our on-close method is called when stdin closes """ reactor = MemoryReactorClock() called = [] def onclose(): called.append(True) transport = on_stdin_close(reactor, onclose) self.assertEqual(called, []) if platform.isWindows(): _simulate_windows_stdin_close(transport) else: for reader in reactor.getReaders(): reader.loseConnection() reactor.advance(1) # ProcessReader does a callLater(0, ..) self.assertEqual(called, [True]) def test_exception_ignored(self): """ An exception from our on-close function is discarded. """ reactor = MemoryReactorClock() called = [] def onclose(): called.append(True) raise RuntimeError("unexpected error") transport = on_stdin_close(reactor, onclose) self.assertEqual(called, []) if platform.isWindows(): _simulate_windows_stdin_close(transport) else: for reader in reactor.getReaders(): reader.loseConnection() reactor.advance(1) # ProcessReader does a callLater(0, ..) self.assertEqual(called, [True]) class PidFileLocking(SyncTestCase): """ Direct tests for allmydata.util.pid functions """ def test_locking(self): """ Fail to create a pidfile if another process has the lock already. """ # this can't just be "our" process because the locking library # allows the same process to acquire a lock multiple times. pidfile = FilePath(self.mktemp()) lockfile = _pidfile_to_lockpath(pidfile) with open("other_lock.py", "w") as f: f.write( "\n".join([ "import filelock, time, sys", "with filelock.FileLock(sys.argv[1], timeout=1):", " sys.stdout.write('.\\n')", " sys.stdout.flush()", " time.sleep(10)", ]) ) proc = Popen( [sys.executable, "other_lock.py", lockfile.path], stdout=PIPE, stderr=PIPE, ) # make sure our subprocess has had time to acquire the lock # for sure (from the "." it prints) proc.stdout.read(2) # acquiring the same lock should fail; it is locked by the subprocess with self.assertRaises(ProcessInTheWay): check_pid_process(pidfile) proc.terminate() tahoe_lafs-1.20.0/src/allmydata/test/test_sftp.py0000644000000000000000000023574513615410400016742 0ustar00""" Ported to Python 3. 
""" import re, struct, traceback, time, calendar from stat import S_IFREG, S_IFDIR from twisted.trial import unittest from twisted.internet import defer, reactor from twisted.python.failure import Failure from twisted.internet.error import ProcessDone, ProcessTerminated from allmydata.util import deferredutil try: from twisted.conch import interfaces as conch_interfaces from twisted.conch.ssh import filetransfer as sftp from allmydata.frontends import sftpd except ImportError as e: conch_interfaces = sftp = sftpd = None # type: ignore conch_unavailable_reason = e else: conch_unavailable_reason = None # type: ignore from allmydata.interfaces import IDirectoryNode, ExistingChildError, NoSuchChildError from allmydata.mutable.common import NotWriteableError from allmydata.util.consumer import download_to_data from allmydata.immutable import upload from allmydata.mutable import publish from allmydata.test.no_network import GridTestMixin from allmydata.test.common import ShouldFailMixin from allmydata.test.common_util import ReallyEqualMixin class Handler(GridTestMixin, ShouldFailMixin, ReallyEqualMixin, unittest.TestCase): """This is a no-network unit test of the SFTPUserHandler and the abstractions it uses.""" if conch_unavailable_reason: skip = "SFTP support requires Twisted Conch which is not available: {}".format( conch_unavailable_reason, ) def shouldFailWithSFTPError(self, expected_code, which, callable, *args, **kwargs): assert isinstance(expected_code, int), repr(expected_code) assert isinstance(which, str), repr(which) s = traceback.format_stack() d = defer.maybeDeferred(callable, *args, **kwargs) def _done(res): if isinstance(res, Failure): res.trap(sftp.SFTPError) self.failUnlessReallyEqual(res.value.code, expected_code, "%s was supposed to raise SFTPError(%r), not SFTPError(%r): %s" % (which, expected_code, res.value.code, res)) else: print('@' + '@'.join(s)) self.fail("%s was supposed to raise SFTPError(%r), not get %r" % (which, expected_code, res)) d.addBoth(_done) return d def _set_up(self, basedir, num_clients=1, num_servers=10): self.basedir = "sftp/" + basedir self.set_up_grid(num_clients=num_clients, num_servers=num_servers, oneshare=True) self.client = self.g.clients[0] self.username = "alice" d = self.client.create_dirnode() def _created_root(node): self.root = node self.root_uri = node.get_uri() sftpd._reload() self.handler = sftpd.SFTPUserHandler(self.client, self.root, self.username) d.addCallback(_created_root) return d def _set_up_tree(self): u = publish.MutableData(b"mutable file contents") d = self.client.create_mutable_file(u) d.addCallback(lambda node: self.root.set_node(u"mutable", node)) def _created_mutable(n): self.mutable = n self.mutable_uri = n.get_uri() d.addCallback(_created_mutable) d.addCallback(lambda ign: self.root._create_and_validate_node(None, self.mutable.get_readonly_uri(), name=u"readonly")) d.addCallback(lambda node: self.root.set_node(u"readonly", node)) def _created_readonly(n): self.readonly = n self.readonly_uri = n.get_uri() d.addCallback(_created_readonly) gross = upload.Data(b"0123456789" * 101, None) d.addCallback(lambda ign: self.root.add_file(u"gro\u00DF", gross)) def _created_gross(n): self.gross = n self.gross_uri = n.get_uri() d.addCallback(_created_gross) small = upload.Data(b"0123456789", None) d.addCallback(lambda ign: self.root.add_file(u"small", small)) def _created_small(n): self.small = n self.small_uri = n.get_uri() d.addCallback(_created_small) small2 = upload.Data(b"Small enough for a LIT too", None) d.addCallback(lambda 
ign: self.root.add_file(u"small2", small2)) def _created_small2(n): self.small2 = n self.small2_uri = n.get_uri() d.addCallback(_created_small2) empty_litdir_uri = b"URI:DIR2-LIT:" # contains one child which is itself also LIT: tiny_litdir_uri = b"URI:DIR2-LIT:gqytunj2onug64tufqzdcosvkjetutcjkq5gw4tvm5vwszdgnz5hgyzufqydulbshj5x2lbm" unknown_uri = b"x-tahoe-crazy://I_am_from_the_future." d.addCallback(lambda ign: self.root._create_and_validate_node(None, empty_litdir_uri, name=u"empty_lit_dir")) def _created_empty_lit_dir(n): self.empty_lit_dir = n self.empty_lit_dir_uri = n.get_uri() self.root.set_node(u"empty_lit_dir", n) d.addCallback(_created_empty_lit_dir) d.addCallback(lambda ign: self.root._create_and_validate_node(None, tiny_litdir_uri, name=u"tiny_lit_dir")) def _created_tiny_lit_dir(n): self.tiny_lit_dir = n self.tiny_lit_dir_uri = n.get_uri() self.root.set_node(u"tiny_lit_dir", n) d.addCallback(_created_tiny_lit_dir) d.addCallback(lambda ign: self.root._create_and_validate_node(None, unknown_uri, name=u"unknown")) def _created_unknown(n): self.unknown = n self.unknown_uri = n.get_uri() self.root.set_node(u"unknown", n) d.addCallback(_created_unknown) fall_of_the_Berlin_wall = calendar.timegm(time.strptime("1989-11-09 20:00:00 UTC", "%Y-%m-%d %H:%M:%S %Z")) md = {'mtime': fall_of_the_Berlin_wall, 'tahoe': {'linkmotime': fall_of_the_Berlin_wall}} d.addCallback(lambda ign: self.root.set_node(u"loop", self.root, metadata=md)) return d def test_basic(self): d = self._set_up("basic") def _check(ign): # Test operations that have no side-effects, and don't need the tree. version = self.handler.gotVersion(3, {}) self.failUnless(isinstance(version, dict)) self.failUnlessReallyEqual(self.handler._path_from_string(b""), []) self.failUnlessReallyEqual(self.handler._path_from_string(b"/"), []) self.failUnlessReallyEqual(self.handler._path_from_string(b"."), []) self.failUnlessReallyEqual(self.handler._path_from_string(b"//"), []) self.failUnlessReallyEqual(self.handler._path_from_string(b"/."), []) self.failUnlessReallyEqual(self.handler._path_from_string(b"/./"), []) self.failUnlessReallyEqual(self.handler._path_from_string(b"foo"), [u"foo"]) self.failUnlessReallyEqual(self.handler._path_from_string(b"/foo"), [u"foo"]) self.failUnlessReallyEqual(self.handler._path_from_string(b"foo/"), [u"foo"]) self.failUnlessReallyEqual(self.handler._path_from_string(b"/foo/"), [u"foo"]) self.failUnlessReallyEqual(self.handler._path_from_string(b"foo/bar"), [u"foo", u"bar"]) self.failUnlessReallyEqual(self.handler._path_from_string(b"/foo/bar"), [u"foo", u"bar"]) self.failUnlessReallyEqual(self.handler._path_from_string(b"foo/bar//"), [u"foo", u"bar"]) self.failUnlessReallyEqual(self.handler._path_from_string(b"/foo/bar//"), [u"foo", u"bar"]) self.failUnlessReallyEqual(self.handler._path_from_string(b"foo/./bar"), [u"foo", u"bar"]) self.failUnlessReallyEqual(self.handler._path_from_string(b"./foo/./bar"), [u"foo", u"bar"]) self.failUnlessReallyEqual(self.handler._path_from_string(b"foo/../bar"), [u"bar"]) self.failUnlessReallyEqual(self.handler._path_from_string(b"/foo/../bar"), [u"bar"]) self.failUnlessReallyEqual(self.handler._path_from_string(b"../bar"), [u"bar"]) self.failUnlessReallyEqual(self.handler._path_from_string(b"/../bar"), [u"bar"]) self.failUnlessReallyEqual(self.handler.realPath(b""), b"/") self.failUnlessReallyEqual(self.handler.realPath(b"/"), b"/") self.failUnlessReallyEqual(self.handler.realPath(b"."), b"/") self.failUnlessReallyEqual(self.handler.realPath(b"//"), b"/") 
self.failUnlessReallyEqual(self.handler.realPath(b"/."), b"/") self.failUnlessReallyEqual(self.handler.realPath(b"/./"), b"/") self.failUnlessReallyEqual(self.handler.realPath(b"foo"), b"/foo") self.failUnlessReallyEqual(self.handler.realPath(b"/foo"), b"/foo") self.failUnlessReallyEqual(self.handler.realPath(b"foo/"), b"/foo") self.failUnlessReallyEqual(self.handler.realPath(b"/foo/"), b"/foo") self.failUnlessReallyEqual(self.handler.realPath(b"foo/bar"), b"/foo/bar") self.failUnlessReallyEqual(self.handler.realPath(b"/foo/bar"), b"/foo/bar") self.failUnlessReallyEqual(self.handler.realPath(b"foo/bar//"), b"/foo/bar") self.failUnlessReallyEqual(self.handler.realPath(b"/foo/bar//"), b"/foo/bar") self.failUnlessReallyEqual(self.handler.realPath(b"foo/./bar"), b"/foo/bar") self.failUnlessReallyEqual(self.handler.realPath(b"./foo/./bar"), b"/foo/bar") self.failUnlessReallyEqual(self.handler.realPath(b"foo/../bar"), b"/bar") self.failUnlessReallyEqual(self.handler.realPath(b"/foo/../bar"), b"/bar") self.failUnlessReallyEqual(self.handler.realPath(b"../bar"), b"/bar") self.failUnlessReallyEqual(self.handler.realPath(b"/../bar"), b"/bar") d.addCallback(_check) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "_path_from_string invalid UTF-8", self.handler._path_from_string, b"\xFF")) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "realPath invalid UTF-8", self.handler.realPath, b"\xFF")) return d def test_convert_error(self): self.failUnlessReallyEqual(sftpd._convert_error(None, "request"), None) d = defer.succeed(None) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_FAILURE, "_convert_error SFTPError", sftpd._convert_error, Failure(sftp.SFTPError(sftp.FX_FAILURE, "foo")), "request")) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "_convert_error NoSuchChildError", sftpd._convert_error, Failure(NoSuchChildError("foo")), "request")) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_FAILURE, "_convert_error ExistingChildError", sftpd._convert_error, Failure(ExistingChildError("foo")), "request")) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "_convert_error NotWriteableError", sftpd._convert_error, Failure(NotWriteableError("foo")), "request")) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_OP_UNSUPPORTED, "_convert_error NotImplementedError", sftpd._convert_error, Failure(NotImplementedError("foo")), "request")) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_EOF, "_convert_error EOFError", sftpd._convert_error, Failure(EOFError("foo")), "request")) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_EOF, "_convert_error defer.FirstError", sftpd._convert_error, Failure(defer.FirstError( Failure(sftp.SFTPError(sftp.FX_EOF, "foo")), 0)), "request")) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_FAILURE, "_convert_error AssertionError", sftpd._convert_error, Failure(AssertionError("foo")), "request")) return d def test_not_implemented(self): d = self._set_up("not_implemented") d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_OP_UNSUPPORTED, "readLink link", self.handler.readLink, b"link")) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_OP_UNSUPPORTED, "makeLink link file", self.handler.makeLink, b"link", b"file")) return d def _compareDirLists(self, actual, expected): actual_list = sorted(actual) expected_list = sorted(expected) 
self.failUnlessReallyEqual(len(actual_list), len(expected_list), "%r is wrong length, expecting %r" % (actual_list, expected_list)) for (a, b) in zip(actual_list, expected_list): (name, text, attrs) = a (expected_name, expected_text_re, expected_attrs) = b self.failUnlessReallyEqual(name, expected_name) self.failUnless(re.match(expected_text_re, text), "%r does not match %r in\n%r" % (text, expected_text_re, actual_list)) self._compareAttributes(attrs, expected_attrs) def _compareAttributes(self, attrs, expected_attrs): # It is ok for there to be extra actual attributes. # TODO: check times for e in expected_attrs: self.failUnless(e in attrs, "%r is not in\n%r" % (e, attrs)) self.failUnlessReallyEqual(attrs[e], expected_attrs[e], "%r:%r is not %r in\n%r" % (e, attrs[e], expected_attrs[e], attrs)) def test_openDirectory_and_attrs(self): d = self._set_up("openDirectory_and_attrs") d.addCallback(lambda ign: self._set_up_tree()) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "openDirectory small", self.handler.openDirectory, b"small")) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "openDirectory unknown", self.handler.openDirectory, b"unknown")) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "openDirectory nodir", self.handler.openDirectory, b"nodir")) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "openDirectory nodir/nodir", self.handler.openDirectory, b"nodir/nodir")) gross = u"gro\u00DF".encode("utf-8") expected_root = [ (b'empty_lit_dir', br'dr-xr-xr-x .* 0 .* empty_lit_dir$', {'permissions': S_IFDIR | 0o555}), (gross, br'-rw-rw-rw- .* 1010 .* '+gross+b'$', {'permissions': S_IFREG | 0o666, 'size': 1010}), # The fall of the Berlin wall may have been on 9th or 10th November 1989 depending on the gateway's timezone. #('loop', r'drwxrwxrwx .* 0 Nov (09|10) 1989 loop$', {'permissions': S_IFDIR | 0777}), (b'loop', br'drwxrwxrwx .* 0 .* loop$', {'permissions': S_IFDIR | 0o777}), (b'mutable', br'-rw-rw-rw- .* 0 .* mutable$', {'permissions': S_IFREG | 0o666}), (b'readonly', br'-r--r--r-- .* 0 .* readonly$', {'permissions': S_IFREG | 0o444}), (b'small', br'-rw-rw-rw- .* 10 .* small$', {'permissions': S_IFREG | 0o666, 'size': 10}), (b'small2', br'-rw-rw-rw- .* 26 .* small2$', {'permissions': S_IFREG | 0o666, 'size': 26}), (b'tiny_lit_dir', br'dr-xr-xr-x .* 0 .* tiny_lit_dir$', {'permissions': S_IFDIR | 0o555}), (b'unknown', br'\?--------- .* 0 .* unknown$', {'permissions': 0}), ] d.addCallback(lambda ign: self.handler.openDirectory(b"")) d.addCallback(lambda res: self._compareDirLists(res, expected_root)) d.addCallback(lambda ign: self.handler.openDirectory(b"loop")) d.addCallback(lambda res: self._compareDirLists(res, expected_root)) d.addCallback(lambda ign: self.handler.openDirectory(b"loop/loop")) d.addCallback(lambda res: self._compareDirLists(res, expected_root)) d.addCallback(lambda ign: self.handler.openDirectory(b"empty_lit_dir")) d.addCallback(lambda res: self._compareDirLists(res, [])) # The UTC epoch may either be in Jan 1 1970 or Dec 31 1969 depending on the gateway's timezone. 
expected_tiny_lit = [ (b'short', br'-r--r--r-- .* 8 (Jan 01 1970|Dec 31 1969) short$', {'permissions': S_IFREG | 0o444, 'size': 8}), ] d.addCallback(lambda ign: self.handler.openDirectory(b"tiny_lit_dir")) d.addCallback(lambda res: self._compareDirLists(res, expected_tiny_lit)) d.addCallback(lambda ign: self.handler.getAttrs(b"small", True)) d.addCallback(lambda attrs: self._compareAttributes(attrs, {'permissions': S_IFREG | 0o666, 'size': 10})) d.addCallback(lambda ign: self.handler.setAttrs(b"small", {})) d.addCallback(lambda res: self.failUnlessReallyEqual(res, None)) d.addCallback(lambda ign: self.handler.getAttrs(b"small", True)) d.addCallback(lambda attrs: self._compareAttributes(attrs, {'permissions': S_IFREG | 0o666, 'size': 10})) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_OP_UNSUPPORTED, "setAttrs size", self.handler.setAttrs, b"small", {'size': 0})) d.addCallback(lambda ign: self.failUnlessEqual(sftpd.all_heisenfiles, {})) d.addCallback(lambda ign: self.failUnlessEqual(self.handler._heisenfiles, {})) return d def test_openFile_read(self): d = self._set_up("openFile_read") d.addCallback(lambda ign: self._set_up_tree()) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_BAD_MESSAGE, "openFile small 0 bad", self.handler.openFile, b"small", 0, {})) # attempting to open a non-existent file should fail d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "openFile nofile READ nosuch", self.handler.openFile, b"nofile", sftp.FXF_READ, {})) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "openFile nodir/file READ nosuch", self.handler.openFile, b"nodir/file", sftp.FXF_READ, {})) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "openFile unknown READ denied", self.handler.openFile, b"unknown", sftp.FXF_READ, {})) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "openFile unknown/file READ denied", self.handler.openFile, b"unknown/file", sftp.FXF_READ, {})) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "openFile tiny_lit_dir READ denied", self.handler.openFile, b"tiny_lit_dir", sftp.FXF_READ, {})) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "openFile unknown uri READ denied", self.handler.openFile, b"uri/"+self.unknown_uri, sftp.FXF_READ, {})) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "openFile tiny_lit_dir uri READ denied", self.handler.openFile, b"uri/"+self.tiny_lit_dir_uri, sftp.FXF_READ, {})) # FIXME: should be FX_NO_SUCH_FILE? 
d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "openFile noexist uri READ denied", self.handler.openFile, b"uri/URI:noexist", sftp.FXF_READ, {})) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "openFile invalid UTF-8 uri READ denied", self.handler.openFile, b"uri/URI:\xFF", sftp.FXF_READ, {})) # reading an existing file should succeed d.addCallback(lambda ign: self.handler.openFile(b"small", sftp.FXF_READ, {})) def _read_small(rf): d2 = rf.readChunk(0, 10) d2.addCallback(lambda data: self.failUnlessReallyEqual(data, b"0123456789")) d2.addCallback(lambda ign: rf.readChunk(2, 6)) d2.addCallback(lambda data: self.failUnlessReallyEqual(data, b"234567")) d2.addCallback(lambda ign: rf.readChunk(1, 0)) d2.addCallback(lambda data: self.failUnlessReallyEqual(data, b"")) d2.addCallback(lambda ign: rf.readChunk(8, 4)) # read that starts before EOF is OK d2.addCallback(lambda data: self.failUnlessReallyEqual(data, b"89")) d2.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_EOF, "readChunk starting at EOF (0-byte)", rf.readChunk, 10, 0)) d2.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_EOF, "readChunk starting at EOF", rf.readChunk, 10, 1)) d2.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_EOF, "readChunk starting after EOF", rf.readChunk, 11, 1)) d2.addCallback(lambda ign: rf.getAttrs()) d2.addCallback(lambda attrs: self._compareAttributes(attrs, {'permissions': S_IFREG | 0o666, 'size': 10})) d2.addCallback(lambda ign: self.handler.getAttrs(b"small", followLinks=0)) d2.addCallback(lambda attrs: self._compareAttributes(attrs, {'permissions': S_IFREG | 0o666, 'size': 10})) d2.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "writeChunk on read-only handle denied", rf.writeChunk, 0, b"a")) d2.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "setAttrs on read-only handle denied", rf.setAttrs, {})) d2.addCallback(lambda ign: rf.close()) d2.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_BAD_MESSAGE, "readChunk on closed file bad", rf.readChunk, 0, 1)) d2.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_BAD_MESSAGE, "getAttrs on closed file bad", rf.getAttrs)) d2.addCallback(lambda ign: rf.close()) # should be no-op return d2 d.addCallback(_read_small) # repeat for a large file gross = u"gro\u00DF".encode("utf-8") d.addCallback(lambda ign: self.handler.openFile(gross, sftp.FXF_READ, {})) def _read_gross(rf): d2 = rf.readChunk(0, 10) d2.addCallback(lambda data: self.failUnlessReallyEqual(data, b"0123456789")) d2.addCallback(lambda ign: rf.readChunk(2, 6)) d2.addCallback(lambda data: self.failUnlessReallyEqual(data, b"234567")) d2.addCallback(lambda ign: rf.readChunk(1, 0)) d2.addCallback(lambda data: self.failUnlessReallyEqual(data, b"")) d2.addCallback(lambda ign: rf.readChunk(1008, 4)) # read that starts before EOF is OK d2.addCallback(lambda data: self.failUnlessReallyEqual(data, b"89")) d2.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_EOF, "readChunk starting at EOF (0-byte)", rf.readChunk, 1010, 0)) d2.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_EOF, "readChunk starting at EOF", rf.readChunk, 1010, 1)) d2.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_EOF, "readChunk starting after EOF", rf.readChunk, 1011, 1)) d2.addCallback(lambda ign: rf.getAttrs()) d2.addCallback(lambda attrs: self._compareAttributes(attrs, {'permissions': S_IFREG | 0o666, 'size': 1010})) 
d2.addCallback(lambda ign: self.handler.getAttrs(gross, followLinks=0)) d2.addCallback(lambda attrs: self._compareAttributes(attrs, {'permissions': S_IFREG | 0o666, 'size': 1010})) d2.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "writeChunk on read-only handle denied", rf.writeChunk, 0, b"a")) d2.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "setAttrs on read-only handle denied", rf.setAttrs, {})) d2.addCallback(lambda ign: rf.close()) d2.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_BAD_MESSAGE, "readChunk on closed file", rf.readChunk, 0, 1)) d2.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_BAD_MESSAGE, "getAttrs on closed file", rf.getAttrs)) d2.addCallback(lambda ign: rf.close()) # should be no-op return d2 d.addCallback(_read_gross) # reading an existing small file via uri/ should succeed d.addCallback(lambda ign: self.handler.openFile(b"uri/"+self.small_uri, sftp.FXF_READ, {})) def _read_small_uri(rf): d2 = rf.readChunk(0, 10) d2.addCallback(lambda data: self.failUnlessReallyEqual(data, b"0123456789")) d2.addCallback(lambda ign: rf.close()) return d2 d.addCallback(_read_small_uri) # repeat for a large file d.addCallback(lambda ign: self.handler.openFile(b"uri/"+self.gross_uri, sftp.FXF_READ, {})) def _read_gross_uri(rf): d2 = rf.readChunk(0, 10) d2.addCallback(lambda data: self.failUnlessReallyEqual(data, b"0123456789")) d2.addCallback(lambda ign: rf.close()) return d2 d.addCallback(_read_gross_uri) # repeat for a mutable file d.addCallback(lambda ign: self.handler.openFile(b"uri/"+self.mutable_uri, sftp.FXF_READ, {})) def _read_mutable_uri(rf): d2 = rf.readChunk(0, 100) d2.addCallback(lambda data: self.failUnlessReallyEqual(data, b"mutable file contents")) d2.addCallback(lambda ign: rf.close()) return d2 d.addCallback(_read_mutable_uri) # repeat for a file within a directory referenced by URI d.addCallback(lambda ign: self.handler.openFile(b"uri/"+self.tiny_lit_dir_uri+b"/short", sftp.FXF_READ, {})) def _read_short(rf): d2 = rf.readChunk(0, 100) d2.addCallback(lambda data: self.failUnlessReallyEqual(data, b"The end.")) d2.addCallback(lambda ign: rf.close()) return d2 d.addCallback(_read_short) # check that failed downloads cause failed reads. Note that this # trashes the grid (by deleting all shares), so this must be at the # end of the test function. d.addCallback(lambda ign: self.handler.openFile(b"uri/"+self.gross_uri, sftp.FXF_READ, {})) def _read_broken(rf): d2 = defer.succeed(None) d2.addCallback(lambda ign: self.g.nuke_from_orbit()) d2.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_FAILURE, "read broken", rf.readChunk, 0, 100)) # close shouldn't fail d2.addCallback(lambda ign: rf.close()) d2.addCallback(lambda res: self.failUnlessReallyEqual(res, None)) return d2 d.addCallback(_read_broken) d.addCallback(lambda ign: self.failUnlessEqual(sftpd.all_heisenfiles, {})) d.addCallback(lambda ign: self.failUnlessEqual(self.handler._heisenfiles, {})) return d def test_openFile_read_error(self): # The check at the end of openFile_read tested this for large files, # but it trashed the grid in the process, so this needs to be a # separate test. 
small = upload.Data(b"0123456789"*10, None) d = self._set_up("openFile_read_error") d.addCallback(lambda ign: self.root.add_file(u"small", small)) d.addCallback(lambda n: self.handler.openFile(b"/uri/"+n.get_uri(), sftp.FXF_READ, {})) def _read_broken(rf): d2 = defer.succeed(None) d2.addCallback(lambda ign: self.g.nuke_from_orbit()) d2.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_FAILURE, "read broken", rf.readChunk, 0, 100)) # close shouldn't fail d2.addCallback(lambda ign: rf.close()) d2.addCallback(lambda res: self.failUnlessReallyEqual(res, None)) return d2 d.addCallback(_read_broken) d.addCallback(lambda ign: self.failUnlessEqual(sftpd.all_heisenfiles, {})) d.addCallback(lambda ign: self.failUnlessEqual(self.handler._heisenfiles, {})) return d def test_openFile_write(self): d = self._set_up("openFile_write") d.addCallback(lambda ign: self._set_up_tree()) # '' is an invalid filename d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "openFile '' WRITE|CREAT|TRUNC nosuch", self.handler.openFile, b"", sftp.FXF_WRITE | sftp.FXF_CREAT | sftp.FXF_TRUNC, {})) # TRUNC is not valid without CREAT if the file does not already exist d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "openFile newfile WRITE|TRUNC nosuch", self.handler.openFile, b"newfile", sftp.FXF_WRITE | sftp.FXF_TRUNC, {})) # EXCL is not valid without CREAT d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_BAD_MESSAGE, "openFile small WRITE|EXCL bad", self.handler.openFile, b"small", sftp.FXF_WRITE | sftp.FXF_EXCL, {})) # cannot write to an existing directory d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "openFile tiny_lit_dir WRITE denied", self.handler.openFile, b"tiny_lit_dir", sftp.FXF_WRITE, {})) # cannot write to an existing unknown d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "openFile unknown WRITE denied", self.handler.openFile, b"unknown", sftp.FXF_WRITE, {})) # cannot create a child of an unknown d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "openFile unknown/newfile WRITE|CREAT denied", self.handler.openFile, b"unknown/newfile", sftp.FXF_WRITE | sftp.FXF_CREAT, {})) # cannot write to a new file in an immutable directory d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "openFile tiny_lit_dir/newfile WRITE|CREAT|TRUNC denied", self.handler.openFile, b"tiny_lit_dir/newfile", sftp.FXF_WRITE | sftp.FXF_CREAT | sftp.FXF_TRUNC, {})) # cannot write to an existing immutable file in an immutable directory (with or without CREAT and EXCL) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "openFile tiny_lit_dir/short WRITE denied", self.handler.openFile, b"tiny_lit_dir/short", sftp.FXF_WRITE, {})) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "openFile tiny_lit_dir/short WRITE|CREAT denied", self.handler.openFile, b"tiny_lit_dir/short", sftp.FXF_WRITE | sftp.FXF_CREAT, {})) # cannot write to a mutable file via a readonly cap (by path or uri) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "openFile readonly WRITE denied", self.handler.openFile, b"readonly", sftp.FXF_WRITE, {})) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "openFile readonly uri WRITE denied", self.handler.openFile, b"uri/"+self.readonly_uri, sftp.FXF_WRITE, {})) # cannot create a file with the EXCL flag if 
it already exists d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_FAILURE, "openFile small WRITE|CREAT|EXCL failure", self.handler.openFile, b"small", sftp.FXF_WRITE | sftp.FXF_CREAT | sftp.FXF_EXCL, {})) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_FAILURE, "openFile mutable WRITE|CREAT|EXCL failure", self.handler.openFile, b"mutable", sftp.FXF_WRITE | sftp.FXF_CREAT | sftp.FXF_EXCL, {})) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_FAILURE, "openFile mutable uri WRITE|CREAT|EXCL failure", self.handler.openFile, b"uri/"+self.mutable_uri, sftp.FXF_WRITE | sftp.FXF_CREAT | sftp.FXF_EXCL, {})) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_FAILURE, "openFile tiny_lit_dir/short WRITE|CREAT|EXCL failure", self.handler.openFile, b"tiny_lit_dir/short", sftp.FXF_WRITE | sftp.FXF_CREAT | sftp.FXF_EXCL, {})) # cannot write to an immutable file if we don't have its parent (with or without CREAT, TRUNC, or EXCL) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "openFile small uri WRITE denied", self.handler.openFile, b"uri/"+self.small_uri, sftp.FXF_WRITE, {})) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "openFile small uri WRITE|CREAT denied", self.handler.openFile, b"uri/"+self.small_uri, sftp.FXF_WRITE | sftp.FXF_CREAT, {})) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "openFile small uri WRITE|CREAT|TRUNC denied", self.handler.openFile, b"uri/"+self.small_uri, sftp.FXF_WRITE | sftp.FXF_CREAT | sftp.FXF_TRUNC, {})) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "openFile small uri WRITE|CREAT|EXCL denied", self.handler.openFile, b"uri/"+self.small_uri, sftp.FXF_WRITE | sftp.FXF_CREAT | sftp.FXF_EXCL, {})) # test creating a new file with truncation and extension d.addCallback(lambda ign: self.handler.openFile(b"newfile", sftp.FXF_WRITE | sftp.FXF_CREAT | sftp.FXF_TRUNC, {})) def _write(wf): d2 = wf.writeChunk(0, b"0123456789") d2.addCallback(lambda res: self.failUnlessReallyEqual(res, None)) d2.addCallback(lambda ign: wf.writeChunk(8, b"0123")) d2.addCallback(lambda ign: wf.writeChunk(13, b"abc")) d2.addCallback(lambda ign: wf.getAttrs()) d2.addCallback(lambda attrs: self._compareAttributes(attrs, {'permissions': S_IFREG | 0o666, 'size': 16})) d2.addCallback(lambda ign: self.handler.getAttrs(b"newfile", followLinks=0)) d2.addCallback(lambda attrs: self._compareAttributes(attrs, {'permissions': S_IFREG | 0o666, 'size': 16})) d2.addCallback(lambda ign: wf.setAttrs({})) d2.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_BAD_MESSAGE, "setAttrs with negative size bad", wf.setAttrs, {'size': -1})) d2.addCallback(lambda ign: wf.setAttrs({'size': 14})) d2.addCallback(lambda ign: wf.getAttrs()) d2.addCallback(lambda attrs: self.failUnlessReallyEqual(attrs['size'], 14)) d2.addCallback(lambda ign: wf.setAttrs({'size': 14})) d2.addCallback(lambda ign: wf.getAttrs()) d2.addCallback(lambda attrs: self.failUnlessReallyEqual(attrs['size'], 14)) d2.addCallback(lambda ign: wf.setAttrs({'size': 17})) d2.addCallback(lambda ign: wf.getAttrs()) d2.addCallback(lambda attrs: self.failUnlessReallyEqual(attrs['size'], 17)) d2.addCallback(lambda ign: self.handler.getAttrs(b"newfile", followLinks=0)) d2.addCallback(lambda attrs: self.failUnlessReallyEqual(attrs['size'], 17)) d2.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "readChunk on write-only handle denied", 
wf.readChunk, 0, 1)) d2.addCallback(lambda ign: wf.close()) d2.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_BAD_MESSAGE, "writeChunk on closed file bad", wf.writeChunk, 0, b"a")) d2.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_BAD_MESSAGE, "setAttrs on closed file bad", wf.setAttrs, {'size': 0})) d2.addCallback(lambda ign: wf.close()) # should be no-op return d2 d.addCallback(_write) d.addCallback(lambda ign: self.root.get(u"newfile")) d.addCallback(lambda node: download_to_data(node)) d.addCallback(lambda data: self.failUnlessReallyEqual(data, b"012345670123\x00a\x00\x00\x00")) # test APPEND flag, and also replacing an existing file ("newfile" created by the previous test) d.addCallback(lambda ign: self.handler.openFile(b"newfile", sftp.FXF_WRITE | sftp.FXF_CREAT | sftp.FXF_TRUNC | sftp.FXF_APPEND, {})) def _write_append(wf): d2 = wf.writeChunk(0, b"0123456789") d2.addCallback(lambda ign: wf.writeChunk(8, b"0123")) d2.addCallback(lambda ign: wf.setAttrs({'size': 17})) d2.addCallback(lambda ign: wf.getAttrs()) d2.addCallback(lambda attrs: self.failUnlessReallyEqual(attrs['size'], 17)) d2.addCallback(lambda ign: wf.writeChunk(0, b"z")) d2.addCallback(lambda ign: wf.close()) return d2 d.addCallback(_write_append) d.addCallback(lambda ign: self.root.get(u"newfile")) d.addCallback(lambda node: download_to_data(node)) d.addCallback(lambda data: self.failUnlessReallyEqual(data, b"01234567890123\x00\x00\x00z")) # test WRITE | TRUNC without CREAT, when the file already exists # This is invalid according to section 6.3 of the SFTP spec, but required for interoperability, # since POSIX does allow O_WRONLY | O_TRUNC. d.addCallback(lambda ign: self.handler.openFile(b"newfile", sftp.FXF_WRITE | sftp.FXF_TRUNC, {})) def _write_trunc(wf): d2 = wf.writeChunk(0, b"01234") d2.addCallback(lambda ign: wf.close()) return d2 d.addCallback(_write_trunc) d.addCallback(lambda ign: self.root.get(u"newfile")) d.addCallback(lambda node: download_to_data(node)) d.addCallback(lambda data: self.failUnlessReallyEqual(data, b"01234")) # test WRITE | TRUNC with permissions: 0 d.addCallback(lambda ign: self.handler.openFile(b"newfile", sftp.FXF_WRITE | sftp.FXF_TRUNC, {'permissions': 0})) d.addCallback(_write_trunc) d.addCallback(lambda ign: self.root.get(u"newfile")) d.addCallback(lambda node: download_to_data(node)) d.addCallback(lambda data: self.failUnlessReallyEqual(data, b"01234")) d.addCallback(lambda ign: self.root.get_metadata_for(u"newfile")) d.addCallback(lambda metadata: self.failIf(metadata.get('no-write', False), metadata)) # test EXCL flag d.addCallback(lambda ign: self.handler.openFile(b"excl", sftp.FXF_WRITE | sftp.FXF_CREAT | sftp.FXF_TRUNC | sftp.FXF_EXCL, {})) def _write_excl(wf): d2 = self.root.get(u"excl") d2.addCallback(lambda node: download_to_data(node)) d2.addCallback(lambda data: self.failUnlessReallyEqual(data, b"")) d2.addCallback(lambda ign: wf.writeChunk(0, b"0123456789")) d2.addCallback(lambda ign: wf.close()) return d2 d.addCallback(_write_excl) d.addCallback(lambda ign: self.root.get(u"excl")) d.addCallback(lambda node: download_to_data(node)) d.addCallback(lambda data: self.failUnlessReallyEqual(data, b"0123456789")) # test that writing a zero-length file with EXCL only updates the directory once d.addCallback(lambda ign: self.handler.openFile(b"zerolength", sftp.FXF_WRITE | sftp.FXF_CREAT | sftp.FXF_EXCL, {})) def _write_excl_zerolength(wf): d2 = self.root.get(u"zerolength") d2.addCallback(lambda node: download_to_data(node)) d2.addCallback(lambda data: 
self.failUnlessReallyEqual(data, b"")) # FIXME: no API to get the best version number exists (fix as part of #993) """ d2.addCallback(lambda ign: self.root.get_best_version_number()) def _check_version(version): d3 = wf.close() d3.addCallback(lambda ign: self.root.get_best_version_number()) d3.addCallback(lambda new_version: self.failUnlessReallyEqual(new_version, version)) return d3 d2.addCallback(_check_version) """ d2.addCallback(lambda ign: wf.close()) return d2 d.addCallback(_write_excl_zerolength) d.addCallback(lambda ign: self.root.get(u"zerolength")) d.addCallback(lambda node: download_to_data(node)) d.addCallback(lambda data: self.failUnlessReallyEqual(data, b"")) # test WRITE | CREAT | EXCL | APPEND d.addCallback(lambda ign: self.handler.openFile(b"exclappend", sftp.FXF_WRITE | sftp.FXF_CREAT | sftp.FXF_EXCL | sftp.FXF_APPEND, {})) def _write_excl_append(wf): d2 = self.root.get(u"exclappend") d2.addCallback(lambda node: download_to_data(node)) d2.addCallback(lambda data: self.failUnlessReallyEqual(data, b"")) d2.addCallback(lambda ign: wf.writeChunk(10, b"0123456789")) d2.addCallback(lambda ign: wf.writeChunk(5, b"01234")) d2.addCallback(lambda ign: wf.close()) return d2 d.addCallback(_write_excl_append) d.addCallback(lambda ign: self.root.get(u"exclappend")) d.addCallback(lambda node: download_to_data(node)) d.addCallback(lambda data: self.failUnlessReallyEqual(data, b"012345678901234")) # test WRITE | CREAT | APPEND when the file does not already exist d.addCallback(lambda ign: self.handler.openFile(b"creatappend", sftp.FXF_WRITE | sftp.FXF_CREAT | sftp.FXF_APPEND, {})) def _write_creat_append_new(wf): d2 = wf.writeChunk(10, b"0123456789") d2.addCallback(lambda ign: wf.writeChunk(5, b"01234")) d2.addCallback(lambda ign: wf.close()) return d2 d.addCallback(_write_creat_append_new) d.addCallback(lambda ign: self.root.get(u"creatappend")) d.addCallback(lambda node: download_to_data(node)) d.addCallback(lambda data: self.failUnlessReallyEqual(data, b"012345678901234")) # ... and when it does exist d.addCallback(lambda ign: self.handler.openFile(b"creatappend", sftp.FXF_WRITE | sftp.FXF_CREAT | sftp.FXF_APPEND, {})) def _write_creat_append_existing(wf): d2 = wf.writeChunk(5, b"01234") d2.addCallback(lambda ign: wf.close()) return d2 d.addCallback(_write_creat_append_existing) d.addCallback(lambda ign: self.root.get(u"creatappend")) d.addCallback(lambda node: download_to_data(node)) d.addCallback(lambda data: self.failUnlessReallyEqual(data, b"01234567890123401234")) # test WRITE | CREAT without TRUNC, when the file does not already exist d.addCallback(lambda ign: self.handler.openFile(b"newfile2", sftp.FXF_WRITE | sftp.FXF_CREAT, {})) def _write_creat_new(wf): d2 = wf.writeChunk(0, b"0123456789") d2.addCallback(lambda ign: wf.close()) return d2 d.addCallback(_write_creat_new) d.addCallback(lambda ign: self.root.get(u"newfile2")) d.addCallback(lambda node: download_to_data(node)) d.addCallback(lambda data: self.failUnlessReallyEqual(data, b"0123456789")) # ... 
and when it does exist d.addCallback(lambda ign: self.handler.openFile(b"newfile2", sftp.FXF_WRITE | sftp.FXF_CREAT, {})) def _write_creat_existing(wf): d2 = wf.writeChunk(0, b"abcde") d2.addCallback(lambda ign: wf.close()) return d2 d.addCallback(_write_creat_existing) d.addCallback(lambda ign: self.root.get(u"newfile2")) d.addCallback(lambda node: download_to_data(node)) d.addCallback(lambda data: self.failUnlessReallyEqual(data, b"abcde56789")) d.addCallback(lambda ign: self.root.set_node(u"mutable2", self.mutable)) # test writing to a mutable file d.addCallback(lambda ign: self.handler.openFile(b"mutable", sftp.FXF_WRITE, {})) def _write_mutable(wf): d2 = wf.writeChunk(8, b"new!") d2.addCallback(lambda ign: wf.close()) return d2 d.addCallback(_write_mutable) d.addCallback(lambda ign: self.root.get(u"mutable")) def _check_same_file(node): self.failUnless(node.is_mutable()) self.failIf(node.is_readonly()) self.failUnlessReallyEqual(node.get_uri(), self.mutable_uri) return node.download_best_version() d.addCallback(_check_same_file) d.addCallback(lambda data: self.failUnlessReallyEqual(data, b"mutable new! contents")) # ... and with permissions, which should be ignored d.addCallback(lambda ign: self.handler.openFile(b"mutable", sftp.FXF_WRITE, {'permissions': 0})) d.addCallback(_write_mutable) d.addCallback(lambda ign: self.root.get(u"mutable")) d.addCallback(_check_same_file) d.addCallback(lambda data: self.failUnlessReallyEqual(data, b"mutable new! contents")) # ... and with a setAttrs call that diminishes the parent link to read-only, first by path d.addCallback(lambda ign: self.handler.openFile(b"mutable", sftp.FXF_WRITE, {})) def _write_mutable_setattr(wf): d2 = wf.writeChunk(8, b"read-only link from parent") d2.addCallback(lambda ign: self.handler.setAttrs(b"mutable", {'permissions': 0o444})) d2.addCallback(lambda ign: self.root.get(u"mutable")) d2.addCallback(lambda node: self.failUnless(node.is_readonly())) d2.addCallback(lambda ign: wf.getAttrs()) d2.addCallback(lambda attrs: self.failUnlessReallyEqual(attrs['permissions'], S_IFREG | 0o666)) d2.addCallback(lambda ign: self.handler.getAttrs(b"mutable", followLinks=0)) d2.addCallback(lambda attrs: self.failUnlessReallyEqual(attrs['permissions'], S_IFREG | 0o444)) d2.addCallback(lambda ign: wf.close()) return d2 d.addCallback(_write_mutable_setattr) d.addCallback(lambda ign: self.root.get(u"mutable")) def _check_readonly_file(node): self.failUnless(node.is_mutable()) self.failUnless(node.is_readonly()) self.failUnlessReallyEqual(node.get_write_uri(), None) self.failUnlessReallyEqual(node.get_storage_index(), self.mutable.get_storage_index()) return node.download_best_version() d.addCallback(_check_readonly_file) d.addCallback(lambda data: self.failUnlessReallyEqual(data, b"mutable read-only link from parent")) # ... and then by handle d.addCallback(lambda ign: self.handler.openFile(b"mutable2", sftp.FXF_WRITE, {})) def _write_mutable2_setattr(wf): d2 = wf.writeChunk(7, b"2") d2.addCallback(lambda ign: wf.setAttrs({'permissions': 0o444, 'size': 8})) # The link isn't made read-only until the file is closed. 
d2.addCallback(lambda ign: self.root.get(u"mutable2")) d2.addCallback(lambda node: self.failIf(node.is_readonly())) d2.addCallback(lambda ign: wf.getAttrs()) d2.addCallback(lambda attrs: self.failUnlessReallyEqual(attrs['permissions'], S_IFREG | 0o444)) d2.addCallback(lambda ign: self.handler.getAttrs(b"mutable2", followLinks=0)) d2.addCallback(lambda attrs: self.failUnlessReallyEqual(attrs['permissions'], S_IFREG | 0o666)) d2.addCallback(lambda ign: wf.close()) return d2 d.addCallback(_write_mutable2_setattr) d.addCallback(lambda ign: self.root.get(u"mutable2")) d.addCallback(_check_readonly_file) # from above d.addCallback(lambda data: self.failUnlessReallyEqual(data, b"mutable2")) # test READ | WRITE without CREAT or TRUNC d.addCallback(lambda ign: self.handler.openFile(b"small", sftp.FXF_READ | sftp.FXF_WRITE, {})) def _read_write(rwf): d2 = rwf.writeChunk(8, b"0123") # test immediate read starting after the old end-of-file d2.addCallback(lambda ign: rwf.readChunk(11, 1)) d2.addCallback(lambda data: self.failUnlessReallyEqual(data, b"3")) d2.addCallback(lambda ign: rwf.readChunk(0, 100)) d2.addCallback(lambda data: self.failUnlessReallyEqual(data, b"012345670123")) d2.addCallback(lambda ign: rwf.close()) return d2 d.addCallback(_read_write) d.addCallback(lambda ign: self.root.get(u"small")) d.addCallback(lambda node: download_to_data(node)) d.addCallback(lambda data: self.failUnlessReallyEqual(data, b"012345670123")) # test WRITE and rename while still open d.addCallback(lambda ign: self.handler.openFile(b"small", sftp.FXF_WRITE, {})) def _write_rename(wf): d2 = wf.writeChunk(0, b"abcd") d2.addCallback(lambda ign: self.handler.renameFile(b"small", b"renamed")) d2.addCallback(lambda ign: wf.writeChunk(4, b"efgh")) d2.addCallback(lambda ign: wf.close()) return d2 d.addCallback(_write_rename) d.addCallback(lambda ign: self.root.get(u"renamed")) d.addCallback(lambda node: download_to_data(node)) d.addCallback(lambda data: self.failUnlessReallyEqual(data, b"abcdefgh0123")) d.addCallback(lambda ign: self.shouldFail(NoSuchChildError, "rename small while open", "small", self.root.get, u"small")) # test WRITE | CREAT | EXCL and rename while still open d.addCallback(lambda ign: self.handler.openFile(b"newexcl", sftp.FXF_WRITE | sftp.FXF_CREAT | sftp.FXF_EXCL, {})) def _write_creat_excl_rename(wf): d2 = wf.writeChunk(0, b"abcd") d2.addCallback(lambda ign: self.handler.renameFile(b"newexcl", b"renamedexcl")) d2.addCallback(lambda ign: wf.writeChunk(4, b"efgh")) d2.addCallback(lambda ign: wf.close()) return d2 d.addCallback(_write_creat_excl_rename) d.addCallback(lambda ign: self.root.get(u"renamedexcl")) d.addCallback(lambda node: download_to_data(node)) d.addCallback(lambda data: self.failUnlessReallyEqual(data, b"abcdefgh")) d.addCallback(lambda ign: self.shouldFail(NoSuchChildError, "rename newexcl while open", "newexcl", self.root.get, u"newexcl")) # it should be possible to rename even before the open has completed def _open_and_rename_race(ign): slow_open = defer.Deferred() reactor.callLater(1, slow_open.callback, None) d2 = self.handler.openFile(b"new", sftp.FXF_WRITE | sftp.FXF_CREAT, {}, delay=slow_open) # deliberate race between openFile and renameFile d3 = self.handler.renameFile(b"new", b"new2") d3.addErrback(lambda err: self.fail("renameFile failed: %r" % (err,))) return d2 d.addCallback(_open_and_rename_race) def _write_rename_race(wf): d2 = wf.writeChunk(0, b"abcd") d2.addCallback(lambda ign: wf.close()) return d2 d.addCallback(_write_rename_race) d.addCallback(lambda ign: 
self.root.get(u"new2")) d.addCallback(lambda node: download_to_data(node)) d.addCallback(lambda data: self.failUnlessReallyEqual(data, b"abcd")) d.addCallback(lambda ign: self.shouldFail(NoSuchChildError, "rename new while open", "new", self.root.get, u"new")) # check that failed downloads cause failed reads and failed close, # when open for writing. Note that this trashes the grid (by deleting # all shares), so this must be at the end of the test function. gross = u"gro\u00DF".encode("utf-8") d.addCallback(lambda ign: self.handler.openFile(gross, sftp.FXF_READ | sftp.FXF_WRITE, {})) def _read_write_broken(rwf): d2 = rwf.writeChunk(0, b"abcdefghij") d2.addCallback(lambda ign: self.g.nuke_from_orbit()) # reading should fail (reliably if we read past the written chunk) d2.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_FAILURE, "read/write broken", rwf.readChunk, 0, 100)) # close should fail in this case d2.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_FAILURE, "read/write broken close", rwf.close)) return d2 d.addCallback(_read_write_broken) d.addCallback(lambda ign: self.failUnlessEqual(sftpd.all_heisenfiles, {})) d.addCallback(lambda ign: self.failUnlessEqual(self.handler._heisenfiles, {})) return d def test_removeFile(self): d = self._set_up("removeFile") d.addCallback(lambda ign: self._set_up_tree()) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "removeFile nofile", self.handler.removeFile, b"nofile")) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "removeFile nofile", self.handler.removeFile, b"nofile")) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "removeFile nodir/file", self.handler.removeFile, b"nodir/file")) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "removefile ''", self.handler.removeFile, b"")) # removing a directory should fail d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "removeFile tiny_lit_dir", self.handler.removeFile, b"tiny_lit_dir")) # removing a file should succeed d.addCallback(lambda ign: self.root.get(u"gro\u00DF")) d.addCallback(lambda ign: self.handler.removeFile(u"gro\u00DF".encode('utf-8'))) d.addCallback(lambda ign: self.shouldFail(NoSuchChildError, "removeFile gross", "gro", self.root.get, u"gro\u00DF")) # removing an unknown should succeed d.addCallback(lambda ign: self.root.get(u"unknown")) d.addCallback(lambda ign: self.handler.removeFile(b"unknown")) d.addCallback(lambda ign: self.shouldFail(NoSuchChildError, "removeFile unknown", "unknown", self.root.get, u"unknown")) # removing a link to an open file should not prevent it from being read d.addCallback(lambda ign: self.handler.openFile(b"small", sftp.FXF_READ, {})) def _remove_and_read_small(rf): d2 = self.handler.removeFile(b"small") d2.addCallback(lambda ign: self.shouldFail(NoSuchChildError, "removeFile small", "small", self.root.get, u"small")) d2.addCallback(lambda ign: rf.readChunk(0, 10)) d2.addCallback(lambda data: self.failUnlessReallyEqual(data, b"0123456789")) d2.addCallback(lambda ign: rf.close()) return d2 d.addCallback(_remove_and_read_small) # removing a link to a created file should prevent it from being created d.addCallback(lambda ign: self.handler.openFile(b"tempfile", sftp.FXF_READ | sftp.FXF_WRITE | sftp.FXF_CREAT, {})) def _write_remove(rwf): d2 = rwf.writeChunk(0, b"0123456789") d2.addCallback(lambda ign: self.handler.removeFile(b"tempfile")) d2.addCallback(lambda ign: rwf.readChunk(0, 10)) 
d2.addCallback(lambda data: self.failUnlessReallyEqual(data, b"0123456789")) d2.addCallback(lambda ign: rwf.close()) return d2 d.addCallback(_write_remove) d.addCallback(lambda ign: self.shouldFail(NoSuchChildError, "removeFile tempfile", "tempfile", self.root.get, u"tempfile")) # ... even if the link is renamed while open d.addCallback(lambda ign: self.handler.openFile(b"tempfile2", sftp.FXF_READ | sftp.FXF_WRITE | sftp.FXF_CREAT, {})) def _write_rename_remove(rwf): d2 = rwf.writeChunk(0, b"0123456789") d2.addCallback(lambda ign: self.handler.renameFile(b"tempfile2", b"tempfile3")) d2.addCallback(lambda ign: self.handler.removeFile(b"tempfile3")) d2.addCallback(lambda ign: rwf.readChunk(0, 10)) d2.addCallback(lambda data: self.failUnlessReallyEqual(data, b"0123456789")) d2.addCallback(lambda ign: rwf.close()) return d2 d.addCallback(_write_rename_remove) d.addCallback(lambda ign: self.shouldFail(NoSuchChildError, "removeFile tempfile2", "tempfile2", self.root.get, u"tempfile2")) d.addCallback(lambda ign: self.shouldFail(NoSuchChildError, "removeFile tempfile3", "tempfile3", self.root.get, u"tempfile3")) d.addCallback(lambda ign: self.failUnlessEqual(sftpd.all_heisenfiles, {})) d.addCallback(lambda ign: self.failUnlessEqual(self.handler._heisenfiles, {})) return d def test_removeDirectory(self): d = self._set_up("removeDirectory") d.addCallback(lambda ign: self._set_up_tree()) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "removeDirectory nodir", self.handler.removeDirectory, b"nodir")) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "removeDirectory nodir/nodir", self.handler.removeDirectory, b"nodir/nodir")) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "removeDirectory ''", self.handler.removeDirectory, b"")) # removing a file should fail d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "removeDirectory gross", self.handler.removeDirectory, u"gro\u00DF".encode('utf-8'))) # removing a directory should succeed d.addCallback(lambda ign: self.root.get(u"tiny_lit_dir")) d.addCallback(lambda ign: self.handler.removeDirectory(b"tiny_lit_dir")) d.addCallback(lambda ign: self.shouldFail(NoSuchChildError, "removeDirectory tiny_lit_dir", "tiny_lit_dir", self.root.get, u"tiny_lit_dir")) # removing an unknown should succeed d.addCallback(lambda ign: self.root.get(u"unknown")) d.addCallback(lambda ign: self.handler.removeDirectory(b"unknown")) d.addCallback(lambda err: self.shouldFail(NoSuchChildError, "removeDirectory unknown", "unknown", self.root.get, u"unknown")) d.addCallback(lambda ign: self.failUnlessEqual(sftpd.all_heisenfiles, {})) d.addCallback(lambda ign: self.failUnlessEqual(self.handler._heisenfiles, {})) return d def test_renameFile(self): d = self._set_up("renameFile") d.addCallback(lambda ign: self._set_up_tree()) # renaming a non-existent file should fail d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "renameFile nofile newfile", self.handler.renameFile, b"nofile", b"newfile")) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "renameFile '' newfile", self.handler.renameFile, b"", b"newfile")) # renaming a file to a non-existent path should fail d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "renameFile small nodir/small", self.handler.renameFile, b"small", b"nodir/small")) # renaming a file to an invalid UTF-8 name should fail d.addCallback(lambda ign: 
self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "renameFile small invalid", self.handler.renameFile, b"small", b"\xFF")) # renaming a file to or from an URI should fail d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "renameFile small from uri", self.handler.renameFile, b"uri/"+self.small_uri, b"new")) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "renameFile small to uri", self.handler.renameFile, b"small", b"uri/fake_uri")) # renaming a file onto an existing file, directory or unknown should fail # The SFTP spec isn't clear about what error should be returned, but sshfs depends on # it being FX_PERMISSION_DENIED. d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "renameFile small small2", self.handler.renameFile, b"small", b"small2")) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "renameFile small tiny_lit_dir", self.handler.renameFile, b"small", b"tiny_lit_dir")) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "renameFile small unknown", self.handler.renameFile, b"small", b"unknown")) # renaming a file onto a heisenfile should fail, even if the open hasn't completed def _rename_onto_heisenfile_race(wf): slow_open = defer.Deferred() reactor.callLater(1, slow_open.callback, None) d2 = self.handler.openFile(b"heisenfile", sftp.FXF_WRITE | sftp.FXF_CREAT, {}, delay=slow_open) # deliberate race between openFile and renameFile d3 = self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "renameFile small heisenfile", self.handler.renameFile, b"small", b"heisenfile") d2.addCallback(lambda wf: wf.close()) return deferredutil.gatherResults([d2, d3]) d.addCallback(_rename_onto_heisenfile_race) # renaming a file to a correct path should succeed d.addCallback(lambda ign: self.handler.renameFile(b"small", b"new_small")) d.addCallback(lambda ign: self.root.get(u"new_small")) d.addCallback(lambda node: self.failUnlessReallyEqual(node.get_uri(), self.small_uri)) # renaming a file into a subdirectory should succeed (also tests Unicode names) d.addCallback(lambda ign: self.handler.renameFile(u"gro\u00DF".encode('utf-8'), u"loop/neue_gro\u00DF".encode('utf-8'))) d.addCallback(lambda ign: self.root.get(u"neue_gro\u00DF")) d.addCallback(lambda node: self.failUnlessReallyEqual(node.get_uri(), self.gross_uri)) # renaming a directory to a correct path should succeed d.addCallback(lambda ign: self.handler.renameFile(b"tiny_lit_dir", b"new_tiny_lit_dir")) d.addCallback(lambda ign: self.root.get(u"new_tiny_lit_dir")) d.addCallback(lambda node: self.failUnlessReallyEqual(node.get_uri(), self.tiny_lit_dir_uri)) # renaming an unknown to a correct path should succeed d.addCallback(lambda ign: self.handler.renameFile(b"unknown", b"new_unknown")) d.addCallback(lambda ign: self.root.get(u"new_unknown")) d.addCallback(lambda node: self.failUnlessReallyEqual(node.get_uri(), self.unknown_uri)) d.addCallback(lambda ign: self.failUnlessEqual(sftpd.all_heisenfiles, {})) d.addCallback(lambda ign: self.failUnlessEqual(self.handler._heisenfiles, {})) return d def test_renameFile_posix(self): def _renameFile(fromPathstring, toPathstring): extData = (struct.pack('>L', len(fromPathstring)) + fromPathstring + struct.pack('>L', len(toPathstring)) + toPathstring) d2 = self.handler.extendedRequest(b'posix-rename@openssh.com', extData) def _check(res): res.trap(sftp.SFTPError) if res.value.code == sftp.FX_OK: return None return res d2.addCallbacks(lambda res: 
self.fail("posix-rename request was supposed to " "raise an SFTPError, not get '%r'" % (res,)), _check) return d2 d = self._set_up("renameFile_posix") d.addCallback(lambda ign: self._set_up_tree()) d.addCallback(lambda ign: self.root.set_node(u"loop2", self.root)) d.addCallback(lambda ign: self.root.set_node(u"unknown2", self.unknown)) # POSIX-renaming a non-existent file should fail d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "renameFile_posix nofile newfile", _renameFile, b"nofile", b"newfile")) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "renameFile_posix '' newfile", _renameFile, b"", b"newfile")) # POSIX-renaming a file to a non-existent path should fail d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "renameFile_posix small nodir/small", _renameFile, b"small", b"nodir/small")) # POSIX-renaming a file to an invalid UTF-8 name should fail d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "renameFile_posix small invalid", _renameFile, b"small", b"\xFF")) # POSIX-renaming a file to or from an URI should fail d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "renameFile_posix small from uri", _renameFile, b"uri/"+self.small_uri, b"new")) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "renameFile_posix small to uri", _renameFile, b"small", b"uri/fake_uri")) # POSIX-renaming a file onto an existing file, directory or unknown should succeed d.addCallback(lambda ign: _renameFile(b"small", b"small2")) d.addCallback(lambda ign: self.root.get(u"small2")) d.addCallback(lambda node: self.failUnlessReallyEqual(node.get_uri(), self.small_uri)) d.addCallback(lambda ign: _renameFile(b"small2", b"loop2")) d.addCallback(lambda ign: self.root.get(u"loop2")) d.addCallback(lambda node: self.failUnlessReallyEqual(node.get_uri(), self.small_uri)) d.addCallback(lambda ign: _renameFile(b"loop2", b"unknown2")) d.addCallback(lambda ign: self.root.get(u"unknown2")) d.addCallback(lambda node: self.failUnlessReallyEqual(node.get_uri(), self.small_uri)) # POSIX-renaming a file to a correct new path should succeed d.addCallback(lambda ign: _renameFile(b"unknown2", b"new_small")) d.addCallback(lambda ign: self.root.get(u"new_small")) d.addCallback(lambda node: self.failUnlessReallyEqual(node.get_uri(), self.small_uri)) # POSIX-renaming a file into a subdirectory should succeed (also tests Unicode names) d.addCallback(lambda ign: _renameFile(u"gro\u00DF".encode('utf-8'), u"loop/neue_gro\u00DF".encode('utf-8'))) d.addCallback(lambda ign: self.root.get(u"neue_gro\u00DF")) d.addCallback(lambda node: self.failUnlessReallyEqual(node.get_uri(), self.gross_uri)) # POSIX-renaming a directory to a correct path should succeed d.addCallback(lambda ign: _renameFile(b"tiny_lit_dir", b"new_tiny_lit_dir")) d.addCallback(lambda ign: self.root.get(u"new_tiny_lit_dir")) d.addCallback(lambda node: self.failUnlessReallyEqual(node.get_uri(), self.tiny_lit_dir_uri)) # POSIX-renaming an unknown to a correct path should succeed d.addCallback(lambda ign: _renameFile(b"unknown", b"new_unknown")) d.addCallback(lambda ign: self.root.get(u"new_unknown")) d.addCallback(lambda node: self.failUnlessReallyEqual(node.get_uri(), self.unknown_uri)) d.addCallback(lambda ign: self.failUnlessEqual(sftpd.all_heisenfiles, {})) d.addCallback(lambda ign: self.failUnlessEqual(self.handler._heisenfiles, {})) return d def test_makeDirectory(self): d = self._set_up("makeDirectory") 
d.addCallback(lambda ign: self._set_up_tree()) # making a directory at a correct path should succeed d.addCallback(lambda ign: self.handler.makeDirectory(b"newdir", {'ext_foo': 'bar', 'ctime': 42})) d.addCallback(lambda ign: self.root.get_child_and_metadata(u"newdir")) def _got(child_and_metadata): (child, metadata) = child_and_metadata self.failUnless(IDirectoryNode.providedBy(child)) self.failUnless(child.is_mutable()) # FIXME #self.failUnless('ctime' in metadata, metadata) #self.failUnlessReallyEqual(metadata['ctime'], 42) #self.failUnless('ext_foo' in metadata, metadata) #self.failUnlessReallyEqual(metadata['ext_foo'], 'bar') # TODO: child should be empty d.addCallback(_got) # making intermediate directories should also succeed d.addCallback(lambda ign: self.handler.makeDirectory(b"newparent/newchild", {})) d.addCallback(lambda ign: self.root.get(u"newparent")) def _got_newparent(newparent): self.failUnless(IDirectoryNode.providedBy(newparent)) self.failUnless(newparent.is_mutable()) return newparent.get(u"newchild") d.addCallback(_got_newparent) def _got_newchild(newchild): self.failUnless(IDirectoryNode.providedBy(newchild)) self.failUnless(newchild.is_mutable()) d.addCallback(_got_newchild) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "makeDirectory invalid UTF-8", self.handler.makeDirectory, b"\xFF", {})) # should fail because there is an existing file "small" d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_FAILURE, "makeDirectory small", self.handler.makeDirectory, b"small", {})) # directories cannot be created read-only via SFTP d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "makeDirectory newdir2 permissions:0444 denied", self.handler.makeDirectory, b"newdir2", {'permissions': 0o444})) d.addCallback(lambda ign: self.failUnlessEqual(sftpd.all_heisenfiles, {})) d.addCallback(lambda ign: self.failUnlessEqual(self.handler._heisenfiles, {})) return d def test_execCommand_and_openShell(self): class MockProtocol(object): def __init__(self): self.output = "" self.error = "" self.reason = None def write(self, data): return self.outReceived(data) def outReceived(self, data): self.output += data return defer.succeed(None) def errReceived(self, data): self.error += data return defer.succeed(None) def processEnded(self, reason): self.reason = reason return defer.succeed(None) def _lines_end_in_crlf(s): return s.replace('\r\n', '').find('\n') == -1 and s.endswith('\r\n') d = self._set_up("execCommand_and_openShell") d.addCallback(lambda ign: conch_interfaces.ISession(self.handler)) def _exec_df(session): protocol = MockProtocol() d2 = session.execCommand(protocol, "df -P -k /") d2.addCallback(lambda ign: self.failUnlessIn("1024-blocks", protocol.output)) d2.addCallback(lambda ign: self.failUnless(_lines_end_in_crlf(protocol.output), protocol.output)) d2.addCallback(lambda ign: self.failUnlessEqual(protocol.error, "")) d2.addCallback(lambda ign: self.failUnless(isinstance(protocol.reason.value, ProcessDone))) d2.addCallback(lambda ign: session.eofReceived()) d2.addCallback(lambda ign: session.closed()) return d2 d.addCallback(_exec_df) def _check_unsupported(protocol): d2 = defer.succeed(None) d2.addCallback(lambda ign: self.failUnlessEqual(protocol.output, "")) d2.addCallback(lambda ign: self.failUnlessIn("only the SFTP protocol", protocol.error)) d2.addCallback(lambda ign: self.failUnless(_lines_end_in_crlf(protocol.error), protocol.error)) d2.addCallback(lambda ign: 
self.failUnless(isinstance(protocol.reason.value, ProcessTerminated))) d2.addCallback(lambda ign: self.failUnlessEqual(protocol.reason.value.exitCode, 1)) return d2 d.addCallback(lambda ign: conch_interfaces.ISession(self.handler)) def _exec_error(session): protocol = MockProtocol() d2 = session.execCommand(protocol, "error") d2.addCallback(lambda ign: session.windowChanged(None)) d2.addCallback(lambda ign: _check_unsupported(protocol)) d2.addCallback(lambda ign: session.closed()) return d2 d.addCallback(_exec_error) d.addCallback(lambda ign: conch_interfaces.ISession(self.handler)) def _openShell(session): protocol = MockProtocol() d2 = session.openShell(protocol) d2.addCallback(lambda ign: _check_unsupported(protocol)) d2.addCallback(lambda ign: session.closed()) return d2 d.addCallback(_openShell) return d def test_extendedRequest(self): d = self._set_up("extendedRequest") d.addCallback(lambda ign: self.handler.extendedRequest(b"statvfs@openssh.com", b"/")) def _check(res): self.failUnless(isinstance(res, bytes)) self.failUnlessEqual(len(res), 8*11) d.addCallback(_check) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_OP_UNSUPPORTED, "extendedRequest foo bar", self.handler.extendedRequest, b"foo", b"bar")) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_BAD_MESSAGE, "extendedRequest posix-rename@openssh.com invalid 1", self.handler.extendedRequest, b'posix-rename@openssh.com', b'')) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_BAD_MESSAGE, "extendedRequest posix-rename@openssh.com invalid 2", self.handler.extendedRequest, b'posix-rename@openssh.com', b'\x00\x00\x00\x01')) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_BAD_MESSAGE, "extendedRequest posix-rename@openssh.com invalid 3", self.handler.extendedRequest, b'posix-rename@openssh.com', b'\x00\x00\x00\x01_\x00\x00\x00\x01')) return d tahoe_lafs-1.20.0/src/allmydata/test/test_spans.py0000644000000000000000000005203713615410400017101 0ustar00""" Tests for allmydata.util.spans. """ import binascii import hashlib from twisted.trial import unittest from allmydata.util.spans import Spans, overlap, DataSpans def sha256(data): """ :param bytes data: data to hash :returns: a hex-encoded SHA256 hash of the data """ return binascii.hexlify(hashlib.sha256(data).digest()) class SimpleSpans(object): # this is a simple+inefficient form of util.spans.Spans . We compare the # behavior of this reference model against the real (efficient) form. 
def __init__(self, _span_or_start=None, length=None): self._have = set() if length is not None: for i in range(_span_or_start, _span_or_start+length): self._have.add(i) elif _span_or_start: for (start,length) in _span_or_start: self.add(start, length) def add(self, start, length): for i in range(start, start+length): self._have.add(i) return self def remove(self, start, length): for i in range(start, start+length): self._have.discard(i) return self def each(self): return sorted(self._have) def __iter__(self): items = sorted(self._have) prevstart = None prevend = None for i in items: if prevstart is None: prevstart = prevend = i continue if i == prevend+1: prevend = i continue yield (prevstart, prevend-prevstart+1) prevstart = prevend = i if prevstart is not None: yield (prevstart, prevend-prevstart+1) def __bool__(self): # this gets us bool() return bool(self.len()) def len(self): return len(self._have) def __add__(self, other): s = self.__class__(self) for (start, length) in other: s.add(start, length) return s def __sub__(self, other): s = self.__class__(self) for (start, length) in other: s.remove(start, length) return s def __iadd__(self, other): for (start, length) in other: self.add(start, length) return self def __isub__(self, other): for (start, length) in other: self.remove(start, length) return self def __and__(self, other): s = self.__class__() for i in other.each(): if i in self._have: s.add(i, 1) return s def __contains__(self, start_and_length): (start, length) = start_and_length for i in range(start, start+length): if i not in self._have: return False return True class ByteSpans(unittest.TestCase): def test_basic(self): s = Spans() self.failUnlessEqual(list(s), []) self.failIf(s) self.failIf((0,1) in s) self.failUnlessEqual(s.len(), 0) s1 = Spans(3, 4) # 3,4,5,6 self._check1(s1) s2 = Spans(s1) self._check1(s2) s2.add(10,2) # 10,11 self._check1(s1) self.failUnless((10,1) in s2) self.failIf((10,1) in s1) self.failUnlessEqual(list(s2.each()), [3,4,5,6,10,11]) self.failUnlessEqual(s2.len(), 6) s2.add(15,2).add(20,2) self.failUnlessEqual(list(s2.each()), [3,4,5,6,10,11,15,16,20,21]) self.failUnlessEqual(s2.len(), 10) s2.remove(4,3).remove(15,1) self.failUnlessEqual(list(s2.each()), [3,10,11,16,20,21]) self.failUnlessEqual(s2.len(), 6) s1 = SimpleSpans(3, 4) # 3 4 5 6 s2 = SimpleSpans(5, 4) # 5 6 7 8 i = s1 & s2 self.failUnlessEqual(list(i.each()), [5, 6]) def _check1(self, s): self.failUnlessEqual(list(s), [(3,4)]) self.failUnless(s) self.failUnlessEqual(s.len(), 4) self.failIf((0,1) in s) self.failUnless((3,4) in s) self.failUnless((3,1) in s) self.failUnless((5,2) in s) self.failUnless((6,1) in s) self.failIf((6,2) in s) self.failIf((7,1) in s) self.failUnlessEqual(list(s.each()), [3,4,5,6]) def test_large(self): s = Spans(4, 2**65) # don't do this with a SimpleSpans self.failUnlessEqual(list(s), [(4, 2**65)]) self.failUnless(s) self.failUnlessEqual(s.len(), 2**65) self.failIf((0,1) in s) self.failUnless((4,2) in s) self.failUnless((2**65,2) in s) def test_math(self): s1 = Spans(0, 10) # 0,1,2,3,4,5,6,7,8,9 s2 = Spans(5, 3) # 5,6,7 s3 = Spans(8, 4) # 8,9,10,11 s = s1 - s2 self.failUnlessEqual(list(s.each()), [0,1,2,3,4,8,9]) s = s1 - s3 self.failUnlessEqual(list(s.each()), [0,1,2,3,4,5,6,7]) s = s2 - s3 self.failUnlessEqual(list(s.each()), [5,6,7]) s = s1 & s2 self.failUnlessEqual(list(s.each()), [5,6,7]) s = s2 & s1 self.failUnlessEqual(list(s.each()), [5,6,7]) s = s1 & s3 self.failUnlessEqual(list(s.each()), [8,9]) s = s3 & s1 self.failUnlessEqual(list(s.each()), [8,9]) s = 
s2 & s3 self.failUnlessEqual(list(s.each()), []) s = s3 & s2 self.failUnlessEqual(list(s.each()), []) s = Spans() & s3 self.failUnlessEqual(list(s.each()), []) s = s3 & Spans() self.failUnlessEqual(list(s.each()), []) s = s1 + s2 self.failUnlessEqual(list(s.each()), [0,1,2,3,4,5,6,7,8,9]) s = s1 + s3 self.failUnlessEqual(list(s.each()), [0,1,2,3,4,5,6,7,8,9,10,11]) s = s2 + s3 self.failUnlessEqual(list(s.each()), [5,6,7,8,9,10,11]) s = Spans(s1) s -= s2 self.failUnlessEqual(list(s.each()), [0,1,2,3,4,8,9]) s = Spans(s1) s -= s3 self.failUnlessEqual(list(s.each()), [0,1,2,3,4,5,6,7]) s = Spans(s2) s -= s3 self.failUnlessEqual(list(s.each()), [5,6,7]) s = Spans(s1) s += s2 self.failUnlessEqual(list(s.each()), [0,1,2,3,4,5,6,7,8,9]) s = Spans(s1) s += s3 self.failUnlessEqual(list(s.each()), [0,1,2,3,4,5,6,7,8,9,10,11]) s = Spans(s2) s += s3 self.failUnlessEqual(list(s.each()), [5,6,7,8,9,10,11]) def test_random(self): # attempt to increase coverage of corner cases by comparing behavior # of a simple-but-slow model implementation against the # complex-but-fast actual implementation, in a large number of random # operations S1 = SimpleSpans S2 = Spans s1 = S1(); s2 = S2() seed = b"" def _create(subseed): ns1 = S1(); ns2 = S2() for i in range(10): what = sha256(subseed+bytes(i)) start = int(what[2:4], 16) length = max(1,int(what[5:6], 16)) ns1.add(start, length); ns2.add(start, length) return ns1, ns2 #print() for i in range(1000): what = sha256(seed+bytes(i)) op = what[0:1] subop = what[1:2] start = int(what[2:4], 16) length = max(1,int(what[5:6], 16)) #print(what) if op in b"0": if subop in b"01234": s1 = S1(); s2 = S2() elif subop in b"5678": s1 = S1(start, length); s2 = S2(start, length) else: s1 = S1(s1); s2 = S2(s2) #print("s2 = %s" % s2.dump()) elif op in b"123": #print("s2.add(%d,%d)" % (start, length)) s1.add(start, length); s2.add(start, length) elif op in b"456": #print("s2.remove(%d,%d)" % (start, length)) s1.remove(start, length); s2.remove(start, length) elif op in b"78": ns1, ns2 = _create(what[7:11]) #print("s2 + %s" % ns2.dump()) s1 = s1 + ns1; s2 = s2 + ns2 elif op in b"9a": ns1, ns2 = _create(what[7:11]) #print("%s - %s" % (s2.dump(), ns2.dump())) s1 = s1 - ns1; s2 = s2 - ns2 elif op in b"bc": ns1, ns2 = _create(what[7:11]) #print("s2 += %s" % ns2.dump()) s1 += ns1; s2 += ns2 elif op in b"de": ns1, ns2 = _create(what[7:11]) #print("%s -= %s" % (s2.dump(), ns2.dump())) s1 -= ns1; s2 -= ns2 else: ns1, ns2 = _create(what[7:11]) #print("%s &= %s" % (s2.dump(), ns2.dump())) s1 = s1 & ns1; s2 = s2 & ns2 #print("s2 now %s" % s2.dump()) self.failUnlessEqual(list(s1.each()), list(s2.each())) self.failUnlessEqual(s1.len(), s2.len()) self.failUnlessEqual(bool(s1), bool(s2)) self.failUnlessEqual(list(s1), list(s2)) for j in range(10): what = sha256(what[12:14]+bytes(j)) start = int(what[2:4], 16) length = max(1, int(what[5:6], 16)) span = (start, length) self.failUnlessEqual(bool(span in s1), bool(span in s2)) # s() # s(start,length) # s(s0) # s.add(start,length) : returns s # s.remove(start,length) # s.each() -> list of byte offsets, mostly for testing # list(s) -> list of (start,length) tuples, one per span # (start,length) in s -> True if (start..start+length-1) are all members # NOT equivalent to x in list(s) # s.len() -> number of bytes, for testing, bool(), and accounting/limiting # bool(s) (__nonzeron__) # s = s1+s2, s1-s2, +=s1, -=s1 def test_overlap(self): for a in range(20): for b in range(10): for c in range(20): for d in range(10): self._test_overlap(a,b,c,d) def 
_test_overlap(self, a, b, c, d): s1 = set(range(a,a+b)) s2 = set(range(c,c+d)) #print("---") #self._show_overlap(s1, "1") #self._show_overlap(s2, "2") o = overlap(a,b,c,d) expected = s1.intersection(s2) if not expected: self.failUnlessEqual(o, None) else: start,length = o so = set(range(start,start+length)) #self._show(so, "o") self.failUnlessEqual(so, expected)
def _show_overlap(self, s, c): import sys out = sys.stdout if s: for i in range(max(s)): if i in s: out.write(c) else: out.write(" ") out.write("\n")
def extend(s, start, length, fill): if len(s) >= start+length: return s assert len(fill) == 1 return s + fill*(start+length-len(s))
def replace(s, start, data): assert len(s) >= start+len(data) return s[:start] + data + s[start+len(data):]
class SimpleDataSpans(object): def __init__(self, other=None): self.missing = "" # "1" where missing, "0" where found self.data = b"" if other: for (start, data) in other.get_chunks(): self.add(start, data) def __bool__(self): # this gets us bool() return bool(self.len()) def len(self): return len(self.missing.replace("1", "")) def _dump(self): return [i for (i,c) in enumerate(self.missing) if c == "0"] def _have(self, start, length): m = self.missing[start:start+length] if not m or len(m) < length: return False self.should_assert("Should assert if p_i > 1", f, [1.1]); self.should_assert("Should assert if p_i < 0", f, [-.1]);
def test_repair_count_pmf(self): survival_pmf = statistics.binomial_distribution_pmf(5, .9) repair_pmf = statistics.repair_count_pmf(survival_pmf, 3) # repair_pmf[0] == sum(survival_pmf[0,1,2,5]) # repair_pmf[1] == survival_pmf[4] # repair_pmf[2] = survival_pmf[3] self.failUnlessListAlmostEqual(repair_pmf, [0.00001 + 0.00045 + 0.0081 + 0.59049, .32805, .0729, 0, 0, 0])
def test_repair_cost(self): survival_pmf = statistics.binomial_distribution_pmf(5, .9) bwcost = statistics.bandwidth_cost_function cost = statistics.mean_repair_cost(bwcost, 1000, survival_pmf, 3, ul_dl_ratio=1.0) self.failUnlessAlmostEqual(cost, 558.90) cost = statistics.mean_repair_cost(bwcost, 1000, survival_pmf, 3, ul_dl_ratio=8.0) self.failUnlessAlmostEqual(cost, 1664.55) # I haven't manually checked the math beyond here -warner cost = statistics.eternal_repair_cost(bwcost, 1000, survival_pmf, 3, discount_rate=0, ul_dl_ratio=1.0) self.failUnlessAlmostEqual(cost, 65292.056074766246) cost = statistics.eternal_repair_cost(bwcost, 1000, survival_pmf, 3, discount_rate=0.05, ul_dl_ratio=1.0) self.failUnlessAlmostEqual(cost, 9133.6097158191551)
def test_convolve(self): f = statistics.convolve v1 = [ 1, 2, 3 ] v2 = [ 4, 5, 6 ] v3 = [ 7, 8 ] v1v2result = [ 4, 13, 28, 27, 18 ] # Convolution is commutative r1 = f(v1, v2) r2 = f(v2, v1) self.failUnlessListEqual(r1, r2, "Convolution should be commutative") self.failUnlessListEqual(r1, v1v2result, "Didn't match known result") # Convolution is associative r1 = f(f(v1, v2), v3) r2 = f(v1, f(v2, v3)) self.failUnlessListEqual(r1, r2, "Convolution should be associative") # Convolution is distributive r1 = f(v3, [ a + b for a, b in zip(v1, v2) ]) tmp1 = f(v3, v1) tmp2 = f(v3, v2) r2 = [ a + b for a, b in zip(tmp1, tmp2) ] self.failUnlessListEqual(r1, r2, "Convolution should be distributive") # Convolution is scalar multiplication associative tmp1 = f(v1, v2) r1 = [ a * 4 for a in tmp1 ] tmp2 = [ a * 4 for a in v1 ] r2 = f(tmp2, v2) self.failUnlessListEqual(r1, r2, "Convolution should be scalar multiplication associative")
def test_find_k(self): f = statistics.find_k g = statistics.pr_file_loss plist = [.9] * 10 + [.8] * 10 # N=20 t = .0001 k = f(plist, t) self.failUnlessEqual(k, 10)
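# (g here is statistics.pr_file_loss; this assertion re-checks that the k chosen by
# find_k really keeps the file-loss probability under the target t. The expected
# values in the tests below are consistent with pr_file_loss(plist, k) being the
# probability that fewer than k of the shares survive: for ten 50%-reliable shares
# and k=3 that is (C(10,0) + C(10,1) + C(10,2)) / 2**10 = 56/1024 = 0.0546875, and
# the 0.02734375 expected from pr_backup_file_loss(plist, .5, 3) is exactly that
# value times the 0.5 backup probability passed in.)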
self.failUnless(g(plist, k) < t) def test_pr_file_loss(self): f = statistics.pr_file_loss plist = [.5] * 10 self.failUnlessEqual(f(plist, 3), .0546875) def test_pr_backup_file_loss(self): f = statistics.pr_backup_file_loss plist = [.5] * 10 self.failUnlessEqual(f(plist, .5, 3), .02734375) tahoe_lafs-1.20.0/src/allmydata/test/test_stats.py0000644000000000000000000000247613615410400017115 0ustar00""" Ported to Python 3. """ from twisted.trial import unittest from twisted.application import service from allmydata.stats import CPUUsageMonitor from allmydata.util import pollmixin import allmydata.test.common_util as testutil class FasterMonitor(CPUUsageMonitor): POLL_INTERVAL = 0.01 class CPUUsage(unittest.TestCase, pollmixin.PollMixin, testutil.StallMixin): def setUp(self): self.s = service.MultiService() self.s.startService() def tearDown(self): return self.s.stopService() def test_monitor(self): m = FasterMonitor() s = m.get_stats() # before it has been started self.failIf("cpu_monitor.1min_avg" in s) m.setServiceParent(self.s) def _poller(): return bool(len(m.samples) == m.HISTORY_LENGTH+1) d = self.poll(_poller) # pause a couple more intervals, to make sure that the history-trimming # code is exercised d.addCallback(self.stall, FasterMonitor.POLL_INTERVAL * 2) def _check(res): s = m.get_stats() self.failUnless("cpu_monitor.1min_avg" in s) self.failUnless("cpu_monitor.5min_avg" in s) self.failUnless("cpu_monitor.15min_avg" in s) self.failUnless("cpu_monitor.total" in s) d.addCallback(_check) return d tahoe_lafs-1.20.0/src/allmydata/test/test_storage.py0000644000000000000000000045333113615410400017423 0ustar00""" Tests for allmydata.storage. Ported to Python 3. """ from __future__ import annotations from future.utils import bchr from six import ensure_str from io import ( BytesIO, ) import time import os.path import platform import stat import struct import shutil from functools import partial from uuid import uuid4 from testtools.matchers import ( Equals, NotEquals, Contains, HasLength, IsInstance, ) from twisted.trial import unittest from twisted.internet import defer from twisted.internet.task import Clock from hypothesis import given, strategies, example import itertools from allmydata import interfaces from allmydata.util import fileutil, hashutil, base32 from allmydata.storage.server import ( StorageServer, DEFAULT_RENEWAL_TIME, FoolscapStorageServer, ) from allmydata.storage.shares import get_share_file from allmydata.storage.mutable import MutableShareFile from allmydata.storage.mutable_schema import ( ALL_SCHEMAS as ALL_MUTABLE_SCHEMAS, ) from allmydata.storage.immutable import ( BucketWriter, BucketReader, ShareFile, FoolscapBucketWriter, FoolscapBucketReader, ) from allmydata.storage.immutable_schema import ( ALL_SCHEMAS as ALL_IMMUTABLE_SCHEMAS, ) from allmydata.storage.common import storage_index_to_dir, \ UnknownMutableContainerVersionError, UnknownImmutableContainerVersionError, \ si_b2a, si_a2b from allmydata.storage.lease import LeaseInfo from allmydata.immutable.layout import WriteBucketProxy, WriteBucketProxy_v2, \ ReadBucketProxy, _WriteBuffer from allmydata.mutable.layout import MDMFSlotWriteProxy, MDMFSlotReadProxy, \ LayoutInvalid, MDMFSIGNABLEHEADER, \ SIGNED_PREFIX, MDMFHEADER, \ MDMFOFFSETS, SDMFSlotWriteProxy, \ PRIVATE_KEY_SIZE, \ SIGNATURE_SIZE, \ VERIFICATION_KEY_SIZE, \ SHARE_HASH_CHAIN_SIZE from allmydata.interfaces import ( BadWriteEnablerError, DataTooLargeError, ConflictingWriteError, ) from allmydata.test.no_network import NoNetworkServer from 
allmydata.storage_client import ( _StorageServer, ) from .common import ( LoggingServiceParent, ShouldFailMixin, FakeDisk, SyncTestCase, AsyncTestCase, ) from .common_util import FakeCanary from .common_storage import ( upload_immutable, upload_mutable, ) from .strategies import ( offsets, lengths, ) class UtilTests(SyncTestCase): """Tests for allmydata.storage.common and .shares.""" def test_encoding(self): """b2a/a2b are the same as base32.""" s = b"\xFF HELLO \xF3" result = si_b2a(s) self.assertThat(base32.b2a(s), Equals(result)) self.assertThat(si_a2b(result), Equals(s)) def test_storage_index_to_dir(self): """storage_index_to_dir creates a native string path.""" s = b"\xFF HELLO \xF3" path = storage_index_to_dir(s) parts = os.path.split(path) self.assertThat(parts[0], Equals(parts[1][:2])) self.assertThat(path, IsInstance(str)) def test_get_share_file_mutable(self): """A mutable share is identified by get_share_file().""" path = self.mktemp() msf = MutableShareFile(path) msf.create(b"12", b"abc") # arbitrary values loaded = get_share_file(path) self.assertThat(loaded, IsInstance(MutableShareFile)) self.assertThat(loaded.home, Equals(path)) def test_get_share_file_immutable(self): """An immutable share is identified by get_share_file().""" path = self.mktemp() _ = ShareFile(path, max_size=1000, create=True) loaded = get_share_file(path) self.assertThat(loaded, IsInstance(ShareFile)) self.assertThat(loaded.home, Equals(path)) class FakeStatsProvider(object): def count(self, name, delta=1): pass def register_producer(self, producer): pass class Bucket(SyncTestCase): def make_workdir(self, name): basedir = os.path.join("storage", "Bucket", name) incoming = os.path.join(basedir, "tmp", "bucket") final = os.path.join(basedir, "bucket") fileutil.make_dirs(basedir) fileutil.make_dirs(os.path.join(basedir, "tmp")) return incoming, final def bucket_writer_closed(self, bw, consumed): pass def add_latency(self, category, latency): pass def count(self, name, delta=1): pass def make_lease(self): owner_num = 0 renew_secret = os.urandom(32) cancel_secret = os.urandom(32) expiration_time = time.time() + 5000 return LeaseInfo(owner_num, renew_secret, cancel_secret, expiration_time, b"\x00" * 20) def test_create(self): incoming, final = self.make_workdir("test_create") bw = BucketWriter(self, incoming, final, 200, self.make_lease(), Clock()) bw.write(0, b"a"*25) bw.write(25, b"b"*25) bw.write(50, b"c"*25) bw.write(75, b"d"*7) bw.close() def test_readwrite(self): incoming, final = self.make_workdir("test_readwrite") bw = BucketWriter(self, incoming, final, 200, self.make_lease(), Clock()) bw.write(0, b"a"*25) bw.write(25, b"b"*25) bw.write(50, b"c"*7) # last block may be short bw.close() # now read from it br = BucketReader(self, bw.finalhome) self.assertThat(br.read(0, 25), Equals(b"a"*25)) self.assertThat(br.read(25, 25), Equals(b"b"*25)) self.assertThat(br.read(50, 7), Equals(b"c"*7)) def test_write_past_size_errors(self): """Writing beyond the size of the bucket throws an exception.""" for (i, (offset, length)) in enumerate([(0, 201), (10, 191), (202, 34)]): incoming, final = self.make_workdir( "test_write_past_size_errors-{}".format(i) ) bw = BucketWriter(self, incoming, final, 200, self.make_lease(), Clock()) with self.assertRaises(DataTooLargeError): bw.write(offset, b"a" * length) @given( maybe_overlapping_offset=strategies.integers(min_value=0, max_value=98), maybe_overlapping_length=strategies.integers(min_value=1, max_value=100), ) def test_overlapping_writes_ok_if_matching( self, 
maybe_overlapping_offset, maybe_overlapping_length ): """ Writes that overlap with previous writes are OK when the content is the same. """ length = 100 expected_data = b"".join(bchr(i) for i in range(100)) incoming, final = self.make_workdir("overlapping_writes_{}".format(uuid4())) bw = BucketWriter( self, incoming, final, length, self.make_lease(), Clock() ) # Three writes: 10-19, 30-39, 50-59. This allows for a bunch of holes. bw.write(10, expected_data[10:20]) bw.write(30, expected_data[30:40]) bw.write(50, expected_data[50:60]) # Then, an overlapping write but with matching data: bw.write( maybe_overlapping_offset, expected_data[ maybe_overlapping_offset:maybe_overlapping_offset + maybe_overlapping_length ] ) # Now fill in the holes: bw.write(0, expected_data[0:10]) bw.write(20, expected_data[20:30]) bw.write(40, expected_data[40:50]) bw.write(60, expected_data[60:]) bw.close() br = BucketReader(self, bw.finalhome) self.assertEqual(br.read(0, length), expected_data) @given( maybe_overlapping_offset=strategies.integers(min_value=0, max_value=98), maybe_overlapping_length=strategies.integers(min_value=1, max_value=100), ) def test_overlapping_writes_not_ok_if_different( self, maybe_overlapping_offset, maybe_overlapping_length ): """ Writes that overlap with previous writes fail with an exception if the contents don't match. """ length = 100 incoming, final = self.make_workdir("overlapping_writes_{}".format(uuid4())) bw = BucketWriter( self, incoming, final, length, self.make_lease(), Clock() ) # Three writes: 10-19, 30-39, 50-59. This allows for a bunch of holes. bw.write(10, b"1" * 10) bw.write(30, b"1" * 10) bw.write(50, b"1" * 10) # Then, write something that might overlap with some of them, but # conflicts. Then fill in holes left by first three writes. Conflict is # inevitable. with self.assertRaises(ConflictingWriteError): bw.write( maybe_overlapping_offset, b'X' * min(maybe_overlapping_length, length - maybe_overlapping_offset), ) bw.write(0, b"1" * 10) bw.write(20, b"1" * 10) bw.write(40, b"1" * 10) bw.write(60, b"1" * 40) @given( offsets=strategies.lists( strategies.integers(min_value=0, max_value=99), min_size=20, max_size=20 ), ) @example(offsets=[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10, 40, 70]) def test_writes_return_when_finished( self, offsets ): """ The ``BucketWriter.write()`` return true if and only if the maximum size has been reached via potentially overlapping writes. The remaining ranges can be checked via ``BucketWriter.required_ranges()``. """ incoming, final = self.make_workdir("overlapping_writes_{}".format(uuid4())) bw = BucketWriter( self, incoming, final, 100, self.make_lease(), Clock() ) local_written = [0] * 100 for offset in offsets: length = min(30, 100 - offset) data = b"1" * length for i in range(offset, offset+length): local_written[i] = 1 finished = bw.write(offset, data) self.assertEqual(finished, sum(local_written) == 100) required_ranges = bw.required_ranges() for i in range(0, 100): self.assertEqual(local_written[i] == 1, required_ranges.get(i) is None) def test_read_past_end_of_share_data(self): # test vector for immutable files (hard-coded contents of an immutable share # file): # The following immutable share file content is identical to that # generated with storage.immutable.ShareFile from Tahoe-LAFS v1.8.2 # with share data == 'a'. The total size of this content is 85 # bytes. 
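# (Those 85 bytes break down as: a 12-byte container header (the three big-endian
# uint32 fields packed just below), 1 byte of share data (b'a'), and a 72-byte lease
# consisting of a 4-byte owner number, the 32-byte renew secret, the 32-byte cancel
# secret, and a 4-byte expiration time: 12 + 1 + 72 = 85.)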
containerdata = struct.pack('>LLL', 1, 1, 1) # A Tahoe-LAFS storage client would send as the share_data a # complicated string involving hash trees and a URI Extension Block # -- see allmydata/immutable/layout.py . This test, which is # simulating a client, just sends 'a'. share_data = b'a' ownernumber = struct.pack('>L', 0) renewsecret = b'THIS LETS ME RENEW YOUR FILE....' assert len(renewsecret) == 32 cancelsecret = b'THIS LETS ME KILL YOUR FILE HAHA' assert len(cancelsecret) == 32 expirationtime = struct.pack('>L', DEFAULT_RENEWAL_TIME) # 31 days in seconds lease_data = ownernumber + renewsecret + cancelsecret + expirationtime share_file_data = containerdata + share_data + lease_data incoming, final = self.make_workdir("test_read_past_end_of_share_data") fileutil.write(final, share_file_data) class MockStorageServer(object): def add_latency(self, category, latency): pass def count(self, name, delta=1): pass mockstorageserver = MockStorageServer() # Now read from it. br = BucketReader(mockstorageserver, final) self.assertThat(br.read(0, len(share_data)), Equals(share_data)) # Read past the end of share data to get the cancel secret. read_length = len(share_data) + len(ownernumber) + len(renewsecret) + len(cancelsecret) result_of_read = br.read(0, read_length) self.assertThat(result_of_read, Equals(share_data)) result_of_read = br.read(0, len(share_data)+1) self.assertThat(result_of_read, Equals(share_data)) def _assert_timeout_only_after_30_minutes(self, clock, bw): """ The ``BucketWriter`` times out and is closed after 30 minutes, but not sooner. """ self.assertFalse(bw.closed) # 29 minutes pass. Everything is fine. for i in range(29): clock.advance(60) self.assertFalse(bw.closed, "Bucket closed after only %d minutes" % (i + 1,)) # After the 30th minute, the bucket is closed due to lack of writes. clock.advance(60) self.assertTrue(bw.closed) def test_bucket_expires_if_no_writes_for_30_minutes(self): """ If a ``BucketWriter`` receives no writes for 30 minutes, it is removed. """ incoming, final = self.make_workdir("test_bucket_expires") clock = Clock() bw = BucketWriter(self, incoming, final, 200, self.make_lease(), clock) self._assert_timeout_only_after_30_minutes(clock, bw) def test_bucket_writes_delay_timeout(self): """ So long as the ``BucketWriter`` receives writes, the the removal timeout is put off. """ incoming, final = self.make_workdir("test_bucket_writes_delay_timeout") clock = Clock() bw = BucketWriter(self, incoming, final, 200, self.make_lease(), clock) # 29 minutes pass, getting close to the timeout... clock.advance(29 * 60) # .. but we receive a write! So that should delay the timeout again to # another 30 minutes. bw.write(0, b"hello") self._assert_timeout_only_after_30_minutes(clock, bw) def test_bucket_closing_cancels_timeout(self): """ Closing cancels the ``BucketWriter`` timeout. """ incoming, final = self.make_workdir("test_bucket_close_timeout") clock = Clock() bw = BucketWriter(self, incoming, final, 10, self.make_lease(), clock) self.assertTrue(clock.getDelayedCalls()) bw.close() self.assertFalse(clock.getDelayedCalls()) def test_bucket_aborting_cancels_timeout(self): """ Closing cancels the ``BucketWriter`` timeout. 
""" incoming, final = self.make_workdir("test_bucket_abort_timeout") clock = Clock() bw = BucketWriter(self, incoming, final, 10, self.make_lease(), clock) self.assertTrue(clock.getDelayedCalls()) bw.abort() self.assertFalse(clock.getDelayedCalls()) class RemoteBucket(object): def __init__(self, target): self.target = target self.read_count = 0 self.write_count = 0 def callRemote(self, methname, *args, **kwargs): def _call(): meth = getattr(self.target, "remote_" + methname) return meth(*args, **kwargs) if methname == "slot_readv": self.read_count += 1 if "writev" in methname: self.write_count += 1 return defer.maybeDeferred(_call) class BucketProxy(AsyncTestCase): def make_bucket(self, name, size): basedir = os.path.join("storage", "BucketProxy", name) incoming = os.path.join(basedir, "tmp", "bucket") final = os.path.join(basedir, "bucket") fileutil.make_dirs(basedir) fileutil.make_dirs(os.path.join(basedir, "tmp")) bw = BucketWriter(self, incoming, final, size, self.make_lease(), Clock()) rb = RemoteBucket(FoolscapBucketWriter(bw)) return bw, rb, final def make_lease(self): owner_num = 0 renew_secret = os.urandom(32) cancel_secret = os.urandom(32) expiration_time = time.time() + 5000 return LeaseInfo(owner_num, renew_secret, cancel_secret, expiration_time, b"\x00" * 20) def bucket_writer_closed(self, bw, consumed): pass def add_latency(self, category, latency): pass def count(self, name, delta=1): pass def test_create(self): bw, rb, sharefname = self.make_bucket("test_create", 500) bp = WriteBucketProxy(rb, None, data_size=300, block_size=10, num_segments=5, num_share_hashes=3, uri_extension_size=500) self.assertTrue(interfaces.IStorageBucketWriter.providedBy(bp), bp) def _do_test_readwrite(self, name, header_size, wbp_class, rbp_class): # Let's pretend each share has 100 bytes of data, and that there are # 4 segments (25 bytes each), and 8 shares total. So the two # per-segment merkle trees (crypttext_hash_tree, # block_hashes) will have 4 leaves and 7 nodes each. The per-share # merkle tree (share_hashes) has 8 leaves and 15 nodes, and we need 3 # nodes. Furthermore, let's assume the uri_extension is 500 bytes # long. 
That should make the whole share: # # 0x24 + 100 + 7*32 + 7*32 + 7*32 + 3*(2+32) + 4+500 = 1414 bytes long # 0x44 + 100 + 7*32 + 7*32 + 7*32 + 3*(2+32) + 4+500 = 1446 bytes long sharesize = header_size + 100 + 7*32 + 7*32 + 7*32 + 3*(2+32) + 4+500 crypttext_hashes = [hashutil.tagged_hash(b"crypt", b"bar%d" % i) for i in range(7)] block_hashes = [hashutil.tagged_hash(b"block", b"bar%d" % i) for i in range(7)] share_hashes = [(i, hashutil.tagged_hash(b"share", b"bar%d" % i)) for i in (1,9,13)] uri_extension = b"s" + b"E"*498 + b"e" bw, rb, sharefname = self.make_bucket(name, sharesize) bp = wbp_class(rb, None, data_size=95, block_size=25, num_segments=4, num_share_hashes=3, uri_extension_size=len(uri_extension)) d = bp.put_header() d.addCallback(lambda res: bp.put_block(0, b"a"*25)) d.addCallback(lambda res: bp.put_block(1, b"b"*25)) d.addCallback(lambda res: bp.put_block(2, b"c"*25)) d.addCallback(lambda res: bp.put_block(3, b"d"*20)) d.addCallback(lambda res: bp.put_crypttext_hashes(crypttext_hashes)) d.addCallback(lambda res: bp.put_block_hashes(block_hashes)) d.addCallback(lambda res: bp.put_share_hashes(share_hashes)) d.addCallback(lambda res: bp.put_uri_extension(uri_extension)) d.addCallback(lambda res: bp.close()) # now read everything back def _start_reading(res): br = BucketReader(self, sharefname) rb = RemoteBucket(FoolscapBucketReader(br)) server = NoNetworkServer(b"abc", None) rbp = rbp_class(rb, server, storage_index=b"") self.assertThat(repr(rbp), Contains("to peer")) self.assertTrue(interfaces.IStorageBucketReader.providedBy(rbp), rbp) d1 = rbp.get_block_data(0, 25, 25) d1.addCallback(lambda res: self.failUnlessEqual(res, b"a"*25)) d1.addCallback(lambda res: rbp.get_block_data(1, 25, 25)) d1.addCallback(lambda res: self.failUnlessEqual(res, b"b"*25)) d1.addCallback(lambda res: rbp.get_block_data(2, 25, 25)) d1.addCallback(lambda res: self.failUnlessEqual(res, b"c"*25)) d1.addCallback(lambda res: rbp.get_block_data(3, 25, 20)) d1.addCallback(lambda res: self.failUnlessEqual(res, b"d"*20)) d1.addCallback(lambda res: rbp.get_crypttext_hashes()) d1.addCallback(lambda res: self.failUnlessEqual(res, crypttext_hashes)) d1.addCallback(lambda res: rbp.get_block_hashes(set(range(4)))) d1.addCallback(lambda res: self.failUnlessEqual(res, block_hashes)) d1.addCallback(lambda res: rbp.get_share_hashes()) d1.addCallback(lambda res: self.failUnlessEqual(res, share_hashes)) d1.addCallback(lambda res: rbp.get_uri_extension()) d1.addCallback(lambda res: self.failUnlessEqual(res, uri_extension)) return d1 d.addCallback(_start_reading) return d def test_readwrite_v1(self): return self._do_test_readwrite("test_readwrite_v1", 0x24, WriteBucketProxy, ReadBucketProxy) def test_readwrite_v2(self): return self._do_test_readwrite("test_readwrite_v2", 0x44, WriteBucketProxy_v2, ReadBucketProxy) class Server(AsyncTestCase): def setUp(self): super(Server, self).setUp() self.sparent = LoggingServiceParent() self.sparent.startService() self._lease_secret = itertools.count() self.addCleanup(self.sparent.stopService) def workdir(self, name): basedir = os.path.join("storage", "Server", name) return basedir def create(self, name, reserved_space=0, klass=StorageServer, clock=None): if clock is None: clock = Clock() workdir = self.workdir(name) ss = klass(workdir, b"\x00" * 20, reserved_space=reserved_space, stats_provider=FakeStatsProvider(), clock=clock) ss.setServiceParent(self.sparent) return ss def test_create(self): self.create("test_create") def test_declares_fixed_1528(self): ss = 
self.create("test_declares_fixed_1528") ver = ss.get_version() sv1 = ver[b'http://allmydata.org/tahoe/protocols/storage/v1'] self.assertTrue(sv1.get(b'prevents-read-past-end-of-share-data'), sv1) def test_declares_maximum_share_sizes(self): ss = self.create("test_declares_maximum_share_sizes") ver = ss.get_version() sv1 = ver[b'http://allmydata.org/tahoe/protocols/storage/v1'] self.assertThat(sv1, Contains(b'maximum-immutable-share-size')) self.assertThat(sv1, Contains(b'maximum-mutable-share-size')) def test_declares_available_space(self): ss = self.create("test_declares_available_space") ver = ss.get_version() sv1 = ver[b'http://allmydata.org/tahoe/protocols/storage/v1'] self.assertThat(sv1, Contains(b'available-space')) def allocate(self, ss, storage_index, sharenums, size, renew_leases=True): """ Call directly into the storage server's allocate_buckets implementation, skipping the Foolscap layer. """ renew_secret = hashutil.my_renewal_secret_hash(b"%d" % next(self._lease_secret)) cancel_secret = hashutil.my_cancel_secret_hash(b"%d" % next(self._lease_secret)) if isinstance(ss, FoolscapStorageServer): ss = ss._server return ss.allocate_buckets( storage_index, renew_secret, cancel_secret, sharenums, size, renew_leases=renew_leases, ) def test_large_share(self): syslow = platform.system().lower() if 'cygwin' in syslow or 'windows' in syslow or 'darwin' in syslow: raise unittest.SkipTest("If your filesystem doesn't support efficient sparse files then it is very expensive (Mac OS X and Windows don't support efficient sparse files).") avail = fileutil.get_available_space('.', 512*2**20) if avail <= 4*2**30: raise unittest.SkipTest("This test will spuriously fail if you have less than 4 GiB free on your filesystem.") ss = self.create("test_large_share") already,writers = self.allocate(ss, b"allocate", [0], 2**32+2) self.assertThat(set(), Equals(already)) self.assertThat(set([0]), Equals(set(writers.keys()))) shnum, bucket = list(writers.items())[0] # This test is going to hammer your filesystem if it doesn't make a sparse file for this. :-( bucket.write(2**32, b"ab") bucket.close() readers = ss.get_buckets(b"allocate") reader = readers[shnum] self.assertThat(b"ab", Equals(reader.read(2**32, 2))) def test_dont_overfill_dirs(self): """ This test asserts that if you add a second share whose storage index share lots of leading bits with an extant share (but isn't the exact same storage index), this won't add an entry to the share directory. """ ss = self.create("test_dont_overfill_dirs") already, writers = self.allocate(ss, b"storageindex", [0], 10) for i, wb in writers.items(): wb.write(0, b"%10d" % i) wb.close() storedir = os.path.join(self.workdir("test_dont_overfill_dirs"), "shares") children_of_storedir = set(os.listdir(storedir)) # Now store another one under another storageindex that has leading # chars the same as the first storageindex. 
already, writers = self.allocate(ss, b"storageindey", [0], 10) for i, wb in writers.items(): wb.write(0, b"%10d" % i) wb.close() storedir = os.path.join(self.workdir("test_dont_overfill_dirs"), "shares") new_children_of_storedir = set(os.listdir(storedir)) self.assertThat(new_children_of_storedir, Equals(children_of_storedir)) def test_remove_incoming(self): ss = self.create("test_remove_incoming") already, writers = self.allocate(ss, b"vid", list(range(3)), 10) for i,wb in writers.items(): wb.write(0, b"%10d" % i) wb.close() incoming_share_dir = wb.incominghome incoming_bucket_dir = os.path.dirname(incoming_share_dir) incoming_prefix_dir = os.path.dirname(incoming_bucket_dir) incoming_dir = os.path.dirname(incoming_prefix_dir) self.assertFalse(os.path.exists(incoming_bucket_dir), incoming_bucket_dir) self.assertFalse(os.path.exists(incoming_prefix_dir), incoming_prefix_dir) self.assertTrue(os.path.exists(incoming_dir), incoming_dir) def test_abort(self): # remote_abort, when called on a writer, should make sure that # the allocated size of the bucket is not counted by the storage # server when accounting for space. ss = self.create("test_abort") already, writers = self.allocate(ss, b"allocate", [0, 1, 2], 150) self.assertThat(ss.allocated_size(), NotEquals(0)) # Now abort the writers. for writer in writers.values(): writer.abort() self.assertThat(ss.allocated_size(), Equals(0)) def test_immutable_length(self): """ ``get_immutable_share_length()`` returns the length of an immutable share, as does ``BucketWriter.get_length()``.. """ ss = self.create("test_immutable_length") _, writers = self.allocate(ss, b"allocate", [22], 75) bucket = writers[22] bucket.write(0, b"X" * 75) bucket.close() self.assertThat(ss.get_immutable_share_length(b"allocate", 22), Equals(75)) self.assertThat(ss.get_buckets(b"allocate")[22].get_length(), Equals(75)) def test_allocate(self): ss = self.create("test_allocate") self.assertThat(ss.get_buckets(b"allocate"), Equals({})) already,writers = self.allocate(ss, b"allocate", [0,1,2], 75) self.assertThat(already, Equals(set())) self.assertThat(set(writers.keys()), Equals(set([0,1,2]))) # while the buckets are open, they should not count as readable self.assertThat(ss.get_buckets(b"allocate"), Equals({})) # close the buckets for i,wb in writers.items(): wb.write(0, b"%25d" % i) wb.close() # aborting a bucket that was already closed is a no-op wb.abort() # now they should be readable b = ss.get_buckets(b"allocate") self.assertThat(set(b.keys()), Equals(set([0,1,2]))) self.assertThat(b[0].read(0, 25), Equals(b"%25d" % 0)) b_str = str(b[0]) self.assertThat(b_str, Contains("BucketReader")) self.assertThat(b_str, Contains("mfwgy33dmf2g 0")) # now if we ask about writing again, the server should offer those # three buckets as already present. It should offer them even if we # don't ask about those specific ones. 
already,writers = self.allocate(ss, b"allocate", [2,3,4], 75) self.assertThat(already, Equals(set([0,1,2]))) self.assertThat(set(writers.keys()), Equals(set([3,4]))) # while those two buckets are open for writing, the server should # refuse to offer them to uploaders already2,writers2 = self.allocate(ss, b"allocate", [2,3,4,5], 75) self.assertThat(already2, Equals(set([0,1,2]))) self.assertThat(set(writers2.keys()), Equals(set([5]))) # aborting the writes should remove the tempfiles for i,wb in writers2.items(): wb.abort() already2,writers2 = self.allocate(ss, b"allocate", [2,3,4,5], 75) self.assertThat(already2, Equals(set([0,1,2]))) self.assertThat(set(writers2.keys()), Equals(set([5]))) for i,wb in writers2.items(): wb.abort() for i,wb in writers.items(): wb.abort() def test_allocate_without_lease_renewal(self): """ ``StorageServer._allocate_buckets`` does not renew leases on existing shares if ``renew_leases`` is ``False``. """ first_lease = 456 second_lease = 543 storage_index = b"allocate" clock = Clock() clock.advance(first_lease) ss = self.create( "test_allocate_without_lease_renewal", clock=clock, ) # Put a share on there already, writers = self.allocate( ss, storage_index, [0], 1, renew_leases=False, ) (writer,) = writers.values() writer.write(0, b"x") writer.close() # It should have a lease granted at the current time. shares = dict(ss.get_shares(storage_index)) self.assertEqual( [first_lease], list( lease.get_grant_renew_time_time() for lease in ShareFile(shares[0]).get_leases() ), ) # Let some time pass so we can tell if the lease on share 0 is # renewed. clock.advance(second_lease) # Put another share on there. already, writers = self.allocate( ss, storage_index, [1], 1, renew_leases=False, ) (writer,) = writers.values() writer.write(0, b"x") writer.close() # The first share's lease expiration time is unchanged. 
shares = dict(ss.get_shares(storage_index)) self.assertThat( [first_lease], Equals(list( lease.get_grant_renew_time_time() for lease in ShareFile(shares[0]).get_leases() )), ) def test_bad_container_version(self): ss = self.create("test_bad_container_version") a,w = self.allocate(ss, b"si1", [0], 10) w[0].write(0, b"\xff"*10) w[0].close() fn = os.path.join(ss.sharedir, storage_index_to_dir(b"si1"), "0") f = open(fn, "rb+") f.seek(0) f.write(struct.pack(">L", 0)) # this is invalid: minimum used is v1 f.close() ss.get_buckets(b"allocate") e = self.failUnlessRaises(UnknownImmutableContainerVersionError, ss.get_buckets, b"si1") self.assertThat(e.filename, Equals(fn)) self.assertThat(e.version, Equals(0)) self.assertThat(str(e), Contains("had unexpected version 0")) def test_disconnect(self): # simulate a disconnection ss = FoolscapStorageServer(self.create("test_disconnect")) renew_secret = b"r" * 32 cancel_secret = b"c" * 32 canary = FakeCanary() already,writers = ss.remote_allocate_buckets( b"disconnect", renew_secret, cancel_secret, sharenums=[0,1,2], allocated_size=75, canary=canary, ) self.assertThat(already, Equals(set())) self.assertThat(set(writers.keys()), Equals(set([0,1,2]))) for (f,args,kwargs) in list(canary.disconnectors.values()): f(*args, **kwargs) del already del writers # that ought to delete the incoming shares already,writers = self.allocate(ss, b"disconnect", [0,1,2], 75) self.assertThat(already, Equals(set())) self.assertThat(set(writers.keys()), Equals(set([0,1,2]))) def test_reserved_space_immutable_lease(self): """ If there is not enough available space to store an additional lease on an immutable share then ``remote_add_lease`` fails with ``NoSpace`` when an attempt is made to use it to create a new lease. """ disk = FakeDisk(total=1024, used=0) self.patch(fileutil, "get_disk_stats", disk.get_disk_stats) ss = self.create("test_reserved_space_immutable_lease") storage_index = b"x" * 16 renew_secret = b"r" * 32 cancel_secret = b"c" * 32 shares = {0: b"y" * 500} upload_immutable(ss, storage_index, renew_secret, cancel_secret, shares) # use up all the available space disk.use(disk.available) # Different secrets to produce a different lease, not a renewal. renew_secret = b"R" * 32 cancel_secret = b"C" * 32 with self.assertRaises(interfaces.NoSpace): ss.add_lease(storage_index, renew_secret, cancel_secret) def test_reserved_space_mutable_lease(self): """ If there is not enough available space to store an additional lease on a mutable share then ``remote_add_lease`` fails with ``NoSpace`` when an attempt is made to use it to create a new lease. """ disk = FakeDisk(total=1024, used=0) self.patch(fileutil, "get_disk_stats", disk.get_disk_stats) ss = self.create("test_reserved_space_mutable_lease") renew_secrets = iter( "{}{}".format("r" * 31, i).encode("ascii") for i in range(5) ) storage_index = b"x" * 16 write_enabler = b"w" * 32 cancel_secret = b"c" * 32 secrets = (write_enabler, next(renew_secrets), cancel_secret) shares = {0: b"y" * 500} upload_mutable(ss, storage_index, secrets, shares) # use up all the available space disk.use(disk.available) # The upload created one lease. There is room for three more leases # in the share header. Even if we're out of disk space, on a boring # enough filesystem we can write these. for i in range(3): ss.add_lease(storage_index, next(renew_secrets), cancel_secret) # Having used all of the space for leases in the header, we would have # to allocate storage for the next lease. Since there is no space # available, this must fail instead. 
with self.assertRaises(interfaces.NoSpace): ss.add_lease(storage_index, next(renew_secrets), cancel_secret) def test_reserved_space(self): reserved = 10000 allocated = 0 def call_get_disk_stats(whichdir, reserved_space=0): self.failUnlessEqual(reserved_space, reserved) return { 'free_for_nonroot': 15000 - allocated, 'avail': max(15000 - allocated - reserved_space, 0), } self.patch(fileutil, 'get_disk_stats', call_get_disk_stats) ss = FoolscapStorageServer(self.create("test_reserved_space", reserved_space=reserved)) # 15k available, 10k reserved, leaves 5k for shares # a newly created and filled share incurs this much overhead, beyond # the size we request. OVERHEAD = 3*4 LEASE_SIZE = 4+32+32+4 renew_secret = b"r" * 32 cancel_secret = b"c" * 32 canary = FakeCanary() already, writers = ss.remote_allocate_buckets( b"vid1", renew_secret, cancel_secret, sharenums=[0,1,2], allocated_size=1000, canary=canary, ) self.assertThat(writers, HasLength(3)) # now the StorageServer should have 3000 bytes provisionally # allocated, allowing only 2000 more to be claimed self.assertThat(ss._server._bucket_writers, HasLength(3)) # allocating 1001-byte shares only leaves room for one canary2 = FakeCanary() already2, writers2 = self.allocate(ss, b"vid2", [0,1,2], 1001, canary2) self.assertThat(writers2, HasLength(1)) self.assertThat(ss._server._bucket_writers, HasLength(4)) # we abandon the first set, so their provisional allocation should be # returned canary.disconnected() self.assertThat(ss._server._bucket_writers, HasLength(1)) # now we have a provisional allocation of 1001 bytes # and we close the second set, so their provisional allocation should # become real, long-term allocation, and grows to include the # overhead. for bw in writers2.values(): bw.write(0, b"a"*25) bw.close() self.assertThat(ss._server._bucket_writers, HasLength(0)) # this also changes the amount reported as available by call_get_disk_stats allocated = 1001 + OVERHEAD + LEASE_SIZE # now there should be ALLOCATED=1001+12+72=1085 bytes allocated, and # 5000-1085=3915 free, therefore we can fit 39 100byte shares canary3 = FakeCanary() already3, writers3 = ss.remote_allocate_buckets( b"vid3", renew_secret, cancel_secret, sharenums=list(range(100)), allocated_size=100, canary=canary3, ) self.assertThat(writers3, HasLength(39)) self.assertThat(ss._server._bucket_writers, HasLength(39)) canary3.disconnected() self.assertThat(ss._server._bucket_writers, HasLength(0)) ss._server.disownServiceParent() del ss def test_seek(self): basedir = self.workdir("test_seek_behavior") fileutil.make_dirs(basedir) filename = os.path.join(basedir, "testfile") f = open(filename, "wb") f.write(b"start") f.close() # mode="w" allows seeking-to-create-holes, but truncates pre-existing # files. mode="a" preserves previous contents but does not allow # seeking-to-create-holes. mode="r+" allows both. f = open(filename, "rb+") f.seek(100) f.write(b"100") f.close() filelen = os.stat(filename)[stat.ST_SIZE] self.assertThat(filelen, Equals(100+3)) f2 = open(filename, "rb") self.assertThat(f2.read(5), Equals(b"start")) def create_bucket_5_shares( self, ss, storage_index, expected_already=0, expected_writers=5 ): """ Given a StorageServer, create a bucket with 5 shares and return renewal and cancellation secrets. 
""" sharenums = list(range(5)) size = 100 # Creating a bucket also creates a lease: rs, cs = (hashutil.my_renewal_secret_hash(b"%d" % next(self._lease_secret)), hashutil.my_cancel_secret_hash(b"%d" % next(self._lease_secret))) already, writers = ss.allocate_buckets(storage_index, rs, cs, sharenums, size) self.assertThat(already, HasLength(expected_already)) self.assertThat(writers, HasLength(expected_writers)) for wb in writers.values(): wb.close() return rs, cs def test_leases(self): ss = self.create("test_leases") sharenums = list(range(5)) size = 100 # Create a bucket: rs0, cs0 = self.create_bucket_5_shares(ss, b"si0") # Upload of an immutable implies creation of a single lease with the # supplied secrets. (lease,) = ss.get_leases(b"si0") self.assertTrue(lease.is_renew_secret(rs0)) rs1, cs1 = self.create_bucket_5_shares(ss, b"si1") # take out a second lease on si1 rs2, cs2 = self.create_bucket_5_shares(ss, b"si1", 5, 0) (lease1, lease2) = ss.get_leases(b"si1") self.assertTrue(lease1.is_renew_secret(rs1)) self.assertTrue(lease2.is_renew_secret(rs2)) # and a third lease, using add-lease rs2a,cs2a = (hashutil.my_renewal_secret_hash(b"%d" % next(self._lease_secret)), hashutil.my_cancel_secret_hash(b"%d" % next(self._lease_secret))) ss.add_lease(b"si1", rs2a, cs2a) (lease1, lease2, lease3) = ss.get_leases(b"si1") self.assertTrue(lease1.is_renew_secret(rs1)) self.assertTrue(lease2.is_renew_secret(rs2)) self.assertTrue(lease3.is_renew_secret(rs2a)) # add-lease on a missing storage index is silently ignored self.assertThat(ss.add_lease(b"si18", b"", b""), Equals(None)) # check that si0 is readable readers = ss.get_buckets(b"si0") self.assertThat(readers, HasLength(5)) # renew the first lease. Only the proper renew_secret should work ss.renew_lease(b"si0", rs0) self.failUnlessRaises(IndexError, ss.renew_lease, b"si0", cs0) self.failUnlessRaises(IndexError, ss.renew_lease, b"si0", rs1) # check that si0 is still readable readers = ss.get_buckets(b"si0") self.assertThat(readers, HasLength(5)) # There is no such method as remote_cancel_lease for now -- see # ticket #1528. self.assertFalse(hasattr(FoolscapStorageServer(ss), 'remote_cancel_lease'), \ "ss should not have a 'remote_cancel_lease' method/attribute") # test overlapping uploads rs3,cs3 = (hashutil.my_renewal_secret_hash(b"%d" % next(self._lease_secret)), hashutil.my_cancel_secret_hash(b"%d" % next(self._lease_secret))) rs4,cs4 = (hashutil.my_renewal_secret_hash(b"%d" % next(self._lease_secret)), hashutil.my_cancel_secret_hash(b"%d" % next(self._lease_secret))) already,writers = ss.allocate_buckets(b"si3", rs3, cs3, sharenums, size) self.assertThat(already, HasLength(0)) self.assertThat(writers, HasLength(5)) already2,writers2 = ss.allocate_buckets(b"si3", rs4, cs4, sharenums, size) self.assertThat(already2, HasLength(0)) self.assertThat(writers2, HasLength(0)) for wb in writers.values(): wb.close() leases = list(ss.get_leases(b"si3")) self.assertThat(leases, HasLength(1)) already3,writers3 = ss.allocate_buckets(b"si3", rs4, cs4, sharenums, size) self.assertThat(already3, HasLength(5)) self.assertThat(writers3, HasLength(0)) leases = list(ss.get_leases(b"si3")) self.assertThat(leases, HasLength(2)) def test_immutable_add_lease_renews(self): """ Adding a lease on an already leased immutable with the same secret just renews it. 
""" clock = Clock() clock.advance(123) ss = self.create("test_immutable_add_lease_renews", clock=clock) # Start out with single lease created with bucket: renewal_secret, cancel_secret = self.create_bucket_5_shares(ss, b"si0") [lease] = ss.get_leases(b"si0") self.assertThat(lease.get_expiration_time(), Equals(123 + DEFAULT_RENEWAL_TIME)) # Time passes: clock.advance(123456) # Adding a lease with matching renewal secret just renews it: ss.add_lease(b"si0", renewal_secret, cancel_secret) [lease] = ss.get_leases(b"si0") self.assertThat(lease.get_expiration_time(), Equals(123 + 123456 + DEFAULT_RENEWAL_TIME)) def test_have_shares(self): """By default the StorageServer has no shares.""" workdir = self.workdir("test_have_shares") ss = StorageServer(workdir, b"\x00" * 20, readonly_storage=True) self.assertFalse(ss.have_shares()) def test_readonly(self): workdir = self.workdir("test_readonly") ss = StorageServer(workdir, b"\x00" * 20, readonly_storage=True) ss.setServiceParent(self.sparent) already,writers = self.allocate(ss, b"vid", [0,1,2], 75) self.assertThat(already, Equals(set())) self.assertThat(writers, Equals({})) stats = ss.get_stats() self.assertThat(stats["storage_server.accepting_immutable_shares"], Equals(0)) if "storage_server.disk_avail" in stats: # Some platforms may not have an API to get disk stats. # But if there are stats, readonly_storage means disk_avail=0 self.assertThat(stats["storage_server.disk_avail"], Equals(0)) def test_discard(self): # discard is really only used for other tests, but we test it anyways workdir = self.workdir("test_discard") ss = StorageServer(workdir, b"\x00" * 20, discard_storage=True) ss.setServiceParent(self.sparent) already,writers = self.allocate(ss, b"vid", [0,1,2], 75) self.assertThat(already, Equals(set())) self.assertThat(set(writers.keys()), Equals(set([0,1,2]))) for i,wb in writers.items(): wb.write(0, b"%25d" % i) wb.close() # since we discard the data, the shares should be present but sparse. # Since we write with some seeks, the data we read back will be all # zeros. b = ss.get_buckets(b"vid") self.assertThat(set(b.keys()), Equals(set([0,1,2]))) self.assertThat(b[0].read(0, 25), Equals(b"\x00" * 25)) def test_reserved_space_advise_corruption(self): """ If there is no available space then ``remote_advise_corrupt_share`` does not write a corruption report. 
""" disk = FakeDisk(total=1024, used=1024) self.patch(fileutil, "get_disk_stats", disk.get_disk_stats) workdir = self.workdir("test_reserved_space_advise_corruption") ss = StorageServer(workdir, b"\x00" * 20, discard_storage=True) ss.setServiceParent(self.sparent) upload_immutable(ss, b"si0", b"r" * 32, b"c" * 32, {0: b""}) ss.advise_corrupt_share(b"immutable", b"si0", 0, b"This share smells funny.\n") self.assertThat( [], Equals(os.listdir(ss.corruption_advisory_dir)), ) def test_advise_corruption(self): workdir = self.workdir("test_advise_corruption") ss = StorageServer(workdir, b"\x00" * 20, discard_storage=True) ss.setServiceParent(self.sparent) si0_s = base32.b2a(b"si0") upload_immutable(ss, b"si0", b"r" * 32, b"c" * 32, {0: b""}) ss.advise_corrupt_share(b"immutable", b"si0", 0, b"This share smells funny.\n") reportdir = os.path.join(workdir, "corruption-advisories") reports = os.listdir(reportdir) self.assertThat(reports, HasLength(1)) report_si0 = reports[0] self.assertThat(report_si0, Contains(ensure_str(si0_s))) f = open(os.path.join(reportdir, report_si0), "rb") report = f.read() f.close() self.assertThat(report, Contains(b"type: immutable")) self.assertThat(report, Contains(b"storage_index: %s" % si0_s)) self.assertThat(report, Contains(b"share_number: 0")) self.assertThat(report, Contains(b"This share smells funny.")) # test the RIBucketWriter version too si1_s = base32.b2a(b"si1") already,writers = self.allocate(ss, b"si1", [1], 75) self.assertThat(already, Equals(set())) self.assertThat(set(writers.keys()), Equals(set([1]))) writers[1].write(0, b"data") writers[1].close() b = ss.get_buckets(b"si1") self.assertThat(set(b.keys()), Equals(set([1]))) b[1].advise_corrupt_share(b"This share tastes like dust.\n") reports = os.listdir(reportdir) self.assertThat(reports, HasLength(2)) report_si1 = [r for r in reports if si1_s.decode() in r][0] f = open(os.path.join(reportdir, report_si1), "rb") report = f.read() f.close() self.assertThat(report, Contains(b"type: immutable")) self.assertThat(report, Contains(b"storage_index: %s" % si1_s)) self.assertThat(report, Contains(b"share_number: 1")) self.assertThat(report, Contains(b"This share tastes like dust.")) def test_advise_corruption_missing(self): """ If a corruption advisory is received for a share that is not present on this server then it is not persisted. 
""" workdir = self.workdir("test_advise_corruption_missing") ss = StorageServer(workdir, b"\x00" * 20, discard_storage=True) ss.setServiceParent(self.sparent) # Upload one share for this storage index upload_immutable(ss, b"si0", b"r" * 32, b"c" * 32, {0: b""}) # And try to submit a corruption advisory about a different share ss.advise_corrupt_share(b"immutable", b"si0", 1, b"This share smells funny.\n") self.assertThat( [], Equals(os.listdir(ss.corruption_advisory_dir)), ) class MutableServer(SyncTestCase): def setUp(self): super(MutableServer, self).setUp() self.sparent = LoggingServiceParent() self._lease_secret = itertools.count() self.addCleanup(self.sparent.stopService) def workdir(self, name): basedir = os.path.join("storage", "MutableServer", name) return basedir def create(self, name, clock=None): workdir = self.workdir(name) if clock is None: clock = Clock() ss = StorageServer(workdir, b"\x00" * 20, clock=clock) ss.setServiceParent(self.sparent) return ss def test_create(self): self.create("test_create") def write_enabler(self, we_tag): return hashutil.tagged_hash(b"we_blah", we_tag) def renew_secret(self, tag): if isinstance(tag, int): tag = b"%d" % (tag,) self.assertThat(tag, IsInstance(bytes)) return hashutil.tagged_hash(b"renew_blah", tag) def cancel_secret(self, tag): if isinstance(tag, int): tag = b"%d" % (tag,) self.assertThat(tag, IsInstance(bytes)) return hashutil.tagged_hash(b"cancel_blah", tag) def allocate(self, ss, storage_index, we_tag, lease_tag, sharenums, size): write_enabler = self.write_enabler(we_tag) renew_secret = self.renew_secret(lease_tag) cancel_secret = self.cancel_secret(lease_tag) rstaraw = ss.slot_testv_and_readv_and_writev testandwritev = dict( [ (shnum, ([], [], None) ) for shnum in sharenums ] ) readv = [] rc = rstaraw(storage_index, (write_enabler, renew_secret, cancel_secret), testandwritev, readv) (did_write, readv_data) = rc self.assertTrue(did_write) self.assertThat(readv_data, IsInstance(dict)) self.assertThat(readv_data, HasLength(0)) def test_enumerate_mutable_shares(self): """ ``StorageServer.enumerate_mutable_shares()`` returns a set of share numbers for the given storage index, or an empty set if it does not exist at all. """ ss = self.create("test_enumerate_mutable_shares") # Initially, nothing exists: empty = ss.enumerate_mutable_shares(b"si1") self.allocate(ss, b"si1", b"we1", b"le1", [0, 1, 4, 2], 12) shares0_1_2_4 = ss.enumerate_mutable_shares(b"si1") # Remove share 2, by setting size to 0: secrets = (self.write_enabler(b"we1"), self.renew_secret(b"le1"), self.cancel_secret(b"le1")) ss.slot_testv_and_readv_and_writev(b"si1", secrets, {2: ([], [], 0)}, []) shares0_1_4 = ss.enumerate_mutable_shares(b"si1") self.assertThat( (empty, shares0_1_2_4, shares0_1_4), Equals((set(), {0, 1, 2, 4}, {0, 1, 4})) ) def test_mutable_share_length(self): """``get_mutable_share_length()`` returns the length of the share.""" ss = self.create("test_mutable_share_length") self.allocate(ss, b"si1", b"we1", b"le1", [16], 23) ss.slot_testv_and_readv_and_writev( b"si1", (self.write_enabler(b"we1"), self.renew_secret(b"le1"), self.cancel_secret(b"le1")), {16: ([], [(0, b"x" * 23)], None)}, [] ) self.assertThat(ss.get_mutable_share_length(b"si1", 16), Equals(23)) def test_mutable_share_length_unknown(self): """ ``get_mutable_share_length()`` raises a ``KeyError`` on unknown shares. 
""" ss = self.create("test_mutable_share_length_unknown") self.allocate(ss, b"si1", b"we1", b"le1", [16], 23) ss.slot_testv_and_readv_and_writev( b"si1", (self.write_enabler(b"we1"), self.renew_secret(b"le1"), self.cancel_secret(b"le1")), {16: ([], [(0, b"x" * 23)], None)}, [] ) with self.assertRaises(KeyError): # Wrong share number. ss.get_mutable_share_length(b"si1", 17) with self.assertRaises(KeyError): # Wrong storage index ss.get_mutable_share_length(b"unknown", 16) def test_bad_magic(self): ss = self.create("test_bad_magic") self.allocate(ss, b"si1", b"we1", next(self._lease_secret), set([0]), 10) fn = os.path.join(ss.sharedir, storage_index_to_dir(b"si1"), "0") f = open(fn, "rb+") f.seek(0) f.write(b"BAD MAGIC") f.close() read = ss.slot_readv e = self.failUnlessRaises(UnknownMutableContainerVersionError, read, b"si1", [0], [(0,10)]) self.assertThat(e.filename, Equals(fn)) self.assertTrue(e.version.startswith(b"BAD MAGIC")) self.assertThat(str(e), Contains("had unexpected version")) self.assertThat(str(e), Contains("BAD MAGIC")) def test_container_size(self): ss = self.create("test_container_size") self.allocate(ss, b"si1", b"we1", next(self._lease_secret), set([0,1,2]), 100) read = ss.slot_readv rstaraw = ss.slot_testv_and_readv_and_writev secrets = ( self.write_enabler(b"we1"), self.renew_secret(b"we1"), self.cancel_secret(b"we1") ) data = b"".join([ (b"%d" % i) * 10 for i in range(10) ]) answer = rstaraw(b"si1", secrets, {0: ([], [(0,data)], len(data)+12)}, []) self.assertThat(answer, Equals((True, {0:[],1:[],2:[]}))) # Trying to make the container too large (by sending a write vector # whose offset is too high) will raise an exception. TOOBIG = MutableShareFile.MAX_SIZE + 10 self.failUnlessRaises(DataTooLargeError, rstaraw, b"si1", secrets, {0: ([], [(TOOBIG,data)], None)}, []) answer = rstaraw(b"si1", secrets, {0: ([], [(0,data)], None)}, []) self.assertThat(answer, Equals((True, {0:[],1:[],2:[]}))) read_answer = read(b"si1", [0], [(0,10)]) self.assertThat(read_answer, Equals({0: [data[:10]]})) # Sending a new_length shorter than the current length truncates the # data. answer = rstaraw(b"si1", secrets, {0: ([], [], 9)}, []) read_answer = read(b"si1", [0], [(0,10)]) self.assertThat(read_answer, Equals({0: [data[:9]]})) # Sending a new_length longer than the current length doesn't change # the data. answer = rstaraw(b"si1", secrets, {0: ([], [], 20)}, []) assert answer == (True, {0:[],1:[],2:[]}) read_answer = read(b"si1", [0], [(0, 20)]) self.assertThat(read_answer, Equals({0: [data[:9]]})) # Sending a write vector whose start is after the end of the current # data doesn't reveal "whatever was there last time" (palimpsest), # but instead fills with zeroes. # To test this, we fill the data area with a recognizable pattern. pattern = u''.join([chr(i) for i in range(100)]).encode("utf-8") answer = rstaraw(b"si1", secrets, {0: ([], [(0, pattern)], None)}, []) assert answer == (True, {0:[],1:[],2:[]}) # Then truncate the data... answer = rstaraw(b"si1", secrets, {0: ([], [], 20)}, []) assert answer == (True, {0:[],1:[],2:[]}) # Just confirm that you get an empty string if you try to read from # past the (new) endpoint now. answer = rstaraw(b"si1", secrets, {0: ([], [], None)}, [(20, 1980)]) self.assertThat(answer, Equals((True, {0:[b''],1:[b''],2:[b'']}))) # Then the extend the file by writing a vector which starts out past # the end... 
answer = rstaraw(b"si1", secrets, {0: ([], [(50, b'hellothere')], None)}, []) assert answer == (True, {0:[],1:[],2:[]}) # Now if you read the stuff between 20 (where we earlier truncated) # and 50, it had better be all zeroes. answer = rstaraw(b"si1", secrets, {0: ([], [], None)}, [(20, 30)]) self.assertThat(answer, Equals((True, {0:[b'\x00'*30],1:[b''],2:[b'']}))) # Also see if the server explicitly declares that it supports this # feature. ver = ss.get_version() storage_v1_ver = ver[b"http://allmydata.org/tahoe/protocols/storage/v1"] self.assertTrue(storage_v1_ver.get(b"fills-holes-with-zero-bytes")) # If the size is dropped to zero the share is deleted. answer = rstaraw(b"si1", secrets, {0: ([], [(0,data)], 0)}, []) self.assertThat(answer, Equals((True, {0:[],1:[],2:[]}))) read_answer = read(b"si1", [0], [(0,10)]) self.assertThat(read_answer, Equals({})) def test_allocate(self): ss = self.create("test_allocate") self.allocate(ss, b"si1", b"we1", next(self._lease_secret), set([0,1,2]), 100) read = ss.slot_readv self.assertThat(read(b"si1", [0], [(0, 10)]), Equals({0: [b""]})) self.assertThat(read(b"si1", [], [(0, 10)]), Equals({0: [b""], 1: [b""], 2: [b""]})) self.assertThat(read(b"si1", [0], [(100, 10)]), Equals({0: [b""]})) # try writing to one secrets = ( self.write_enabler(b"we1"), self.renew_secret(b"we1"), self.cancel_secret(b"we1") ) data = b"".join([ (b"%d" % i) * 10 for i in range(10) ]) write = ss.slot_testv_and_readv_and_writev answer = write(b"si1", secrets, {0: ([], [(0,data)], None)}, []) self.assertThat(answer, Equals((True, {0:[],1:[],2:[]}))) self.assertThat(read(b"si1", [0], [(0,20)]), Equals({0: [b"00000000001111111111"]})) self.assertThat(read(b"si1", [0], [(95,10)]), Equals({0: [b"99999"]})) #self.failUnlessEqual(s0.get_length(), 100) bad_secrets = (b"bad write enabler", secrets[1], secrets[2]) f = self.failUnlessRaises(BadWriteEnablerError, write, b"si1", bad_secrets, {}, []) self.assertThat(str(f), Contains("The write enabler was recorded by nodeid 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'.")) # this testv should fail answer = write(b"si1", secrets, {0: ([(0, 12, b"eq", b"444444444444"), (20, 5, b"eq", b"22222"), ], [(0, b"x"*100)], None), }, [(0,12), (20,5)], ) self.assertThat(answer, Equals((False, {0: [b"000000000011", b"22222"], 1: [b"", b""], 2: [b"", b""], }))) self.assertThat(read(b"si1", [0], [(0,100)]), Equals({0: [data]})) def test_operators(self): # test operators, the data we're comparing is '11111' in all cases. # test both fail+pass, reset data after each one. 
ss = self.create("test_operators") secrets = ( self.write_enabler(b"we1"), self.renew_secret(b"we1"), self.cancel_secret(b"we1") ) data = b"".join([ (b"%d" % i) * 10 for i in range(10) ]) write = ss.slot_testv_and_readv_and_writev read = ss.slot_readv def reset(): write(b"si1", secrets, {0: ([], [(0,data)], None)}, []) reset() # eq answer = write(b"si1", secrets, {0: ([(10, 5, b"eq", b"11112"), ], [(0, b"x"*100)], None, )}, [(10,5)]) self.assertThat(answer, Equals((False, {0: [b"11111"]}))) self.assertThat(read(b"si1", [0], [(0,100)]), Equals({0: [data]})) reset() answer = write(b"si1", secrets, {0: ([(10, 5, b"eq", b"11111"), ], [(0, b"y"*100)], None, )}, [(10,5)]) self.assertThat(answer, Equals((True, {0: [b"11111"]}))) self.assertThat(read(b"si1", [0], [(0,100)]), Equals({0: [b"y"*100]})) reset() # finally, test some operators against empty shares answer = write(b"si1", secrets, {1: ([(10, 5, b"eq", b"11112"), ], [(0, b"x"*100)], None, )}, [(10,5)]) self.assertThat(answer, Equals((False, {0: [b"11111"]}))) self.assertThat(read(b"si1", [0], [(0,100)]), Equals({0: [data]})) reset() def test_readv(self): ss = self.create("test_readv") secrets = ( self.write_enabler(b"we1"), self.renew_secret(b"we1"), self.cancel_secret(b"we1") ) data = b"".join([ (b"%d" % i) * 10 for i in range(10) ]) write = ss.slot_testv_and_readv_and_writev read = ss.slot_readv data = [(b"%d" % i) * 100 for i in range(3)] rc = write(b"si1", secrets, {0: ([], [(0,data[0])], None), 1: ([], [(0,data[1])], None), 2: ([], [(0,data[2])], None), }, []) self.assertThat(rc, Equals((True, {}))) answer = read(b"si1", [], [(0, 10)]) self.assertThat(answer, Equals({0: [b"0"*10], 1: [b"1"*10], 2: [b"2"*10]})) def compare_leases_without_timestamps(self, leases_a, leases_b): """ Assert that, except for expiration times, ``leases_a`` contains the same lease information as ``leases_b``. """ for a, b in zip(leases_a, leases_b): # The leases aren't always of the same type (though of course # corresponding elements in the two lists should be of the same # type as each other) so it's inconvenient to just reach in and # normalize the expiration timestamp. We don't want to call # `renew` on both objects to normalize the expiration timestamp in # case `renew` is broken and gives us back equal outputs from # non-equal inputs (expiration timestamp aside). It seems # reasonably safe to use `renew` to make _one_ of the timestamps # equal to the other though. self.assertThat( a.renew(b.get_expiration_time()), Equals(b), ) self.assertThat(len(leases_a), Equals(len(leases_b))) def test_leases(self): ss = self.create("test_leases") def secrets(n): return ( self.write_enabler(b"we1"), self.renew_secret(b"we1-%d" % n), self.cancel_secret(b"we1-%d" % n) ) data = b"".join([ (b"%d" % i) * 10 for i in range(10) ]) write = ss.slot_testv_and_readv_and_writev read = ss.slot_readv rc = write(b"si1", secrets(0), {0: ([], [(0,data)], None)}, []) self.assertThat(rc, Equals((True, {}))) # create a random non-numeric file in the bucket directory, to # exercise the code that's supposed to ignore those. 
bucket_dir = os.path.join(self.workdir("test_leases"), "shares", storage_index_to_dir(b"si1")) f = open(os.path.join(bucket_dir, "ignore_me.txt"), "w") f.write("you ought to be ignoring me\n") f.close() s0 = MutableShareFile(os.path.join(bucket_dir, "0")) self.assertThat(list(s0.get_leases()), HasLength(1)) # add-lease on a missing storage index is silently ignored self.assertThat(ss.add_lease(b"si18", b"", b""), Equals(None)) # re-allocate the slots and use the same secrets, that should update # the lease write(b"si1", secrets(0), {0: ([], [(0,data)], None)}, []) self.assertThat(list(s0.get_leases()), HasLength(1)) # renew it directly ss.renew_lease(b"si1", secrets(0)[1]) self.assertThat(list(s0.get_leases()), HasLength(1)) # now allocate them with a bunch of different secrets, to trigger the # extended lease code. Use add_lease for one of them. write(b"si1", secrets(1), {0: ([], [(0,data)], None)}, []) self.assertThat(list(s0.get_leases()), HasLength(2)) secrets2 = secrets(2) ss.add_lease(b"si1", secrets2[1], secrets2[2]) self.assertThat(list(s0.get_leases()), HasLength(3)) write(b"si1", secrets(3), {0: ([], [(0,data)], None)}, []) write(b"si1", secrets(4), {0: ([], [(0,data)], None)}, []) write(b"si1", secrets(5), {0: ([], [(0,data)], None)}, []) self.assertThat(list(s0.get_leases()), HasLength(6)) all_leases = list(s0.get_leases()) # and write enough data to expand the container, forcing the server # to move the leases write(b"si1", secrets(0), {0: ([], [(0,data)], 200), }, []) # read back the leases, make sure they're still intact. self.compare_leases_without_timestamps(all_leases, list(s0.get_leases())) ss.renew_lease(b"si1", secrets(0)[1]) ss.renew_lease(b"si1", secrets(1)[1]) ss.renew_lease(b"si1", secrets(2)[1]) ss.renew_lease(b"si1", secrets(3)[1]) ss.renew_lease(b"si1", secrets(4)[1]) self.compare_leases_without_timestamps(all_leases, list(s0.get_leases())) # get a new copy of the leases, with the current timestamps. Reading # data and failing to renew/cancel leases should leave the timestamps # alone. all_leases = list(s0.get_leases()) # renewing with a bogus token should prompt an error message # examine the exception thus raised, make sure the old nodeid is # present, to provide for share migration e = self.failUnlessRaises(IndexError, ss.renew_lease, b"si1", secrets(20)[1]) e_s = str(e) self.assertThat(e_s, Contains("Unable to renew non-existent lease")) self.assertThat(e_s, Contains("I have leases accepted by nodeids:")) self.assertThat(e_s, Contains("nodeids: 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' .")) self.assertThat(all_leases, Equals(list(s0.get_leases()))) # reading shares should not modify the timestamp read(b"si1", [], [(0,200)]) self.assertThat(all_leases, Equals(list(s0.get_leases()))) write(b"si1", secrets(0), {0: ([], [(200, b"make me bigger")], None)}, []) self.compare_leases_without_timestamps(all_leases, list(s0.get_leases())) write(b"si1", secrets(0), {0: ([], [(500, b"make me really bigger")], None)}, []) self.compare_leases_without_timestamps(all_leases, list(s0.get_leases())) def test_mutable_add_lease_renews(self): """ Adding a lease on an already leased mutable with the same secret just renews it. 
""" clock = Clock() clock.advance(235) ss = self.create("test_mutable_add_lease_renews", clock=clock) def secrets(n): return ( self.write_enabler(b"we1"), self.renew_secret(b"we1-%d" % n), self.cancel_secret(b"we1-%d" % n) ) data = b"".join([ (b"%d" % i) * 10 for i in range(10) ]) write = ss.slot_testv_and_readv_and_writev write_enabler, renew_secret, cancel_secret = secrets(0) rc = write(b"si1", (write_enabler, renew_secret, cancel_secret), {0: ([], [(0,data)], None)}, []) self.assertThat(rc, Equals((True, {}))) bucket_dir = os.path.join(self.workdir("test_mutable_add_lease_renews"), "shares", storage_index_to_dir(b"si1")) s0 = MutableShareFile(os.path.join(bucket_dir, "0")) [lease] = s0.get_leases() self.assertThat(lease.get_expiration_time(), Equals(235 + DEFAULT_RENEWAL_TIME)) # Time passes... clock.advance(835) # Adding a lease renews it: ss.add_lease(b"si1", renew_secret, cancel_secret) [lease] = s0.get_leases() self.assertThat(lease.get_expiration_time(), Equals(235 + 835 + DEFAULT_RENEWAL_TIME)) def test_remove(self): ss = self.create("test_remove") self.allocate(ss, b"si1", b"we1", next(self._lease_secret), set([0,1,2]), 100) readv = ss.slot_readv writev = ss.slot_testv_and_readv_and_writev secrets = ( self.write_enabler(b"we1"), self.renew_secret(b"we1"), self.cancel_secret(b"we1") ) # delete sh0 by setting its size to zero answer = writev(b"si1", secrets, {0: ([], [], 0)}, []) # the answer should mention all the shares that existed before the # write self.assertThat(answer, Equals((True, {0:[],1:[],2:[]}))) # but a new read should show only sh1 and sh2 self.assertThat(readv(b"si1", [], [(0,10)]), Equals({1: [b""], 2: [b""]})) # delete sh1 by setting its size to zero answer = writev(b"si1", secrets, {1: ([], [], 0)}, []) self.assertThat(answer, Equals((True, {1:[],2:[]}))) self.assertThat(readv(b"si1", [], [(0,10)]), Equals({2: [b""]})) # delete sh2 by setting its size to zero answer = writev(b"si1", secrets, {2: ([], [], 0)}, []) self.assertThat(answer, Equals((True, {2:[]}))) self.assertThat(readv(b"si1", [], [(0,10)]), Equals({})) # and the bucket directory should now be gone si = base32.b2a(b"si1").decode() # note: this is a detail of the storage server implementation, and # may change in the future # filesystem paths are native strings prefix = si[:2] prefixdir = os.path.join(self.workdir("test_remove"), "shares", prefix) bucketdir = os.path.join(prefixdir, si) self.assertTrue(os.path.exists(prefixdir), prefixdir) self.assertFalse(os.path.exists(bucketdir), bucketdir) def test_writev_without_renew_lease(self): """ The helper method ``slot_testv_and_readv_and_writev`` does not renew leases if ``False`` is passed for the ``renew_leases`` parameter. """ ss = self.create("test_writev_without_renew_lease") storage_index = b"si2" secrets = ( self.write_enabler(storage_index), self.renew_secret(storage_index), self.cancel_secret(storage_index), ) sharenum = 3 datav = [(0, b"Hello, world")] ss.slot_testv_and_readv_and_writev( storage_index=storage_index, secrets=secrets, test_and_write_vectors={ sharenum: ([], datav, None), }, read_vector=[], renew_leases=False, ) leases = list(ss.get_slot_leases(storage_index)) self.assertThat([], Equals(leases)) def test_get_slot_leases_empty_slot(self): """ When ``get_slot_leases`` is called for a slot for which the server has no shares, it returns an empty iterable. 
""" ss = self.create("test_get_slot_leases_empty_slot") self.assertThat( list(ss.get_slot_leases(b"si1")), Equals([]), ) def test_remove_non_present(self): """ A write vector which would remove a share completely is applied as a no-op by a server which does not have the share. """ ss = self.create("test_remove_non_present") storage_index = b"si1" secrets = ( self.write_enabler(storage_index), self.renew_secret(storage_index), self.cancel_secret(storage_index), ) sharenum = 3 testv = [] datav = [] new_length = 0 read_vector = [] # We don't even need to create any shares to exercise this # functionality. Just go straight to sending a truncate-to-zero # write. testv_is_good, read_data = ss.slot_testv_and_readv_and_writev( storage_index=storage_index, secrets=secrets, test_and_write_vectors={ sharenum: (testv, datav, new_length), }, read_vector=read_vector, ) self.assertTrue(testv_is_good) self.assertThat({}, Equals(read_data)) class MDMFProxies(AsyncTestCase, ShouldFailMixin): def setUp(self): super(MDMFProxies, self).setUp() self.sparent = LoggingServiceParent() self._lease_secret = itertools.count() self.ss = self.create("MDMFProxies storage test server") self.rref = RemoteBucket(FoolscapStorageServer(self.ss)) self.storage_server = _StorageServer(lambda: self.rref) self.secrets = (self.write_enabler(b"we_secret"), self.renew_secret(b"renew_secret"), self.cancel_secret(b"cancel_secret")) self.segment = b"aaaaaa" self.block = b"aa" self.salt = b"a" * 16 self.block_hash = b"a" * 32 self.block_hash_tree = [self.block_hash for i in range(6)] self.share_hash = self.block_hash self.share_hash_chain = dict([(i, self.share_hash) for i in range(6)]) self.signature = b"foobarbaz" self.verification_key = b"vvvvvv" self.encprivkey = b"private" self.root_hash = self.block_hash self.salt_hash = self.root_hash self.salt_hash_tree = [self.salt_hash for i in range(6)] self.block_hash_tree_s = self.serialize_blockhashes(self.block_hash_tree) self.share_hash_chain_s = self.serialize_sharehashes(self.share_hash_chain) # blockhashes and salt hashes are serialized in the same way, # only we lop off the first element and store that in the # header. self.salt_hash_tree_s = self.serialize_blockhashes(self.salt_hash_tree[1:]) def tearDown(self): super(MDMFProxies, self).tearDown() self.sparent.stopService() shutil.rmtree(self.workdir("MDMFProxies storage test server")) def write_enabler(self, we_tag): return hashutil.tagged_hash(b"we_blah", we_tag) def renew_secret(self, tag): if isinstance(tag, int): tag = b"%d" % tag return hashutil.tagged_hash(b"renew_blah", tag) def cancel_secret(self, tag): if isinstance(tag, int): tag = b"%d" % tag return hashutil.tagged_hash(b"cancel_blah", tag) def workdir(self, name): basedir = os.path.join("storage", "MutableServer", name) return basedir def create(self, name): workdir = self.workdir(name) ss = StorageServer(workdir, b"\x00" * 20) ss.setServiceParent(self.sparent) return ss def build_test_mdmf_share(self, tail_segment=False, empty=False): # Start with the checkstring data = struct.pack(">BQ32s", 1, 0, self.root_hash) self.checkstring = data # Next, the encoding parameters if tail_segment: data += struct.pack(">BBQQ", 3, 10, 6, 33) elif empty: data += struct.pack(">BBQQ", 3, 10, 0, 0) else: data += struct.pack(">BBQQ", 3, 10, 6, 36) # Now we'll build the offsets. 
sharedata = b"" if not tail_segment and not empty: for i in range(6): sharedata += self.salt + self.block elif tail_segment: for i in range(5): sharedata += self.salt + self.block sharedata += self.salt + b"a" # The encrypted private key comes after the shares + salts offset_size = struct.calcsize(MDMFOFFSETS) encrypted_private_key_offset = len(data) + offset_size # The share has chain comes after the private key sharehashes_offset = encrypted_private_key_offset + \ len(self.encprivkey) # The signature comes after the share hash chain. signature_offset = sharehashes_offset + len(self.share_hash_chain_s) verification_key_offset = signature_offset + len(self.signature) verification_key_end = verification_key_offset + \ len(self.verification_key) share_data_offset = offset_size share_data_offset += PRIVATE_KEY_SIZE share_data_offset += SIGNATURE_SIZE share_data_offset += VERIFICATION_KEY_SIZE share_data_offset += SHARE_HASH_CHAIN_SIZE blockhashes_offset = share_data_offset + len(sharedata) eof_offset = blockhashes_offset + len(self.block_hash_tree_s) data += struct.pack(MDMFOFFSETS, encrypted_private_key_offset, sharehashes_offset, signature_offset, verification_key_offset, verification_key_end, share_data_offset, blockhashes_offset, eof_offset) self.offsets = {} self.offsets['enc_privkey'] = encrypted_private_key_offset self.offsets['block_hash_tree'] = blockhashes_offset self.offsets['share_hash_chain'] = sharehashes_offset self.offsets['signature'] = signature_offset self.offsets['verification_key'] = verification_key_offset self.offsets['share_data'] = share_data_offset self.offsets['verification_key_end'] = verification_key_end self.offsets['EOF'] = eof_offset # the private key, data += self.encprivkey # the sharehashes data += self.share_hash_chain_s # the signature, data += self.signature # and the verification key data += self.verification_key # Then we'll add in gibberish until we get to the right point. nulls = b"".join([b" " for i in range(len(data), share_data_offset)]) data += nulls # Then the share data data += sharedata # the blockhashes data += self.block_hash_tree_s return data def write_test_share_to_server(self, storage_index, tail_segment=False, empty=False): """ I write some data for the read tests to read to self.ss If tail_segment=True, then I will write a share that has a smaller tail segment than other segments. """ write = self.ss.slot_testv_and_readv_and_writev data = self.build_test_mdmf_share(tail_segment, empty) # Finally, we write the whole thing to the storage server in one # pass. 
testvs = [(0, 1, b"eq", b"")] tws = {} tws[0] = (testvs, [(0, data)], None) readv = [(0, 1)] results = write(storage_index, self.secrets, tws, readv) self.assertTrue(results[0]) def build_test_sdmf_share(self, empty=False): if empty: sharedata = b"" else: sharedata = self.segment * 6 self.sharedata = sharedata blocksize = len(sharedata) // 3 block = sharedata[:blocksize] self.blockdata = block prefix = struct.pack(">BQ32s16s BBQQ", 0, # version, 0, self.root_hash, self.salt, 3, 10, len(sharedata), len(sharedata), ) post_offset = struct.calcsize(">BQ32s16sBBQQLLLLQQ") signature_offset = post_offset + len(self.verification_key) sharehashes_offset = signature_offset + len(self.signature) blockhashes_offset = sharehashes_offset + len(self.share_hash_chain_s) sharedata_offset = blockhashes_offset + len(self.block_hash_tree_s) encprivkey_offset = sharedata_offset + len(block) eof_offset = encprivkey_offset + len(self.encprivkey) offsets = struct.pack(">LLLLQQ", signature_offset, sharehashes_offset, blockhashes_offset, sharedata_offset, encprivkey_offset, eof_offset) final_share = b"".join([prefix, offsets, self.verification_key, self.signature, self.share_hash_chain_s, self.block_hash_tree_s, block, self.encprivkey]) self.offsets = {} self.offsets['signature'] = signature_offset self.offsets['share_hash_chain'] = sharehashes_offset self.offsets['block_hash_tree'] = blockhashes_offset self.offsets['share_data'] = sharedata_offset self.offsets['enc_privkey'] = encprivkey_offset self.offsets['EOF'] = eof_offset return final_share def write_sdmf_share_to_server(self, storage_index, empty=False): # Some tests need SDMF shares to verify that we can still # read them. This method writes one, which resembles but is not write = self.ss.slot_testv_and_readv_and_writev share = self.build_test_sdmf_share(empty) testvs = [(0, 1, b"eq", b"")] tws = {} tws[0] = (testvs, [(0, share)], None) readv = [] results = write(storage_index, self.secrets, tws, readv) self.assertTrue(results[0]) def test_read(self): self.write_test_share_to_server(b"si1") mr = MDMFSlotReadProxy(self.storage_server, b"si1", 0) # Check that every method equals what we expect it to. 
d = defer.succeed(None) def _check_block_and_salt(block_and_salt): (block, salt) = block_and_salt self.assertThat(block, Equals(self.block)) self.assertThat(salt, Equals(self.salt)) for i in range(6): d.addCallback(lambda ignored, i=i: mr.get_block_and_salt(i)) d.addCallback(_check_block_and_salt) d.addCallback(lambda ignored: mr.get_encprivkey()) d.addCallback(lambda encprivkey: self.assertThat(self.encprivkey, Equals(encprivkey))) d.addCallback(lambda ignored: mr.get_blockhashes()) d.addCallback(lambda blockhashes: self.assertThat(self.block_hash_tree, Equals(blockhashes))) d.addCallback(lambda ignored: mr.get_sharehashes()) d.addCallback(lambda sharehashes: self.assertThat(self.share_hash_chain, Equals(sharehashes))) d.addCallback(lambda ignored: mr.get_signature()) d.addCallback(lambda signature: self.assertThat(signature, Equals(self.signature))) d.addCallback(lambda ignored: mr.get_verification_key()) d.addCallback(lambda verification_key: self.assertThat(verification_key, Equals(self.verification_key))) d.addCallback(lambda ignored: mr.get_seqnum()) d.addCallback(lambda seqnum: self.assertThat(seqnum, Equals(0))) d.addCallback(lambda ignored: mr.get_root_hash()) d.addCallback(lambda root_hash: self.assertThat(self.root_hash, Equals(root_hash))) d.addCallback(lambda ignored: mr.get_seqnum()) d.addCallback(lambda seqnum: self.assertThat(seqnum, Equals(0))) d.addCallback(lambda ignored: mr.get_encoding_parameters()) def _check_encoding_parameters(args): (k, n, segsize, datalen) = args self.assertThat(k, Equals(3)) self.assertThat(n, Equals(10)) self.assertThat(segsize, Equals(6)) self.assertThat(datalen, Equals(36)) d.addCallback(_check_encoding_parameters) d.addCallback(lambda ignored: mr.get_checkstring()) d.addCallback(lambda checkstring: self.assertThat(checkstring, Equals(checkstring))) return d def test_read_with_different_tail_segment_size(self): self.write_test_share_to_server(b"si1", tail_segment=True) mr = MDMFSlotReadProxy(self.storage_server, b"si1", 0) d = mr.get_block_and_salt(5) def _check_tail_segment(results): block, salt = results self.assertThat(block, HasLength(1)) self.assertThat(block, Equals(b"a")) d.addCallback(_check_tail_segment) return d def test_get_block_with_invalid_segnum(self): self.write_test_share_to_server(b"si1") mr = MDMFSlotReadProxy(self.storage_server, b"si1", 0) d = defer.succeed(None) d.addCallback(lambda ignored: self.shouldFail(LayoutInvalid, "test invalid segnum", None, mr.get_block_and_salt, 7)) return d def test_get_encoding_parameters_first(self): self.write_test_share_to_server(b"si1") mr = MDMFSlotReadProxy(self.storage_server, b"si1", 0) d = mr.get_encoding_parameters() def _check_encoding_parameters(args): (k, n, segment_size, datalen) = args self.assertThat(k, Equals(3)) self.assertThat(n, Equals(10)) self.assertThat(segment_size, Equals(6)) self.assertThat(datalen, Equals(36)) d.addCallback(_check_encoding_parameters) return d def test_get_seqnum_first(self): self.write_test_share_to_server(b"si1") mr = MDMFSlotReadProxy(self.storage_server, b"si1", 0) d = mr.get_seqnum() d.addCallback(lambda seqnum: self.assertThat(seqnum, Equals(0))) return d def test_get_root_hash_first(self): self.write_test_share_to_server(b"si1") mr = MDMFSlotReadProxy(self.storage_server, b"si1", 0) d = mr.get_root_hash() d.addCallback(lambda root_hash: self.assertThat(root_hash, Equals(self.root_hash))) return d def test_get_checkstring_first(self): self.write_test_share_to_server(b"si1") mr = MDMFSlotReadProxy(self.storage_server, b"si1", 0) d = 
mr.get_checkstring() d.addCallback(lambda checkstring: self.assertThat(checkstring, Equals(self.checkstring))) return d def test_write_read_vectors(self): # When writing for us, the storage server will return to us a # read vector, along with its result. If a write fails because # the test vectors failed, this read vector can help us to # diagnose the problem. This test ensures that the read vector # is working appropriately. mw = self._make_new_mw(b"si1", 0) for i in range(6): mw.put_block(self.block, i, self.salt) mw.put_encprivkey(self.encprivkey) mw.put_blockhashes(self.block_hash_tree) mw.put_sharehashes(self.share_hash_chain) mw.put_root_hash(self.root_hash) mw.put_signature(self.signature) mw.put_verification_key(self.verification_key) d = mw.finish_publishing() def _then(results): self.assertThat(results, HasLength(2)) result, readv = results self.assertTrue(result) self.assertFalse(readv) self.old_checkstring = mw.get_checkstring() mw.set_checkstring(b"") d.addCallback(_then) d.addCallback(lambda ignored: mw.finish_publishing()) def _then_again(results): self.assertThat(results, HasLength(2)) result, readvs = results self.assertFalse(result) self.assertThat(readvs, Contains(0)) readv = readvs[0][0] self.assertThat(readv, Equals(self.old_checkstring)) d.addCallback(_then_again) # The checkstring remains the same for the rest of the process. return d def test_private_key_after_share_hash_chain(self): mw = self._make_new_mw(b"si1", 0) d = defer.succeed(None) for i in range(6): d.addCallback(lambda ignored, i=i: mw.put_block(self.block, i, self.salt)) d.addCallback(lambda ignored: mw.put_encprivkey(self.encprivkey)) d.addCallback(lambda ignored: mw.put_sharehashes(self.share_hash_chain)) # Now try to put the private key again. d.addCallback(lambda ignored: self.shouldFail(LayoutInvalid, "test repeat private key", None, mw.put_encprivkey, self.encprivkey)) return d def test_signature_after_verification_key(self): mw = self._make_new_mw(b"si1", 0) d = defer.succeed(None) # Put everything up to and including the verification key. for i in range(6): d.addCallback(lambda ignored, i=i: mw.put_block(self.block, i, self.salt)) d.addCallback(lambda ignored: mw.put_encprivkey(self.encprivkey)) d.addCallback(lambda ignored: mw.put_blockhashes(self.block_hash_tree)) d.addCallback(lambda ignored: mw.put_sharehashes(self.share_hash_chain)) d.addCallback(lambda ignored: mw.put_root_hash(self.root_hash)) d.addCallback(lambda ignored: mw.put_signature(self.signature)) d.addCallback(lambda ignored: mw.put_verification_key(self.verification_key)) # Now try to put the signature again. This should fail d.addCallback(lambda ignored: self.shouldFail(LayoutInvalid, "signature after verification", None, mw.put_signature, self.signature)) return d def test_uncoordinated_write(self): # Make two mutable writers, both pointing to the same storage # server, both at the same storage index, and try writing to the # same share. 
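        # Both proxies are created with the same initial checkstring, which
        # expects that nothing has been written to the slot yet.  Whichever
        # finish_publishing() runs first therefore succeeds and updates the
        # slot, and the second write presumably fails because its test vector
        # no longer matches what the server now holds (compare
        # test_write_read_vectors above).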
mw1 = self._make_new_mw(b"si1", 0) mw2 = self._make_new_mw(b"si1", 0) def _check_success(results): result, readvs = results self.assertTrue(result) def _check_failure(results): result, readvs = results self.assertFalse(result) def _write_share(mw): for i in range(6): mw.put_block(self.block, i, self.salt) mw.put_encprivkey(self.encprivkey) mw.put_blockhashes(self.block_hash_tree) mw.put_sharehashes(self.share_hash_chain) mw.put_root_hash(self.root_hash) mw.put_signature(self.signature) mw.put_verification_key(self.verification_key) return mw.finish_publishing() d = _write_share(mw1) d.addCallback(_check_success) d.addCallback(lambda ignored: _write_share(mw2)) d.addCallback(_check_failure) return d def test_invalid_salt_size(self): # Salts need to be 16 bytes in size. Writes that attempt to # write more or less than this should be rejected. mw = self._make_new_mw(b"si1", 0) invalid_salt = b"a" * 17 # 17 bytes another_invalid_salt = b"b" * 15 # 15 bytes d = defer.succeed(None) d.addCallback(lambda ignored: self.shouldFail(LayoutInvalid, "salt too big", None, mw.put_block, self.block, 0, invalid_salt)) d.addCallback(lambda ignored: self.shouldFail(LayoutInvalid, "salt too small", None, mw.put_block, self.block, 0, another_invalid_salt)) return d def test_write_test_vectors(self): # If we give the write proxy a bogus test vector at # any point during the process, it should fail to write when we # tell it to write. def _check_failure(results): self.assertThat(results, HasLength(2)) res, d = results self.assertFalse(res) def _check_success(results): self.assertThat(results, HasLength(2)) res, d = results self.assertTrue(results) mw = self._make_new_mw(b"si1", 0) mw.set_checkstring(b"this is a lie") for i in range(6): mw.put_block(self.block, i, self.salt) mw.put_encprivkey(self.encprivkey) mw.put_blockhashes(self.block_hash_tree) mw.put_sharehashes(self.share_hash_chain) mw.put_root_hash(self.root_hash) mw.put_signature(self.signature) mw.put_verification_key(self.verification_key) d = mw.finish_publishing() d.addCallback(_check_failure) d.addCallback(lambda ignored: mw.set_checkstring(b"")) d.addCallback(lambda ignored: mw.finish_publishing()) d.addCallback(_check_success) return d def serialize_blockhashes(self, blockhashes): return b"".join(blockhashes) def serialize_sharehashes(self, sharehashes): ret = b"".join([struct.pack(">H32s", i, sharehashes[i]) for i in sorted(sharehashes.keys())]) return ret def test_write(self): # This translates to a file with 6 6-byte segments, and with 2-byte # blocks. mw = self._make_new_mw(b"si1", 0) # Test writing some blocks. 
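        # A rough sketch of the layout exercised below, derived from the
        # offsets that the reads check: a fixed-size header, then reserved
        # regions for the encrypted private key, share hash chain, signature
        # and verification key, then the share data as six 18-byte records
        # (2-byte block + 16-byte salt), followed by the block hash tree.
        # The header itself appears to be laid out as:
        #   byte 0        version number (1 for MDMF)
        #   bytes 1-8     sequence number
        #   bytes 9-40    root hash
        #   byte 41       k
        #   byte 42       N
        #   bytes 43-50   segment size
        #   bytes 51-58   data length
        #   bytes 59-122  offset table (eight 8-byte offsets)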
read = self.ss.slot_readv expected_private_key_offset = struct.calcsize(MDMFHEADER) expected_sharedata_offset = struct.calcsize(MDMFHEADER) + \ PRIVATE_KEY_SIZE + \ SIGNATURE_SIZE + \ VERIFICATION_KEY_SIZE + \ SHARE_HASH_CHAIN_SIZE written_block_size = 2 + len(self.salt) written_block = self.block + self.salt for i in range(6): mw.put_block(self.block, i, self.salt) mw.put_encprivkey(self.encprivkey) mw.put_blockhashes(self.block_hash_tree) mw.put_sharehashes(self.share_hash_chain) mw.put_root_hash(self.root_hash) mw.put_signature(self.signature) mw.put_verification_key(self.verification_key) d = mw.finish_publishing() def _check_publish(results): self.assertThat(results, HasLength(2)) result, ign = results self.assertTrue(result, "publish failed") for i in range(6): self.assertThat(read(b"si1", [0], [(expected_sharedata_offset + (i * written_block_size), written_block_size)]), Equals({0: [written_block]})) self.assertThat(self.encprivkey, HasLength(7)) self.assertThat(read(b"si1", [0], [(expected_private_key_offset, 7)]), Equals({0: [self.encprivkey]})) expected_block_hash_offset = expected_sharedata_offset + \ (6 * written_block_size) self.assertThat(self.block_hash_tree_s, HasLength(32 * 6)) self.assertThat(read(b"si1", [0], [(expected_block_hash_offset, 32 * 6)]), Equals({0: [self.block_hash_tree_s]})) expected_share_hash_offset = expected_private_key_offset + len(self.encprivkey) self.assertThat(read(b"si1", [0],[(expected_share_hash_offset, (32 + 2) * 6)]), Equals({0: [self.share_hash_chain_s]})) self.assertThat(read(b"si1", [0], [(9, 32)]), Equals({0: [self.root_hash]})) expected_signature_offset = expected_share_hash_offset + \ len(self.share_hash_chain_s) self.assertThat(self.signature, HasLength(9)) self.assertThat(read(b"si1", [0], [(expected_signature_offset, 9)]), Equals({0: [self.signature]})) expected_verification_key_offset = expected_signature_offset + len(self.signature) self.assertThat(self.verification_key, HasLength(6)) self.assertThat(read(b"si1", [0], [(expected_verification_key_offset, 6)]), Equals({0: [self.verification_key]})) signable = mw.get_signable() verno, seq, roothash, k, n, segsize, datalen = \ struct.unpack(">BQ32sBBQQ", signable) self.assertThat(verno, Equals(1)) self.assertThat(seq, Equals(0)) self.assertThat(roothash, Equals(self.root_hash)) self.assertThat(k, Equals(3)) self.assertThat(n, Equals(10)) self.assertThat(segsize, Equals(6)) self.assertThat(datalen, Equals(36)) expected_eof_offset = expected_block_hash_offset + \ len(self.block_hash_tree_s) # Check the version number to make sure that it is correct. expected_version_number = struct.pack(">B", 1) self.assertThat(read(b"si1", [0], [(0, 1)]), Equals({0: [expected_version_number]})) # Check the sequence number to make sure that it is correct expected_sequence_number = struct.pack(">Q", 0) self.assertThat(read(b"si1", [0], [(1, 8)]), Equals({0: [expected_sequence_number]})) # Check that the encoding parameters (k, N, segement size, data # length) are what they should be. 
These are 3, 10, 6, 36 expected_k = struct.pack(">B", 3) self.assertThat(read(b"si1", [0], [(41, 1)]), Equals({0: [expected_k]})) expected_n = struct.pack(">B", 10) self.assertThat(read(b"si1", [0], [(42, 1)]), Equals({0: [expected_n]})) expected_segment_size = struct.pack(">Q", 6) self.assertThat(read(b"si1", [0], [(43, 8)]), Equals({0: [expected_segment_size]})) expected_data_length = struct.pack(">Q", 36) self.assertThat(read(b"si1", [0], [(51, 8)]), Equals({0: [expected_data_length]})) expected_offset = struct.pack(">Q", expected_private_key_offset) self.assertThat(read(b"si1", [0], [(59, 8)]), Equals({0: [expected_offset]})) expected_offset = struct.pack(">Q", expected_share_hash_offset) self.assertThat(read(b"si1", [0], [(67, 8)]), Equals({0: [expected_offset]})) expected_offset = struct.pack(">Q", expected_signature_offset) self.assertThat(read(b"si1", [0], [(75, 8)]), Equals({0: [expected_offset]})) expected_offset = struct.pack(">Q", expected_verification_key_offset) self.assertThat(read(b"si1", [0], [(83, 8)]), Equals({0: [expected_offset]})) expected_offset = struct.pack(">Q", expected_verification_key_offset + len(self.verification_key)) self.assertThat(read(b"si1", [0], [(91, 8)]), Equals({0: [expected_offset]})) expected_offset = struct.pack(">Q", expected_sharedata_offset) self.assertThat(read(b"si1", [0], [(99, 8)]), Equals({0: [expected_offset]})) expected_offset = struct.pack(">Q", expected_block_hash_offset) self.assertThat(read(b"si1", [0], [(107, 8)]), Equals({0: [expected_offset]})) expected_offset = struct.pack(">Q", expected_eof_offset) self.assertThat(read(b"si1", [0], [(115, 8)]), Equals({0: [expected_offset]})) d.addCallback(_check_publish) return d def _make_new_mw(self, si, share, datalength=36): # This is a file of size 36 bytes. Since it has a segment # size of 6, we know that it has 6 byte segments, which will # be split into blocks of 2 bytes because our FEC k # parameter is 3. mw = MDMFSlotWriteProxy(share, self.storage_server, si, self.secrets, 0, 3, 10, 6, datalength) return mw def test_write_rejected_with_too_many_blocks(self): mw = self._make_new_mw(b"si0", 0) # Try writing too many blocks. We should not be able to write # more than 6 # blocks into each share. d = defer.succeed(None) for i in range(6): d.addCallback(lambda ignored, i=i: mw.put_block(self.block, i, self.salt)) d.addCallback(lambda ignored: self.shouldFail(LayoutInvalid, "too many blocks", None, mw.put_block, self.block, 7, self.salt)) return d def test_write_rejected_with_invalid_salt(self): # Try writing an invalid salt. Salts are 16 bytes -- any more or # less should cause an error. mw = self._make_new_mw(b"si1", 0) bad_salt = b"a" * 17 # 17 bytes d = defer.succeed(None) d.addCallback(lambda ignored: self.shouldFail(LayoutInvalid, "test_invalid_salt", None, mw.put_block, self.block, 7, bad_salt)) return d def test_write_rejected_with_invalid_root_hash(self): # Try writing an invalid root hash. This should be SHA256d, and # 32 bytes long as a result. mw = self._make_new_mw(b"si2", 0) # 17 bytes != 32 bytes invalid_root_hash = b"a" * 17 d = defer.succeed(None) # Before this test can work, we need to put some blocks + salts, # a block hash tree, and a share hash tree. Otherwise, we'll see # failures that match what we are looking for, but are caused by # the constraints imposed on operation ordering. 
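        # Note the i=i default argument in the lambdas below (and in the
        # similar loops throughout this file): it captures the loop variable
        # at definition time, so each callback writes its own block index
        # rather than whatever value i holds once the Deferred chain actually
        # runs.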
for i in range(6): d.addCallback(lambda ignored, i=i: mw.put_block(self.block, i, self.salt)) d.addCallback(lambda ignored: mw.put_encprivkey(self.encprivkey)) d.addCallback(lambda ignored: mw.put_blockhashes(self.block_hash_tree)) d.addCallback(lambda ignored: mw.put_sharehashes(self.share_hash_chain)) d.addCallback(lambda ignored: self.shouldFail(LayoutInvalid, "invalid root hash", None, mw.put_root_hash, invalid_root_hash)) return d def test_write_rejected_with_invalid_blocksize(self): # The blocksize implied by the writer that we get from # _make_new_mw is 2bytes -- any more or any less than this # should be cause for failure, unless it is the tail segment, in # which case it may not be failure. invalid_block = b"a" mw = self._make_new_mw(b"si3", 0, 33) # implies a tail segment with # one byte blocks # 1 bytes != 2 bytes d = defer.succeed(None) d.addCallback(lambda ignored, invalid_block=invalid_block: self.shouldFail(LayoutInvalid, "test blocksize too small", None, mw.put_block, invalid_block, 0, self.salt)) invalid_block = invalid_block * 3 # 3 bytes != 2 bytes d.addCallback(lambda ignored: self.shouldFail(LayoutInvalid, "test blocksize too large", None, mw.put_block, invalid_block, 0, self.salt)) for i in range(5): d.addCallback(lambda ignored, i=i: mw.put_block(self.block, i, self.salt)) # Try to put an invalid tail segment d.addCallback(lambda ignored: self.shouldFail(LayoutInvalid, "test invalid tail segment", None, mw.put_block, self.block, 5, self.salt)) valid_block = b"a" d.addCallback(lambda ignored: mw.put_block(valid_block, 5, self.salt)) return d def test_write_enforces_order_constraints(self): # We require that the MDMFSlotWriteProxy be interacted with in a # specific way. # That way is: # 0: __init__ # 1: write blocks and salts # 2: Write the encrypted private key # 3: Write the block hashes # 4: Write the share hashes # 5: Write the root hash and salt hash # 6: Write the signature and verification key # 7: Write the file. # # Some of these can be performed out-of-order, and some can't. # The dependencies that I want to test here are: # - Private key before block hashes # - share hashes and block hashes before root hash # - root hash before signature # - signature before verification key mw0 = self._make_new_mw(b"si0", 0) # Write some shares d = defer.succeed(None) for i in range(6): d.addCallback(lambda ignored, i=i: mw0.put_block(self.block, i, self.salt)) # Try to write the share hash chain without writing the # encrypted private key d.addCallback(lambda ignored: self.shouldFail(LayoutInvalid, "share hash chain before " "private key", None, mw0.put_sharehashes, self.share_hash_chain)) # Write the private key. d.addCallback(lambda ignored: mw0.put_encprivkey(self.encprivkey)) # Now write the block hashes and try again d.addCallback(lambda ignored: mw0.put_blockhashes(self.block_hash_tree)) # We haven't yet put the root hash on the share, so we shouldn't # be able to sign it. d.addCallback(lambda ignored: self.shouldFail(LayoutInvalid, "signature before root hash", None, mw0.put_signature, self.signature)) d.addCallback(lambda ignored: self.failUnlessRaises(LayoutInvalid, mw0.get_signable)) # ..and, since that fails, we also shouldn't be able to put the # verification key. d.addCallback(lambda ignored: self.shouldFail(LayoutInvalid, "key before signature", None, mw0.put_verification_key, self.verification_key)) # Now write the share hashes. 
d.addCallback(lambda ignored: mw0.put_sharehashes(self.share_hash_chain)) # We should be able to write the root hash now too d.addCallback(lambda ignored: mw0.put_root_hash(self.root_hash)) # We should still be unable to put the verification key d.addCallback(lambda ignored: self.shouldFail(LayoutInvalid, "key before signature", None, mw0.put_verification_key, self.verification_key)) d.addCallback(lambda ignored: mw0.put_signature(self.signature)) # We shouldn't be able to write the offsets to the remote server # until the offset table is finished; IOW, until we have written # the verification key. d.addCallback(lambda ignored: self.shouldFail(LayoutInvalid, "offsets before verification key", None, mw0.finish_publishing)) d.addCallback(lambda ignored: mw0.put_verification_key(self.verification_key)) return d def test_end_to_end(self): mw = self._make_new_mw(b"si1", 0) # Write a share using the mutable writer, and make sure that the # reader knows how to read everything back to us. d = defer.succeed(None) for i in range(6): d.addCallback(lambda ignored, i=i: mw.put_block(self.block, i, self.salt)) d.addCallback(lambda ignored: mw.put_encprivkey(self.encprivkey)) d.addCallback(lambda ignored: mw.put_blockhashes(self.block_hash_tree)) d.addCallback(lambda ignored: mw.put_sharehashes(self.share_hash_chain)) d.addCallback(lambda ignored: mw.put_root_hash(self.root_hash)) d.addCallback(lambda ignored: mw.put_signature(self.signature)) d.addCallback(lambda ignored: mw.put_verification_key(self.verification_key)) d.addCallback(lambda ignored: mw.finish_publishing()) mr = MDMFSlotReadProxy(self.storage_server, b"si1", 0) def _check_block_and_salt(block_and_salt): (block, salt) = block_and_salt self.assertThat(block, Equals(self.block)) self.assertThat(salt, Equals(self.salt)) for i in range(6): d.addCallback(lambda ignored, i=i: mr.get_block_and_salt(i)) d.addCallback(_check_block_and_salt) d.addCallback(lambda ignored: mr.get_encprivkey()) d.addCallback(lambda encprivkey: self.assertThat(self.encprivkey, Equals(encprivkey))) d.addCallback(lambda ignored: mr.get_blockhashes()) d.addCallback(lambda blockhashes: self.assertThat(self.block_hash_tree, Equals(blockhashes))) d.addCallback(lambda ignored: mr.get_sharehashes()) d.addCallback(lambda sharehashes: self.assertThat(self.share_hash_chain, Equals(sharehashes))) d.addCallback(lambda ignored: mr.get_signature()) d.addCallback(lambda signature: self.assertThat(signature, Equals(self.signature))) d.addCallback(lambda ignored: mr.get_verification_key()) d.addCallback(lambda verification_key: self.assertThat(verification_key, Equals(self.verification_key))) d.addCallback(lambda ignored: mr.get_seqnum()) d.addCallback(lambda seqnum: self.assertThat(seqnum, Equals(0))) d.addCallback(lambda ignored: mr.get_root_hash()) d.addCallback(lambda root_hash: self.assertThat(self.root_hash, Equals(root_hash))) d.addCallback(lambda ignored: mr.get_encoding_parameters()) def _check_encoding_parameters(args): (k, n, segsize, datalen) = args self.assertThat(k, Equals(3)) self.assertThat(n, Equals(10)) self.assertThat(segsize, Equals(6)) self.assertThat(datalen, Equals(36)) d.addCallback(_check_encoding_parameters) d.addCallback(lambda ignored: mr.get_checkstring()) d.addCallback(lambda checkstring: self.assertThat(checkstring, Equals(mw.get_checkstring()))) return d def test_is_sdmf(self): # The MDMFSlotReadProxy should also know how to read SDMF files, # since it will encounter them on the grid. Callers use the # is_sdmf method to test this. 
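        # Both formats begin with a version byte: the SDMF test share built
        # by build_test_sdmf_share packs a 0 there, while the MDMF shares
        # written above use 1, and that leading byte is presumably what
        # is_sdmf inspects.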
self.write_sdmf_share_to_server(b"si1") mr = MDMFSlotReadProxy(self.storage_server, b"si1", 0) d = mr.is_sdmf() d.addCallback(lambda issdmf: self.assertTrue(issdmf)) return d def test_reads_sdmf(self): # The slot read proxy should, naturally, know how to tell us # about data in the SDMF format self.write_sdmf_share_to_server(b"si1") mr = MDMFSlotReadProxy(self.storage_server, b"si1", 0) d = defer.succeed(None) d.addCallback(lambda ignored: mr.is_sdmf()) d.addCallback(lambda issdmf: self.assertTrue(issdmf)) # What do we need to read? # - The sharedata # - The salt d.addCallback(lambda ignored: mr.get_block_and_salt(0)) def _check_block_and_salt(results): block, salt = results # Our original file is 36 bytes long. Then each share is 12 # bytes in size. The share is composed entirely of the # letter a. self.block contains 2 as, so 6 * self.block is # what we are looking for. self.assertThat(block, Equals(self.block * 6)) self.assertThat(salt, Equals(self.salt)) d.addCallback(_check_block_and_salt) # - The blockhashes d.addCallback(lambda ignored: mr.get_blockhashes()) d.addCallback(lambda blockhashes: self.assertThat(self.block_hash_tree, Equals(blockhashes), blockhashes)) # - The sharehashes d.addCallback(lambda ignored: mr.get_sharehashes()) d.addCallback(lambda sharehashes: self.assertThat(self.share_hash_chain, Equals(sharehashes))) # - The keys d.addCallback(lambda ignored: mr.get_encprivkey()) d.addCallback(lambda encprivkey: self.assertThat(encprivkey, Equals(self.encprivkey), encprivkey)) d.addCallback(lambda ignored: mr.get_verification_key()) d.addCallback(lambda verification_key: self.assertThat(verification_key, Equals(self.verification_key), verification_key)) # - The signature d.addCallback(lambda ignored: mr.get_signature()) d.addCallback(lambda signature: self.assertThat(signature, Equals(self.signature), signature)) # - The sequence number d.addCallback(lambda ignored: mr.get_seqnum()) d.addCallback(lambda seqnum: self.assertThat(seqnum, Equals(0), seqnum)) # - The root hash d.addCallback(lambda ignored: mr.get_root_hash()) d.addCallback(lambda root_hash: self.assertThat(root_hash, Equals(self.root_hash), root_hash)) return d def test_only_reads_one_segment_sdmf(self): # SDMF shares have only one segment, so it doesn't make sense to # read more segments than that. The reader should know this and # complain if we try to do that. self.write_sdmf_share_to_server(b"si1") mr = MDMFSlotReadProxy(self.storage_server, b"si1", 0) d = defer.succeed(None) d.addCallback(lambda ignored: mr.is_sdmf()) d.addCallback(lambda issdmf: self.assertTrue(issdmf)) d.addCallback(lambda ignored: self.shouldFail(LayoutInvalid, "test bad segment", None, mr.get_block_and_salt, 1)) return d def test_read_with_prefetched_mdmf_data(self): # The MDMFSlotReadProxy will prefill certain fields if you pass # it data that you have already fetched. This is useful for # cases like the Servermap, which prefetches ~2kb of data while # finding out which shares are on the remote peer so that it # doesn't waste round trips. mdmf_data = self.build_test_mdmf_share() self.write_test_share_to_server(b"si1") def _make_mr(ignored, length): mr = MDMFSlotReadProxy(self.storage_server, b"si1", 0, mdmf_data[:length]) return mr d = defer.succeed(None) # This should be enough to fill in both the encoding parameters # and the table of offsets, which will complete the version # information tuple. 
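        # 123 bytes appears to correspond to the fixed MDMF header checked in
        # test_write above: a 59-byte prefix (version, sequence number, root
        # hash, k, N, segment size, data length) plus eight 8-byte offset
        # table entries (59 + 64 = 123).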
d.addCallback(_make_mr, 123) d.addCallback(lambda mr: mr.get_verinfo()) def _check_verinfo(verinfo): self.assertTrue(verinfo) self.assertThat(verinfo, HasLength(9)) (seqnum, root_hash, salt_hash, segsize, datalen, k, n, prefix, offsets) = verinfo self.assertThat(seqnum, Equals(0)) self.assertThat(root_hash, Equals(self.root_hash)) self.assertThat(segsize, Equals(6)) self.assertThat(datalen, Equals(36)) self.assertThat(k, Equals(3)) self.assertThat(n, Equals(10)) expected_prefix = struct.pack(MDMFSIGNABLEHEADER, 1, seqnum, root_hash, k, n, segsize, datalen) self.assertThat(expected_prefix, Equals(prefix)) self.assertThat(self.rref.read_count, Equals(0)) d.addCallback(_check_verinfo) # This is not enough data to read a block and a share, so the # wrapper should attempt to read this from the remote server. d.addCallback(_make_mr, 123) d.addCallback(lambda mr: mr.get_block_and_salt(0)) def _check_block_and_salt(block_and_salt): (block, salt) = block_and_salt self.assertThat(block, Equals(self.block)) self.assertThat(salt, Equals(self.salt)) self.assertThat(self.rref.read_count, Equals(1)) # This should be enough data to read one block. d.addCallback(_make_mr, 123 + PRIVATE_KEY_SIZE + SIGNATURE_SIZE + VERIFICATION_KEY_SIZE + SHARE_HASH_CHAIN_SIZE + 140) d.addCallback(lambda mr: mr.get_block_and_salt(0)) d.addCallback(_check_block_and_salt) return d def test_read_with_prefetched_sdmf_data(self): sdmf_data = self.build_test_sdmf_share() self.write_sdmf_share_to_server(b"si1") def _make_mr(ignored, length): mr = MDMFSlotReadProxy(self.storage_server, b"si1", 0, sdmf_data[:length]) return mr d = defer.succeed(None) # This should be enough to get us the encoding parameters, # offset table, and everything else we need to build a verinfo # string. d.addCallback(_make_mr, 123) d.addCallback(lambda mr: mr.get_verinfo()) def _check_verinfo(verinfo): self.assertTrue(verinfo) self.assertThat(verinfo, HasLength(9)) (seqnum, root_hash, salt, segsize, datalen, k, n, prefix, offsets) = verinfo self.assertThat(seqnum, Equals(0)) self.assertThat(root_hash, Equals(self.root_hash)) self.assertThat(salt, Equals(self.salt)) self.assertThat(segsize, Equals(36)) self.assertThat(datalen, Equals(36)) self.assertThat(k, Equals(3)) self.assertThat(n, Equals(10)) expected_prefix = struct.pack(SIGNED_PREFIX, 0, seqnum, root_hash, salt, k, n, segsize, datalen) self.assertThat(expected_prefix, Equals(prefix)) self.assertThat(self.rref.read_count, Equals(0)) d.addCallback(_check_verinfo) # This shouldn't be enough to read any share data. d.addCallback(_make_mr, 123) d.addCallback(lambda mr: mr.get_block_and_salt(0)) def _check_block_and_salt(block_and_salt): (block, salt) = block_and_salt self.assertThat(block, Equals(self.block * 6)) self.assertThat(salt, Equals(self.salt)) # TODO: Fix the read routine so that it reads only the data # that it has cached if it can't read all of it. self.assertThat(self.rref.read_count, Equals(2)) # This should be enough to read share data. d.addCallback(_make_mr, self.offsets['share_data']) d.addCallback(lambda mr: mr.get_block_and_salt(0)) d.addCallback(_check_block_and_salt) return d def test_read_with_empty_mdmf_file(self): # Some tests upload a file with no contents to test things # unrelated to the actual handling of the content of the file. # The reader should behave intelligently in these cases. self.write_test_share_to_server(b"si1", empty=True) mr = MDMFSlotReadProxy(self.storage_server, b"si1", 0) # We should be able to get the encoding parameters, and they # should be correct. 
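        # With an empty file the header still advertises k=3 and N=10, but
        # the segment size and data length are both zero, so there is no
        # block 0 to fetch and get_block_and_salt should fail with
        # LayoutInvalid.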
d = defer.succeed(None) d.addCallback(lambda ignored: mr.get_encoding_parameters()) def _check_encoding_parameters(params): self.assertThat(params, HasLength(4)) k, n, segsize, datalen = params self.assertThat(k, Equals(3)) self.assertThat(n, Equals(10)) self.assertThat(segsize, Equals(0)) self.assertThat(datalen, Equals(0)) d.addCallback(_check_encoding_parameters) # We should not be able to fetch a block, since there are no # blocks to fetch d.addCallback(lambda ignored: self.shouldFail(LayoutInvalid, "get block on empty file", None, mr.get_block_and_salt, 0)) return d def test_read_with_empty_sdmf_file(self): self.write_sdmf_share_to_server(b"si1", empty=True) mr = MDMFSlotReadProxy(self.storage_server, b"si1", 0) # We should be able to get the encoding parameters, and they # should be correct d = defer.succeed(None) d.addCallback(lambda ignored: mr.get_encoding_parameters()) def _check_encoding_parameters(params): self.assertThat(params, HasLength(4)) k, n, segsize, datalen = params self.assertThat(k, Equals(3)) self.assertThat(n, Equals(10)) self.assertThat(segsize, Equals(0)) self.assertThat(datalen, Equals(0)) d.addCallback(_check_encoding_parameters) # It does not make sense to get a block in this format, so we # should not be able to. d.addCallback(lambda ignored: self.shouldFail(LayoutInvalid, "get block on an empty file", None, mr.get_block_and_salt, 0)) return d def test_verinfo_with_sdmf_file(self): self.write_sdmf_share_to_server(b"si1") mr = MDMFSlotReadProxy(self.storage_server, b"si1", 0) # We should be able to get the version information. d = defer.succeed(None) d.addCallback(lambda ignored: mr.get_verinfo()) def _check_verinfo(verinfo): self.assertTrue(verinfo) self.assertThat(verinfo, HasLength(9)) (seqnum, root_hash, salt, segsize, datalen, k, n, prefix, offsets) = verinfo self.assertThat(seqnum, Equals(0)) self.assertThat(root_hash, Equals(self.root_hash)) self.assertThat(salt, Equals(self.salt)) self.assertThat(segsize, Equals(36)) self.assertThat(datalen, Equals(36)) self.assertThat(k, Equals(3)) self.assertThat(n, Equals(10)) expected_prefix = struct.pack(">BQ32s16s BBQQ", 0, seqnum, root_hash, salt, k, n, segsize, datalen) self.assertThat(prefix, Equals(expected_prefix)) self.assertThat(offsets, Equals(self.offsets)) d.addCallback(_check_verinfo) return d def test_verinfo_with_mdmf_file(self): self.write_test_share_to_server(b"si1") mr = MDMFSlotReadProxy(self.storage_server, b"si1", 0) d = defer.succeed(None) d.addCallback(lambda ignored: mr.get_verinfo()) def _check_verinfo(verinfo): self.assertTrue(verinfo) self.assertThat(verinfo, HasLength(9)) (seqnum, root_hash, IV, segsize, datalen, k, n, prefix, offsets) = verinfo self.assertThat(seqnum, Equals(0)) self.assertThat(root_hash, Equals(self.root_hash)) self.assertFalse(IV) self.assertThat(segsize, Equals(6)) self.assertThat(datalen, Equals(36)) self.assertThat(k, Equals(3)) self.assertThat(n, Equals(10)) expected_prefix = struct.pack(">BQ32s BBQQ", 1, seqnum, root_hash, k, n, segsize, datalen) self.assertThat(prefix, Equals(expected_prefix)) self.assertThat(offsets, Equals(self.offsets)) d.addCallback(_check_verinfo) return d def test_sdmf_writer(self): # Go through the motions of writing an SDMF share to the storage # server. Then read the storage server to see that the share got # written in the way that we think it should have. # We do this first so that the necessary instance variables get # set the way we want them for the tests below. 
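        # The proxy buffers everything locally: none of the puts below should
        # touch the storage server (write_count stays at 0), and
        # finish_publishing should then issue a single write containing the
        # entire serialized share, which a full-length readv can compare
        # against the output of build_test_sdmf_share.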
data = self.build_test_sdmf_share() sdmfr = SDMFSlotWriteProxy(0, self.storage_server, b"si1", self.secrets, 0, 3, 10, 36, 36) # Put the block and salt. sdmfr.put_block(self.blockdata, 0, self.salt) # Put the encprivkey sdmfr.put_encprivkey(self.encprivkey) # Put the block and share hash chains sdmfr.put_blockhashes(self.block_hash_tree) sdmfr.put_sharehashes(self.share_hash_chain) sdmfr.put_root_hash(self.root_hash) # Put the signature sdmfr.put_signature(self.signature) # Put the verification key sdmfr.put_verification_key(self.verification_key) # Now check to make sure that nothing has been written yet. self.assertThat(self.rref.write_count, Equals(0)) # Now finish publishing d = sdmfr.finish_publishing() def _then(ignored): self.assertThat(self.rref.write_count, Equals(1)) read = self.ss.slot_readv self.assertThat(read(b"si1", [0], [(0, len(data))]), Equals({0: [data]})) d.addCallback(_then) return d def test_sdmf_writer_preexisting_share(self): data = self.build_test_sdmf_share() self.write_sdmf_share_to_server(b"si1") # Now there is a share on the storage server. To successfully # write, we need to set the checkstring correctly. When we # don't, no write should occur. sdmfw = SDMFSlotWriteProxy(0, self.storage_server, b"si1", self.secrets, 1, 3, 10, 36, 36) sdmfw.put_block(self.blockdata, 0, self.salt) # Put the encprivkey sdmfw.put_encprivkey(self.encprivkey) # Put the block and share hash chains sdmfw.put_blockhashes(self.block_hash_tree) sdmfw.put_sharehashes(self.share_hash_chain) # Put the root hash sdmfw.put_root_hash(self.root_hash) # Put the signature sdmfw.put_signature(self.signature) # Put the verification key sdmfw.put_verification_key(self.verification_key) # We shouldn't have a checkstring yet self.assertThat(sdmfw.get_checkstring(), Equals(b"")) d = sdmfw.finish_publishing() def _then(results): self.assertFalse(results[0]) # this is the correct checkstring self._expected_checkstring = results[1][0][0] return self._expected_checkstring d.addCallback(_then) d.addCallback(sdmfw.set_checkstring) d.addCallback(lambda ignored: sdmfw.get_checkstring()) d.addCallback(lambda checkstring: self.assertThat(checkstring, Equals(self._expected_checkstring))) d.addCallback(lambda ignored: sdmfw.finish_publishing()) def _then_again(results): self.assertTrue(results[0]) read = self.ss.slot_readv self.assertThat(read(b"si1", [0], [(1, 8)]), Equals({0: [struct.pack(">Q", 1)]})) self.assertThat(read(b"si1", [0], [(9, len(data) - 9)]), Equals({0: [data[9:]]})) d.addCallback(_then_again) return d class Stats(SyncTestCase): def setUp(self): super(Stats, self).setUp() self.sparent = LoggingServiceParent() self._lease_secret = itertools.count() self.addCleanup(self.sparent.stopService) def workdir(self, name): basedir = os.path.join("storage", "Server", name) return basedir def create(self, name): workdir = self.workdir(name) ss = StorageServer(workdir, b"\x00" * 20) ss.setServiceParent(self.sparent) return ss def test_latencies(self): ss = self.create("test_latencies") for i in range(10000): ss.add_latency("allocate", 1.0 * i) for i in range(1000): ss.add_latency("renew", 1.0 * i) for i in range(20): ss.add_latency("write", 1.0 * i) for i in range(10): ss.add_latency("cancel", 2.0 * i) ss.add_latency("get", 5.0) output = ss.get_latencies() self.assertThat(sorted(output.keys()), Equals(sorted(["allocate", "renew", "cancel", "write", "get"]))) self.assertThat(ss.latencies["allocate"], HasLength(1000)) self.assertTrue(abs(output["allocate"]["mean"] - 9500) < 1, output) 
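        # 10000 "allocate" samples were added but only 1000 are retained,
        # presumably the most recent ones (values 9000..9999).  That gives a
        # mean of roughly 9499.5 and percentiles close to 9010, 9100, 9500,
        # 9900, 9950, 9990 and 9999, which is what the checks below expect.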
self.assertTrue(abs(output["allocate"]["01_0_percentile"] - 9010) < 1, output) self.assertTrue(abs(output["allocate"]["10_0_percentile"] - 9100) < 1, output) self.assertTrue(abs(output["allocate"]["50_0_percentile"] - 9500) < 1, output) self.assertTrue(abs(output["allocate"]["90_0_percentile"] - 9900) < 1, output) self.assertTrue(abs(output["allocate"]["95_0_percentile"] - 9950) < 1, output) self.assertTrue(abs(output["allocate"]["99_0_percentile"] - 9990) < 1, output) self.assertTrue(abs(output["allocate"]["99_9_percentile"] - 9999) < 1, output) self.assertThat(ss.latencies["renew"], HasLength(1000)) self.assertTrue(abs(output["renew"]["mean"] - 500) < 1, output) self.assertTrue(abs(output["renew"]["01_0_percentile"] - 10) < 1, output) self.assertTrue(abs(output["renew"]["10_0_percentile"] - 100) < 1, output) self.assertTrue(abs(output["renew"]["50_0_percentile"] - 500) < 1, output) self.assertTrue(abs(output["renew"]["90_0_percentile"] - 900) < 1, output) self.assertTrue(abs(output["renew"]["95_0_percentile"] - 950) < 1, output) self.assertTrue(abs(output["renew"]["99_0_percentile"] - 990) < 1, output) self.assertTrue(abs(output["renew"]["99_9_percentile"] - 999) < 1, output) self.assertThat(ss.latencies["write"], HasLength(20)) self.assertTrue(abs(output["write"]["mean"] - 9) < 1, output) self.assertTrue(output["write"]["01_0_percentile"] is None, output) self.assertTrue(abs(output["write"]["10_0_percentile"] - 2) < 1, output) self.assertTrue(abs(output["write"]["50_0_percentile"] - 10) < 1, output) self.assertTrue(abs(output["write"]["90_0_percentile"] - 18) < 1, output) self.assertTrue(abs(output["write"]["95_0_percentile"] - 19) < 1, output) self.assertTrue(output["write"]["99_0_percentile"] is None, output) self.assertTrue(output["write"]["99_9_percentile"] is None, output) self.assertThat(ss.latencies["cancel"], HasLength(10)) self.assertTrue(abs(output["cancel"]["mean"] - 9) < 1, output) self.assertTrue(output["cancel"]["01_0_percentile"] is None, output) self.assertTrue(abs(output["cancel"]["10_0_percentile"] - 2) < 1, output) self.assertTrue(abs(output["cancel"]["50_0_percentile"] - 10) < 1, output) self.assertTrue(abs(output["cancel"]["90_0_percentile"] - 18) < 1, output) self.assertTrue(output["cancel"]["95_0_percentile"] is None, output) self.assertTrue(output["cancel"]["99_0_percentile"] is None, output) self.assertTrue(output["cancel"]["99_9_percentile"] is None, output) self.assertThat(ss.latencies["get"], HasLength(1)) self.assertTrue(output["get"]["mean"] is None, output) self.assertTrue(output["get"]["01_0_percentile"] is None, output) self.assertTrue(output["get"]["10_0_percentile"] is None, output) self.assertTrue(output["get"]["50_0_percentile"] is None, output) self.assertTrue(output["get"]["90_0_percentile"] is None, output) self.assertTrue(output["get"]["95_0_percentile"] is None, output) self.assertTrue(output["get"]["99_0_percentile"] is None, output) self.assertTrue(output["get"]["99_9_percentile"] is None, output) immutable_schemas = strategies.sampled_from(list(ALL_IMMUTABLE_SCHEMAS)) class ShareFileTests(SyncTestCase): """Tests for allmydata.storage.immutable.ShareFile.""" def get_sharefile(self, **kwargs): sf = ShareFile(self.mktemp(), max_size=1000, create=True, **kwargs) sf.write_share_data(0, b"abc") sf.write_share_data(2, b"DEF") # Should be b'abDEF' now. 
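        # The second write starts at offset 2 and overwrites the trailing
        # "c", so the stored share data is the 5-byte string b"abDEF"; the
        # tests below rely on reads past offset 5 coming back truncated or
        # empty.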
return sf @given(immutable_schemas) def test_read_write(self, schema): """Basic writes can be read.""" sf = self.get_sharefile(schema=schema) self.assertEqual(sf.read_share_data(0, 3), b"abD") self.assertEqual(sf.read_share_data(1, 4), b"bDEF") @given(immutable_schemas) def test_reads_beyond_file_end(self, schema): """Reads beyond the file size are truncated.""" sf = self.get_sharefile(schema=schema) self.assertEqual(sf.read_share_data(0, 10), b"abDEF") self.assertEqual(sf.read_share_data(5, 10), b"") @given(immutable_schemas) def test_too_large_write(self, schema): """Can't do write larger than file size.""" sf = self.get_sharefile(schema=schema) with self.assertRaises(DataTooLargeError): sf.write_share_data(0, b"x" * 3000) @given(immutable_schemas) def test_no_leases_cancelled(self, schema): """If no leases were cancelled, IndexError is raised.""" sf = self.get_sharefile(schema=schema) with self.assertRaises(IndexError): sf.cancel_lease(b"garbage") @given(immutable_schemas) def test_long_lease_count_format(self, schema): """ ``ShareFile.__init__`` raises ``ValueError`` if the lease count format given is longer than one character. """ with self.assertRaises(ValueError): self.get_sharefile(schema=schema, lease_count_format="BB") @given(immutable_schemas) def test_large_lease_count_format(self, schema): """ ``ShareFile.__init__`` raises ``ValueError`` if the lease count format encodes to a size larger than 8 bytes. """ with self.assertRaises(ValueError): self.get_sharefile(schema=schema, lease_count_format="Q") @given(immutable_schemas) def test_avoid_lease_overflow(self, schema): """ If the share file already has the maximum number of leases supported then ``ShareFile.add_lease`` raises ``struct.error`` and makes no changes to the share file contents. """ make_lease = partial( LeaseInfo, renew_secret=b"r" * 32, cancel_secret=b"c" * 32, expiration_time=2 ** 31, ) # Make it a little easier to reach the condition by limiting the # number of leases to only 255. sf = self.get_sharefile(schema=schema, lease_count_format="B") # Add the leases. for i in range(2 ** 8 - 1): lease = make_lease(owner_num=i) sf.add_lease(lease) # Capture the state of the share file at this point so we can # determine whether the next operation modifies it or not. with open(sf.home, "rb") as f: before_data = f.read() # It is not possible to add a 256th lease. lease = make_lease(owner_num=256) with self.assertRaises(struct.error): sf.add_lease(lease) # Compare the share file state to what we captured earlier. Any # change is a bug. with open(sf.home, "rb") as f: after_data = f.read() self.assertEqual(before_data, after_data) @given(immutable_schemas) def test_renew_secret(self, schema): """ A lease loaded from an immutable share file at any schema version can have its renew secret verified. """ renew_secret = b"r" * 32 cancel_secret = b"c" * 32 expiration_time = 2 ** 31 sf = self.get_sharefile(schema=schema) lease = LeaseInfo( owner_num=0, renew_secret=renew_secret, cancel_secret=cancel_secret, expiration_time=expiration_time, ) sf.add_lease(lease) (loaded_lease,) = sf.get_leases() self.assertTrue(loaded_lease.is_renew_secret(renew_secret)) @given(immutable_schemas) def test_cancel_secret(self, schema): """ A lease loaded from an immutable share file at any schema version can have its cancel secret verified. 
""" renew_secret = b"r" * 32 cancel_secret = b"c" * 32 expiration_time = 2 ** 31 sf = self.get_sharefile(schema=schema) lease = LeaseInfo( owner_num=0, renew_secret=renew_secret, cancel_secret=cancel_secret, expiration_time=expiration_time, ) sf.add_lease(lease) (loaded_lease,) = sf.get_leases() self.assertTrue(loaded_lease.is_cancel_secret(cancel_secret)) mutable_schemas = strategies.sampled_from(list(ALL_MUTABLE_SCHEMAS)) class MutableShareFileTests(SyncTestCase): """ Tests for allmydata.storage.mutable.MutableShareFile. """ def get_sharefile(self, **kwargs): return MutableShareFile(self.mktemp(), **kwargs) @given( schema=mutable_schemas, nodeid=strategies.just(b"x" * 20), write_enabler=strategies.just(b"y" * 32), datav=strategies.lists( # Limit the max size of these so we don't write *crazy* amounts of # data to disk. strategies.tuples(offsets(), strategies.binary(max_size=2 ** 8)), max_size=2 ** 8, ), new_length=offsets(), ) def test_readv_reads_share_data(self, schema, nodeid, write_enabler, datav, new_length): """ ``MutableShareFile.readv`` returns bytes from the share data portion of the share file. """ sf = self.get_sharefile(schema=schema) sf.create(my_nodeid=nodeid, write_enabler=write_enabler) sf.writev(datav=datav, new_length=new_length) # Apply all of the writes to a simple in-memory buffer so we can # resolve the final state of the share data. In particular, this # helps deal with overlapping writes which otherwise make it tricky to # figure out what data to expect to be able to read back. buf = BytesIO() for (offset, data) in datav: buf.seek(offset) buf.write(data) buf.truncate(new_length) # Using that buffer, determine the expected result of a readv for all # of the data just written. def read_from_buf(offset, length): buf.seek(offset) return buf.read(length) expected_data = list( read_from_buf(offset, len(data)) for (offset, data) in datav ) # Perform a read that gives back all of the data written to the share # file. read_vectors = list((offset, len(data)) for (offset, data) in datav) read_data = sf.readv(read_vectors) # Make sure the read reproduces the value we computed using our local # buffer. self.assertEqual(expected_data, read_data) @given( schema=mutable_schemas, nodeid=strategies.just(b"x" * 20), write_enabler=strategies.just(b"y" * 32), readv=strategies.lists(strategies.tuples(offsets(), lengths()), min_size=1), random=strategies.randoms(), ) def test_readv_rejects_negative_length(self, schema, nodeid, write_enabler, readv, random): """ If a negative length is given to ``MutableShareFile.readv`` in a read vector then ``AssertionError`` is raised. """ # Pick a read vector to break with a negative value readv_index = random.randrange(len(readv)) # Decide on whether we're breaking offset or length offset_or_length = random.randrange(2) # A helper function that will take a valid offset and length and break # one of them. def corrupt(break_length, offset, length): if break_length: # length must not be 0 or flipping the sign does nothing # length must not be negative or flipping the sign *fixes* it assert length > 0 return (offset, -length) else: if offset > 0: # We can break offset just by flipping the sign. return (-offset, length) else: # Otherwise it has to be zero. If it was negative, what's # going on? assert offset == 0 # Since we can't just flip the sign on 0 to break things, # replace a 0 offset with a simple negative value. All # other negative values will be tested by the `offset > 0` # case above. return (-1, length) # Break the read vector very slightly! 
broken_readv = readv[:] broken_readv[readv_index] = corrupt( offset_or_length, *broken_readv[readv_index] ) sf = self.get_sharefile(schema=schema) sf.create(my_nodeid=nodeid, write_enabler=write_enabler) # A read with a broken read vector is an error. with self.assertRaises(AssertionError): sf.readv(broken_readv) class LeaseInfoTests(SyncTestCase): """ Tests for ``allmydata.storage.lease.LeaseInfo``. """ def test_is_renew_secret(self): """ ``LeaseInfo.is_renew_secret`` returns ``True`` if the value given is the renew secret. """ renew_secret = b"r" * 32 cancel_secret = b"c" * 32 lease = LeaseInfo( owner_num=1, renew_secret=renew_secret, cancel_secret=cancel_secret, ) self.assertTrue(lease.is_renew_secret(renew_secret)) def test_is_not_renew_secret(self): """ ``LeaseInfo.is_renew_secret`` returns ``False`` if the value given is not the renew secret. """ renew_secret = b"r" * 32 cancel_secret = b"c" * 32 lease = LeaseInfo( owner_num=1, renew_secret=renew_secret, cancel_secret=cancel_secret, ) self.assertFalse(lease.is_renew_secret(cancel_secret)) def test_is_cancel_secret(self): """ ``LeaseInfo.is_cancel_secret`` returns ``True`` if the value given is the cancel secret. """ renew_secret = b"r" * 32 cancel_secret = b"c" * 32 lease = LeaseInfo( owner_num=1, renew_secret=renew_secret, cancel_secret=cancel_secret, ) self.assertTrue(lease.is_cancel_secret(cancel_secret)) def test_is_not_cancel_secret(self): """ ``LeaseInfo.is_cancel_secret`` returns ``False`` if the value given is not the cancel secret. """ renew_secret = b"r" * 32 cancel_secret = b"c" * 32 lease = LeaseInfo( owner_num=1, renew_secret=renew_secret, cancel_secret=cancel_secret, ) self.assertFalse(lease.is_cancel_secret(renew_secret)) @given( strategies.tuples( strategies.integers(min_value=0, max_value=2 ** 31 - 1), strategies.binary(min_size=32, max_size=32), strategies.binary(min_size=32, max_size=32), strategies.integers(min_value=0, max_value=2 ** 31 - 1), strategies.binary(min_size=20, max_size=20), ), ) def test_immutable_size(self, initializer_args): """ ``LeaseInfo.immutable_size`` returns the length of the result of ``LeaseInfo.to_immutable_data``. ``LeaseInfo.mutable_size`` returns the length of the result of ``LeaseInfo.to_mutable_data``. """ info = LeaseInfo(*initializer_args) self.expectThat( info.to_immutable_data(), HasLength(info.immutable_size()), ) self.expectThat( info.to_mutable_data(), HasLength(info.mutable_size()), ) class WriteBufferTests(SyncTestCase): """Tests for ``_WriteBuffer``.""" @given( small_writes=strategies.lists( strategies.binary(min_size=1, max_size=20), min_size=10, max_size=20), batch_size=strategies.integers(min_value=5, max_value=10) ) def test_write_buffer(self, small_writes: list[bytes], batch_size: int): """ ``_WriteBuffer`` coalesces small writes into bigger writes based on the batch size. 
""" wb = _WriteBuffer(batch_size) result = b"" for data in small_writes: should_flush = wb.queue_write(data) if should_flush: flushed_offset, flushed_data = wb.flush() self.assertEqual(flushed_offset, len(result)) # The flushed data is in batch sizes, or closest approximation # given queued inputs: self.assertTrue(batch_size <= len(flushed_data) < batch_size + len(data)) result += flushed_data # Final flush: remaining_length = wb.get_queued_bytes() flushed_offset, flushed_data = wb.flush() self.assertEqual(remaining_length, len(flushed_data)) self.assertEqual(flushed_offset, len(result)) result += flushed_data self.assertEqual(result, b"".join(small_writes)) tahoe_lafs-1.20.0/src/allmydata/test/test_storage_client.py0000644000000000000000000007327513615410400020766 0ustar00""" Tests for allmydata.storage_client. """ from __future__ import annotations from json import ( loads, ) import hashlib from typing import Union, Any, Optional from hyperlink import DecodedURL from fixtures import ( TempDir, ) from testtools.content import ( text_content, ) from testtools.matchers import ( MatchesAll, IsInstance, MatchesStructure, Equals, Is, AfterPreprocessing, ) from zope.interface import ( implementer, ) from zope.interface.verify import ( verifyObject, ) from hyperlink import ( URL, ) import attr from twisted.internet.interfaces import ( IStreamClientEndpoint, IProtocolFactory, ) from twisted.application.service import ( Service, ) from twisted.trial import unittest from twisted.internet.defer import ( Deferred, inlineCallbacks, ) from twisted.python.filepath import ( FilePath, ) from twisted.internet.task import Clock from foolscap.api import ( Tub, ) from foolscap.ipb import ( IConnectionHintHandler, ) from allmydata.util.deferredutil import MultiFailure from .no_network import LocalWrapper from .common import ( EMPTY_CLIENT_CONFIG, SyncTestCase, AsyncTestCase, UseTestPlugins, UseNode, SameProcessStreamEndpointAssigner, MemoryIntroducerClient, ) from .common_web import ( do_http, ) from .storage_plugin import ( DummyStorageClient, ) from allmydata.webish import ( WebishServer, ) from allmydata.util import base32, yamlutil from allmydata.storage_client import ( IFoolscapStorageServer, NativeStorageServer, HTTPNativeStorageServer, StorageFarmBroker, StorageClientConfig, MissingPlugin, _FoolscapStorage, _NullStorage, _pick_a_http_server, ANONYMOUS_STORAGE_NURLS, ) from ..storage.server import ( StorageServer, ) from ..client import config_from_string from allmydata.interfaces import ( IConnectionStatus, IStorageServer, ) SOME_FURL = "pb://abcde@nowhere/fake" class NativeStorageServerWithVersion(NativeStorageServer): # type: ignore # tahoe-lafs/ticket/3573 def __init__(self, version): # note: these instances won't work for anything other than # get_available_space() because we don't upcall self.version = version def get_version(self): return self.version class TestNativeStorageServer(unittest.TestCase): def test_get_available_space_new(self): nss = NativeStorageServerWithVersion( { b"http://allmydata.org/tahoe/protocols/storage/v1": { b"maximum-immutable-share-size": 111, b"available-space": 222, } }) self.failUnlessEqual(nss.get_available_space(), 222) def test_get_available_space_old(self): nss = NativeStorageServerWithVersion( { b"http://allmydata.org/tahoe/protocols/storage/v1": { b"maximum-immutable-share-size": 111, } }) self.failUnlessEqual(nss.get_available_space(), 111) def test_missing_nickname(self): ann = {"anonymous-storage-FURL": 
"pb://w2hqnbaa25yw4qgcvghl5psa3srpfgw3@tcp:127.0.0.1:51309/vucto2z4fxment3vfxbqecblbf6zyp6x", "permutation-seed-base32": "w2hqnbaa25yw4qgcvghl5psa3srpfgw3", } nss = NativeStorageServer(b"server_id", ann, None, {}, EMPTY_CLIENT_CONFIG) self.assertEqual(nss.get_nickname(), "") class GetConnectionStatus(unittest.TestCase): """ Tests for ``NativeStorageServer.get_connection_status``. """ def test_unrecognized_announcement(self): """ When ``NativeStorageServer`` is constructed with a storage announcement it doesn't recognize, its ``get_connection_status`` nevertheless returns an object which provides ``IConnectionStatus``. """ # Pretty hard to recognize anything from an empty announcement. ann = {} nss = NativeStorageServer(b"server_id", ann, Tub, {}, EMPTY_CLIENT_CONFIG) nss.start_connecting(lambda: None) connection_status = nss.get_connection_status() self.assertTrue(IConnectionStatus.providedBy(connection_status)) class UnrecognizedAnnouncement(unittest.TestCase): """ Tests for handling of announcements that aren't recognized and don't use *anonymous-storage-FURL*. Recognition failure is created by making up something completely novel for these tests. In real use, recognition failure would most likely come from an announcement generated by a storage server plugin which is not loaded in the client. """ plugin_name = u"tahoe-lafs-testing-v1" ann = { u"storage-options": [ { u"name": plugin_name, u"any-parameter": 12345, }, ], } server_id = b"abc" def _tub_maker(self, overrides): return Service() def native_storage_server(self, config: Optional[StorageClientConfig] = None) -> NativeStorageServer: """ Make a ``NativeStorageServer`` out of an unrecognizable announcement. """ return NativeStorageServer( self.server_id, self.ann, self._tub_maker, {}, node_config=EMPTY_CLIENT_CONFIG, config=config if config is not None else StorageClientConfig(), ) def test_no_exceptions(self): """ ``NativeStorageServer`` can be instantiated with an unrecognized announcement. """ self.native_storage_server() def test_start_connecting(self): """ ``NativeStorageServer.start_connecting`` does not raise an exception. """ server = self.native_storage_server() server.start_connecting(None) def test_stop_connecting(self): """ ``NativeStorageServer.stop_connecting`` does not raise an exception. """ server = self.native_storage_server() server.start_connecting(None) server.stop_connecting() def test_try_to_connect(self): """ ``NativeStorageServer.try_to_connect`` does not raise an exception. """ server = self.native_storage_server() server.start_connecting(None) server.try_to_connect() def test_various_data_methods(self): """ The data accessors of ``NativeStorageServer`` that depend on the announcement do not raise an exception. """ server = self.native_storage_server() server.get_permutation_seed() server.get_name() server.get_longname() server.get_tubid() server.get_lease_seed() server.get_foolscap_write_enabler_seed() server.get_nickname() def test_missing_plugin(self) -> None: """ An exception is produced if the plugin is missing """ with self.assertRaises(MissingPlugin): self.native_storage_server( StorageClientConfig( storage_plugins={ "missing-plugin-name": {} } ) ) class PluginMatchedAnnouncement(SyncTestCase): """ Tests for handling by ``NativeStorageServer`` of storage server announcements that are handled by an ``IFoolscapStoragePlugin``. """ @inlineCallbacks def make_node(self, introducer_furl, storage_plugin, plugin_config): """ Create a client node with the given configuration. 
:param bytes introducer_furl: The introducer furl with which to configure the client. :param bytes storage_plugin: The name of a storage plugin to enable. :param dict[bytes, bytes] plugin_config: Configuration to supply to the enabled plugin. May also be ``None`` for no configuration section (distinct from ``{}`` which creates an empty configuration section). """ tempdir = TempDir() self.useFixture(tempdir) self.basedir = FilePath(tempdir.path) self.basedir.child(u"private").makedirs() self.useFixture(UseTestPlugins()) self.node_fixture = self.useFixture(UseNode( plugin_config, storage_plugin, self.basedir, introducer_furl, )) self.config = self.node_fixture.config self.node = yield self.node_fixture.create_node() [self.introducer_client] = self.node.introducer_clients def publish(self, server_id, announcement, introducer_client): for subscription in introducer_client.subscribed_to: if subscription.service_name == u"storage": subscription.cb( server_id, announcement, *subscription.args, **subscription.kwargs ) def get_storage(self, server_id, node): storage_broker = node.get_storage_broker() native_storage_server = storage_broker.servers[server_id] return native_storage_server._storage def set_rref(self, server_id, node, rref): storage_broker = node.get_storage_broker() native_storage_server = storage_broker.servers[server_id] native_storage_server._rref = rref @inlineCallbacks def test_ignored_non_enabled_plugin(self): """ An announcement that could be matched by a plugin that is not enabled is not matched. """ yield self.make_node( introducer_furl=SOME_FURL, storage_plugin="tahoe-lafs-dummy-v1", plugin_config=None, ) server_id = b"v0-abcdef" ann = { u"service-name": u"storage", u"storage-options": [{ # notice how the announcement is for a different storage plugin # than the one that is enabled. u"name": u"tahoe-lafs-dummy-v2", u"storage-server-FURL": SOME_FURL, }], } self.publish(server_id, ann, self.introducer_client) storage = self.get_storage(server_id, self.node) self.assertIsInstance(storage, _NullStorage) @inlineCallbacks def test_enabled_plugin(self): """ An announcement that could be matched by a plugin that is enabled with configuration is matched and the plugin's storage client is used. """ plugin_config = { "abc": "xyz", } plugin_name = "tahoe-lafs-dummy-v1" yield self.make_node( introducer_furl=SOME_FURL, storage_plugin=plugin_name, plugin_config=plugin_config, ) server_id = b"v0-abcdef" ann = { u"service-name": u"storage", u"storage-options": [{ # and this announcement is for a plugin with a matching name u"name": plugin_name, u"storage-server-FURL": SOME_FURL, }], } self.publish(server_id, ann, self.introducer_client) storage = self.get_storage(server_id, self.node) self.assertTrue( verifyObject( IFoolscapStorageServer, storage, ), ) expected_rref = object() # Can't easily establish a real Foolscap connection so fake the result # of doing so... self.set_rref(server_id, self.node, expected_rref) self.expectThat( storage.storage_server, MatchesAll( IsInstance(DummyStorageClient), MatchesStructure( get_rref=AfterPreprocessing( lambda get_rref: get_rref(), Is(expected_rref), ), configuration=Equals(plugin_config), announcement=Equals({ u'name': plugin_name, u'storage-server-FURL': u'pb://abcde@nowhere/fake', }), ), ), ) @inlineCallbacks def test_enabled_no_configuration_plugin(self): """ An announcement that could be matched by a plugin that is enabled with no configuration is matched and the plugin's storage client is used. 
""" plugin_name = "tahoe-lafs-dummy-v1" yield self.make_node( introducer_furl=SOME_FURL, storage_plugin=plugin_name, plugin_config=None, ) server_id = b"v0-abcdef" ann = { u"service-name": u"storage", u"storage-options": [{ # and this announcement is for a plugin with a matching name u"name": plugin_name, u"storage-server-FURL": SOME_FURL, }], } self.publish(server_id, ann, self.introducer_client) storage = self.get_storage(server_id, self.node) self.addDetail("storage", text_content(str(storage))) self.expectThat( storage.storage_server, MatchesAll( IsInstance(DummyStorageClient), MatchesStructure( configuration=Equals({}), ), ), ) class FoolscapStorageServers(unittest.TestCase): """ Tests for implementations of ``IFoolscapStorageServer``. """ def test_null_provider(self): """ Instances of ``_NullStorage`` provide ``IFoolscapStorageServer``. """ self.assertTrue( verifyObject( IFoolscapStorageServer, _NullStorage(), ), ) def test_foolscap_provider(self): """ Instances of ``_FoolscapStorage`` provide ``IFoolscapStorageServer``. """ @implementer(IStorageServer) class NotStorageServer(object): pass self.assertTrue( verifyObject( IFoolscapStorageServer, _FoolscapStorage.from_announcement( b"server-id", SOME_FURL, {u"permutation-seed-base32": base32.b2a(b"permutationseed")}, NotStorageServer(), ), ), ) class StoragePluginWebPresence(AsyncTestCase): """ Tests for the web resources ``IFoolscapStorageServer`` plugins may expose. """ @inlineCallbacks def setUp(self): super(StoragePluginWebPresence, self).setUp() self.useFixture(UseTestPlugins()) self.port_assigner = SameProcessStreamEndpointAssigner() self.port_assigner.setUp() self.addCleanup(self.port_assigner.tearDown) self.storage_plugin = u"tahoe-lafs-dummy-v1" from twisted.internet import reactor _, webport_endpoint = self.port_assigner.assign(reactor) tubport_location, tubport_endpoint = self.port_assigner.assign(reactor) tempdir = TempDir() self.useFixture(tempdir) self.basedir = FilePath(tempdir.path) self.basedir.child(u"private").makedirs() self.node_fixture = self.useFixture(UseNode( plugin_config={ "web": "1", }, node_config={ # We don't really need the main Tub listening but if we # disable it then we also have to disable storage (because # config validation policy). "tub.port": tubport_endpoint, "tub.location": tubport_location, "web.port": str(webport_endpoint), }, storage_plugin=self.storage_plugin, basedir=self.basedir, introducer_furl=SOME_FURL, )) self.node = yield self.node_fixture.create_node() self.webish = self.node.getServiceNamed(WebishServer.name) self.node.startService() self.addCleanup(self.node.stopService) self.port = self.webish.getPortnum() @inlineCallbacks def test_plugin_resource_path(self): """ The plugin's resource is published at */storage-plugins/*. """ url = u"http://127.0.0.1:{port}/storage-plugins/{plugin_name}".format( port=self.port, plugin_name=self.storage_plugin, ).encode("utf-8") result = yield do_http("get", url) self.assertThat(loads(result), Equals({"web": "1"})) @inlineCallbacks def test_plugin_resource_persistent_across_requests(self): """ The plugin's resource is loaded and then saved and re-used for future requests. """ url = URL( scheme=u"http", host=u"127.0.0.1", port=self.port, path=( u"storage-plugins", self.storage_plugin, u"counter", ), ).to_text().encode("utf-8") values = { loads((yield do_http("get", url)))[u"value"], loads((yield do_http("get", url)))[u"value"], } self.assertThat( values, # If the counter manages to go up then the state stuck around. 
Equals({1, 2}), ) _aCertPEM = Tub().myCertificate.dumpPEM() def new_tub(): """ Make a new ``Tub`` with a hard-coded private key. """ # Use a private key / certificate generated by Tub how it wants. But just # re-use the same one every time so we don't waste a lot of time # generating them over and over in the tests. return Tub(certData=_aCertPEM) def make_broker(tub_maker=None): """ Create a ``StorageFarmBroker`` with the given tub maker and an empty client configuration. """ if tub_maker is None: tub_maker = lambda handler_overrides: new_tub() return StorageFarmBroker(True, tub_maker, EMPTY_CLIENT_CONFIG) @implementer(IStreamClientEndpoint) @attr.s class SpyEndpoint(object): """ Observe and record connection attempts. :ivar list _append: A callable that accepts two-tuples. For each attempted connection, it will be called with ``Deferred`` that was returned and the ``Factory`` that was passed in. """ _append = attr.ib() def connect(self, factory): """ Record the connection attempt. :return: A ``Deferred`` that ``SpyEndpoint`` will not fire. """ d = Deferred() self._append((d, factory)) return d @implementer(IConnectionHintHandler) # type: ignore # warner/foolscap#78 @attr.s class SpyHandler(object): """ A Foolscap connection hint handler for the "spy" hint type. Connections are handled by just observing and recording them. :ivar list _connects: A list containing one element for each connection attempted with this handler. Each element is a two-tuple of the ``Deferred`` that was returned from ``connect`` and the factory that was passed to ``connect``. """ _connects : list[tuple[Deferred[object], IProtocolFactory]]= attr.ib(default=attr.Factory(list)) def hint_to_endpoint(self, hint, reactor, update_status): return (SpyEndpoint(self._connects.append), hint) class TestStorageFarmBroker(unittest.TestCase): def test_static_servers(self): broker = make_broker() key_s = b'v0-1234-1' servers_yaml = """\ storage: v0-1234-1: ann: anonymous-storage-FURL: {furl} permutation-seed-base32: aaaaaaaaaaaaaaaaaaaaaaaa """.format(furl=SOME_FURL) servers = yamlutil.safe_load(servers_yaml) permseed = base32.a2b(b"aaaaaaaaaaaaaaaaaaaaaaaa") broker.set_static_servers(servers["storage"]) self.failUnlessEqual(len(broker._static_server_ids), 1) s = broker.servers[key_s] self.failUnlessEqual(s.announcement, servers["storage"]["v0-1234-1"]["ann"]) self.failUnlessEqual(s.get_serverid(), key_s) self.assertEqual(s.get_permutation_seed(), permseed) # if the Introducer announces the same thing, we're supposed to # ignore it ann2 = { "service-name": "storage", "anonymous-storage-FURL": "pb://{}@nowhere/fake2".format(str(base32.b2a(b"1"), "utf-8")), "permutation-seed-base32": "bbbbbbbbbbbbbbbbbbbbbbbb", } broker._got_announcement(key_s, ann2) s2 = broker.servers[key_s] self.assertIdentical(s2, s) self.assertEqual(s2.get_permutation_seed(), permseed) def test_upgrade_from_foolscap_to_http(self): """ When an announcement is initially Foolscap but then switches to HTTP, HTTP is used, assuming HTTP is enabled. 
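        The switch is keyed on the ``ANONYMOUS_STORAGE_NURLS`` entry in the
        announcement: once it appears, the existing ``NativeStorageServer``
        is stopped and detached from the broker, and a new
        ``HTTPNativeStorageServer`` takes its place.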
""" tub_maker = lambda _: new_tub() config = config_from_string( "/dev/null", "", "[client]\nforce_foolscap = False\n" ) broker = StorageFarmBroker(True, tub_maker, config) broker.startService() self.addCleanup(broker.stopService) key_s = b'v0-1234-1' ones = str(base32.b2a(b"1"), "utf-8") initial_announcement = { "service-name": "storage", "anonymous-storage-FURL": f"pb://{ones}@nowhere/fake2", "permutation-seed-base32": "bbbbbbbbbbbbbbbbbbbbbbbb", } broker._got_announcement(key_s, initial_announcement) initial_service = broker.servers[key_s] self.assertIsInstance(initial_service, NativeStorageServer) self.assertTrue(initial_service.running) self.assertIdentical(initial_service.parent, broker) http_announcement = initial_announcement.copy() http_announcement[ANONYMOUS_STORAGE_NURLS] = {f"pb://{ones}@nowhere/fake2#v=1"} broker._got_announcement(key_s, http_announcement) self.assertFalse(initial_service.running) self.assertEqual(initial_service.parent, None) new_service = broker.servers[key_s] self.assertIsInstance(new_service, HTTPNativeStorageServer) self.assertTrue(new_service.running) self.assertIdentical(new_service.parent, broker) def test_static_permutation_seed_pubkey(self): broker = make_broker() server_id = b"v0-4uazse3xb6uu5qpkb7tel2bm6bpea4jhuigdhqcuvvse7hugtsia" k = b"4uazse3xb6uu5qpkb7tel2bm6bpea4jhuigdhqcuvvse7hugtsia" ann = { "anonymous-storage-FURL": SOME_FURL, } broker.set_static_servers({server_id.decode("ascii"): {"ann": ann}}) s = broker.servers[server_id] self.assertEqual(s.get_permutation_seed(), base32.a2b(k)) def test_static_permutation_seed_explicit(self): broker = make_broker() server_id = b"v0-4uazse3xb6uu5qpkb7tel2bm6bpea4jhuigdhqcuvvse7hugtsia" k = b"w5gl5igiexhwmftwzhai5jy2jixn7yx7" ann = { "anonymous-storage-FURL": SOME_FURL, "permutation-seed-base32": k, } broker.set_static_servers({server_id.decode("ascii"): {"ann": ann}}) s = broker.servers[server_id] self.assertEqual(s.get_permutation_seed(), base32.a2b(k)) def test_static_permutation_seed_hashed(self): broker = make_broker() server_id = b"unparseable" ann = { "anonymous-storage-FURL": SOME_FURL, } broker.set_static_servers({server_id.decode("ascii"): {"ann": ann}}) s = broker.servers[server_id] self.assertEqual(s.get_permutation_seed(), hashlib.sha256(server_id).digest()) @inlineCallbacks def test_threshold_reached(self): """ ``StorageFarmBroker.when_connected_enough`` returns a ``Deferred`` which only fires after the ``StorageFarmBroker`` has established at least as many connections as requested. """ introducer = MemoryIntroducerClient( new_tub(), SOME_FURL, b"", None, None, None, None, ) new_tubs = [] def make_tub(*args, **kwargs): return new_tubs.pop() broker = make_broker(make_tub) # Start the broker so that it will start Tubs attached to it so they # will attempt to make connections as necessary so that we can observe # those connections. 
broker.startService() self.addCleanup(broker.stopService) done = broker.when_connected_enough(5) broker.use_introducer(introducer) # subscribes to "storage" to learn of new storage nodes [subscribe] = introducer.subscribed_to self.assertEqual( subscribe.service_name, "storage", ) got_announcement = subscribe.cb data = { "service-name": "storage", "anonymous-storage-FURL": None, "permutation-seed-base32": "aaaaaaaaaaaaaaaaaaaaaaaa", } def add_one_server(x): data["anonymous-storage-FURL"] = "pb://%s@spy:nowhere/fake" % (str(base32.b2a(b"%d" % x), "ascii"),) tub = new_tub() connects = [] spy = SpyHandler(connects) tub.addConnectionHintHandler("spy", spy) new_tubs.append(tub) got_announcement(b'v0-1234-%d' % x, data) self.assertEqual( 1, len(connects), "Expected one connection attempt, got {!r} instead".format(connects), ) # Skip over all the Foolscap negotiation. It's complex with lots # of pieces and I don't want to figure out how to fake # it. -exarkun native = broker.servers[b"v0-1234-%d" % (x,)] rref = LocalWrapper(StorageServer(self.mktemp(), b"x" * 20)) native._got_connection(rref) # first 4 shouldn't trigger connected_threashold for x in range(4): add_one_server(x) self.assertFalse(done.called) # ...but the 5th *should* trigger the threshold add_one_server(42) # so: the OneShotObserverList only notifies via # foolscap.eventually() -- which forces the Deferred call # through the reactor -- so it's no longer synchronous, # meaning that we have to do "real reactor stuff" for the # Deferred from when_connected_enough() to actually fire. (or # @patch() out the reactor in foolscap.eventually to be a # Clock() so we can advance time ourselves, but ... luckily # eventually() uses 0 as the timeout currently) yield done self.assertTrue(done.called) def test_should_we_use_http_default(self): """Default is to use HTTP.""" basedir = self.mktemp() node_config = config_from_string(basedir, "", "") announcement = {ANONYMOUS_STORAGE_NURLS: ["pb://..."]} self.assertTrue( StorageFarmBroker._should_we_use_http(node_config, announcement) ) # Lacking NURLs, we can't use HTTP: self.assertFalse( StorageFarmBroker._should_we_use_http(node_config, {}) ) def test_should_we_use_http(self): """ If HTTP is allowed, it will only be used if the announcement includes some NURLs. """ basedir = self.mktemp() no_nurls = {} empty_nurls = {ANONYMOUS_STORAGE_NURLS: []} has_nurls = {ANONYMOUS_STORAGE_NURLS: ["pb://.."]} for force_foolscap, announcement, expected_http_usage in [ ("false", no_nurls, False), ("false", empty_nurls, False), ("false", has_nurls, True), ("true", empty_nurls, False), ("true", no_nurls, False), ("true", has_nurls, False), ]: node_config = config_from_string( basedir, "", f"[client]\nforce_foolscap = {force_foolscap}" ) self.assertEqual( StorageFarmBroker._should_we_use_http(node_config, announcement), expected_http_usage ) class PickHTTPServerTests(unittest.SynchronousTestCase): """Tests for ``_pick_a_http_server``.""" def pick_result(self, url_to_results: dict[DecodedURL, tuple[float, Union[Exception, Any]]]) -> Deferred[DecodedURL]: """ Given mapping of URLs to (delay, result), return the URL of the first selected server, or None. 
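        A result value that is an ``Exception`` instance is delivered as an
        errback; any other value is delivered as a callback after the given
        delay. Delays are simulated by advancing a ``Clock`` in 0.1 second
        steps rather than by waiting on a real reactor.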
""" clock = Clock() def request(reactor, url): delay, value = url_to_results[url] result = Deferred() def add_result_value(): if isinstance(value, Exception): result.errback(value) else: result.callback(value) reactor.callLater(delay, add_result_value) return result d = _pick_a_http_server(clock, list(url_to_results.keys()), request) for i in range(100): clock.advance(0.1) return d def test_first_successful_connect_is_picked(self): """ Given multiple good URLs, the first one that connects is chosen. """ earliest_url = DecodedURL.from_text("http://a") latest_url = DecodedURL.from_text("http://b") bad_url = DecodedURL.from_text("http://bad") result = self.pick_result({ latest_url: (2, None), earliest_url: (1, None), bad_url: (0.5, RuntimeError()), }) self.assertEqual(self.successResultOf(result), earliest_url) def test_failures_include_all_reasons(self): """ If all the requests fail, ``_pick_a_http_server`` raises a ``allmydata.util.deferredutil.MultiFailure``. """ eventually_good_url = DecodedURL.from_text("http://good") bad_url = DecodedURL.from_text("http://bad") exception1 = RuntimeError() exception2 = ZeroDivisionError() result = self.pick_result({ eventually_good_url: (1, exception1), bad_url: (0.1, exception2), }) exc = self.failureResultOf(result).value self.assertIsInstance(exc, MultiFailure) self.assertEqual({f.value for f in exc.failures}, {exception2, exception1}) tahoe_lafs-1.20.0/src/allmydata/test/test_storage_http.py0000644000000000000000000017504413615410400020464 0ustar00""" Tests for HTTP storage client + server. The tests here are synchronous and don't involve running a real reactor. This works, but has some caveats when it comes to testing HTTP endpoints: * Some HTTP endpoints are synchronous, some are not. * For synchronous endpoints, the result is immediately available on the ``Deferred`` coming out of ``StubTreq``. * For asynchronous endpoints, you need to use ``StubTreq.flush()`` and iterate the fake in-memory clock/reactor to advance time . So for HTTP endpoints, you should use ``HttpTestFixture.result_of_with_flush()`` which handles both, and patches and moves forward the global Twisted ``Cooperator`` since that is used to drive pull producers. This is, sadly, an internal implementation detail of Twisted being leaked to tests... For definitely synchronous calls, you can just use ``result_of()``. 
""" import time from base64 import b64encode from contextlib import contextmanager from os import urandom from typing import Union, Callable, Tuple, Iterable from queue import Queue from pycddl import ValidationError as CDDLValidationError from hypothesis import assume, given, strategies as st, settings as hypothesis_settings from fixtures import Fixture, TempDir, MonkeyPatch from treq.client import HTTPClient from treq.testing import StubTreq, RequestTraversalAgent from klein import Klein from hyperlink import DecodedURL from collections_extended import RangeMap from twisted.internet.task import Clock, Cooperator from twisted.internet.interfaces import IReactorTime, IReactorFromThreads from twisted.internet.defer import CancelledError, Deferred, ensureDeferred from twisted.web import http from twisted.web.http_headers import Headers from werkzeug import routing from werkzeug.exceptions import NotFound as WNotFound from testtools.matchers import Equals from zope.interface import implementer from ..util.cbor import dumps from ..util.deferredutil import async_to_deferred from ..util.cputhreadpool import disable_thread_pool_for_test from .common import SyncTestCase from ..storage.http_common import ( get_content_type, CBOR_MIME_TYPE, response_is_not_html, ) from ..storage.common import si_b2a from ..storage.lease import LeaseInfo from ..storage.server import StorageServer from ..storage.http_server import ( HTTPServer, _extract_secrets, Secrets, ClientSecretsException, _authorized_route, StorageIndexConverter, _add_error_handling, read_encoded, _SCHEMAS as SERVER_SCHEMAS, BaseApp, ) from ..storage.http_client import ( StorageClient, StorageClientFactory, ClientException, StorageClientImmutables, ImmutableCreateResult, UploadProgress, StorageClientGeneral, _encode_si, StorageClientMutables, TestWriteVectors, WriteVector, ReadVector, ReadTestWriteResult, TestVector, limited_content, ) class HTTPUtilities(SyncTestCase): """Tests for HTTP common utilities.""" def test_get_content_type(self): """``get_content_type()`` extracts the content-type from the header.""" def assert_header_values_result(values, expected_content_type): headers = Headers() if values: headers.setRawHeaders("Content-Type", values) content_type = get_content_type(headers) self.assertEqual(content_type, expected_content_type) assert_header_values_result(["text/html"], "text/html") assert_header_values_result([], None) assert_header_values_result(["text/plain", "application/json"], "text/plain") assert_header_values_result(["text/html;encoding=utf-8"], "text/html") def _post_process(params): secret_types, secrets = params secrets = {t: s for (t, s) in zip(secret_types, secrets)} headers = [ "{} {}".format( secret_type.value, str(b64encode(secrets[secret_type]), "ascii").strip() ) for secret_type in secret_types ] return secrets, headers # Creates a tuple of ({Secret enum value: secret_bytes}, [http headers with secrets]). SECRETS_STRATEGY = ( st.sets(st.sampled_from(Secrets)) .flatmap( lambda secret_types: st.tuples( st.just(secret_types), st.lists( st.binary(min_size=32, max_size=32), min_size=len(secret_types), max_size=len(secret_types), ), ) ) .map(_post_process) ) class ExtractSecretsTests(SyncTestCase): """ Tests for ``_extract_secrets``. """ @given(secrets_to_send=SECRETS_STRATEGY) def test_extract_secrets(self, secrets_to_send): """ ``_extract_secrets()`` returns a dictionary with the extracted secrets if the input secrets match the required secrets. 
""" secrets, headers = secrets_to_send # No secrets needed, none given: self.assertEqual(_extract_secrets(headers, secrets.keys()), secrets) @given( secrets_to_send=SECRETS_STRATEGY, secrets_to_require=st.sets(st.sampled_from(Secrets)), ) def test_wrong_number_of_secrets(self, secrets_to_send, secrets_to_require): """ If the wrong number of secrets are passed to ``_extract_secrets``, a ``ClientSecretsException`` is raised. """ secrets_to_send, headers = secrets_to_send assume(secrets_to_send.keys() != secrets_to_require) with self.assertRaises(ClientSecretsException): _extract_secrets(headers, secrets_to_require) def test_bad_secret_missing_value(self): """ Missing value in ``_extract_secrets`` result in ``ClientSecretsException``. """ with self.assertRaises(ClientSecretsException): _extract_secrets(["lease-renew-secret"], {Secrets.LEASE_RENEW}) def test_bad_secret_unknown_prefix(self): """ Missing value in ``_extract_secrets`` result in ``ClientSecretsException``. """ with self.assertRaises(ClientSecretsException): _extract_secrets(["FOO eA=="], set()) def test_bad_secret_not_base64(self): """ A non-base64 value in ``_extract_secrets`` result in ``ClientSecretsException``. """ with self.assertRaises(ClientSecretsException): _extract_secrets(["lease-renew-secret x"], {Secrets.LEASE_RENEW}) def test_bad_secret_wrong_length_lease_renew(self): """ Lease renewal secrets must be 32-bytes long. """ with self.assertRaises(ClientSecretsException): _extract_secrets(["lease-renew-secret eA=="], {Secrets.LEASE_RENEW}) def test_bad_secret_wrong_length_lease_cancel(self): """ Lease cancel secrets must be 32-bytes long. """ with self.assertRaises(ClientSecretsException): _extract_secrets(["lease-cancel-secret eA=="], {Secrets.LEASE_RENEW}) class RouteConverterTests(SyncTestCase): """Tests for custom werkzeug path segment converters.""" adapter = routing.Map( [ routing.Rule( "//", endpoint="si", methods=["GET"] ) ], converters={"storage_index": StorageIndexConverter}, ).bind("example.com", "/") @given(storage_index=st.binary(min_size=16, max_size=16)) def test_good_storage_index_is_parsed(self, storage_index): """ A valid storage index is accepted and parsed back out by StorageIndexConverter. 
""" self.assertEqual( self.adapter.match( "/{}/".format(str(si_b2a(storage_index), "ascii")), method="GET" ), ("si", {"storage_index": storage_index}), ) def test_long_storage_index_is_not_parsed(self): """An overly long storage_index string is not parsed.""" with self.assertRaises(WNotFound): self.adapter.match("/{}/".format("a" * 27), method="GET") def test_short_storage_index_is_not_parsed(self): """An overly short storage_index string is not parsed.""" with self.assertRaises(WNotFound): self.adapter.match("/{}/".format("a" * 25), method="GET") def test_bad_characters_storage_index_is_not_parsed(self): """A storage_index string with bad characters is not parsed.""" with self.assertRaises(WNotFound): self.adapter.match("/{}_/".format("a" * 25), method="GET") def test_invalid_storage_index_is_not_parsed(self): """An invalid storage_index string is not parsed.""" with self.assertRaises(WNotFound): self.adapter.match("/nomd2a65ylxjbqzsw7gcfh4ivr/", method="GET") # TODO should be actual swissnum SWISSNUM_FOR_TEST = b"abcd" def gen_bytes(length: int) -> bytes: """Generate bytes to the given length.""" result = (b"0123456789abcdef" * ((length // 16) + 1))[:length] assert len(result) == length return result class TestApp(BaseApp): """HTTP API for testing purposes.""" clock: IReactorTime _app = Klein() _add_error_handling(_app) _swissnum = SWISSNUM_FOR_TEST # Match what the test client is using @_authorized_route(_app, set(), "/noop", methods=["GET"]) def noop(self, request, authorization): return "noop" @_authorized_route(_app, {Secrets.UPLOAD}, "/upload_secret", methods=["GET"]) def validate_upload_secret(self, request, authorization): if authorization == {Secrets.UPLOAD: b"MAGIC"}: return "GOOD SECRET" else: return "BAD: {}".format(authorization) @_authorized_route(_app, set(), "/storage/v1/version", methods=["GET"]) def bad_version(self, request, authorization): """Return version result that violates the expected schema.""" request.setHeader("content-type", CBOR_MIME_TYPE) return dumps({"garbage": 123}) @_authorized_route(_app, set(), "/bytes/", methods=["GET"]) def generate_bytes(self, request, authorization, length): """Return bytes to the given length using ``gen_bytes()``.""" return gen_bytes(length) @_authorized_route(_app, set(), "/slowly_never_finish_result", methods=["GET"]) def slowly_never_finish_result(self, request, authorization): """ Send data immediately, after 59 seconds, after another 59 seconds, and then never again, without finishing the response. """ request.write(b"a") self.clock.callLater(59, request.write, b"b") self.clock.callLater(59 + 59, request.write, b"c") return Deferred() @_authorized_route(_app, set(), "/die_unfinished", methods=["GET"]) def die(self, request, authorization): """ Dies half-way. """ request.transport.loseConnection() return Deferred() @_authorized_route(_app, set(), "/read_body", methods=["POST"]) @async_to_deferred async def read_body(self, request, authorization): """ Accept an advise_corrupt_share message, return the reason. I.e. exercise codepaths used for reading CBOR from the body. """ data = await read_encoded( self.clock, request, SERVER_SCHEMAS["advise_corrupt_share"] ) return data["reason"] def result_of(d): """ Synchronously extract the result of a Deferred. """ result = [] error = [] d.addCallbacks(result.append, error.append) if result: return result[0] if error: error[0].raiseException() raise RuntimeError( "We expected given Deferred to have result already, but it wasn't. " + "This is probably a test design issue." 
) class CustomHTTPServerTests(SyncTestCase): """ Tests that use a custom HTTP server. """ def setUp(self): super(CustomHTTPServerTests, self).setUp() disable_thread_pool_for_test(self) StorageClientFactory.start_test_mode( lambda pool: self.addCleanup(pool.closeCachedConnections) ) self.addCleanup(StorageClientFactory.stop_test_mode) # Could be a fixture, but will only be used in this test class so not # going to bother: self._http_server = TestApp() treq = StubTreq(self._http_server._app.resource()) self.client = StorageClient( DecodedURL.from_text("http://127.0.0.1"), SWISSNUM_FOR_TEST, treq=treq, pool=None, # We're using a Treq private API to get the reactor, alas, but only # in a test, so not going to worry about it too much. This would be # fixed if https://github.com/twisted/treq/issues/226 were ever # fixed. clock=treq._agent._memoryReactor, analyze_response=response_is_not_html, ) self._http_server.clock = self.client._clock def test_bad_swissnum_from_client(self) -> None: """ If the swissnum is invalid, a BAD REQUEST response code is returned. """ headers = Headers() # The value is not UTF-8. headers.addRawHeader("Authorization", b"\x00\xFF\x00\xFF") response = result_of( self.client._treq.request( "GET", DecodedURL.from_text("http://127.0.0.1/noop"), headers=headers, ) ) self.assertEqual(response.code, 400) def test_bad_secret(self) -> None: """ If the secret is invalid (not base64), a BAD REQUEST response code is returned. """ bad_secret = b"upload-secret []<>" headers = Headers() headers.addRawHeader( "X-Tahoe-Authorization", bad_secret, ) response = result_of( self.client.request( "GET", DecodedURL.from_text("http://127.0.0.1/upload_secret"), headers=headers, ) ) self.assertEqual(response.code, 400) def test_authorization_enforcement(self): """ The requirement for secrets is enforced by the ``_authorized_route`` decorator; if they are not given, a 400 response code is returned. Note that this refers to ``X-Tahoe-Authorization``, not the ``Authorization`` header used for the swissnum. """ # Without secret, get a 400 error. response = result_of( self.client.request( "GET", DecodedURL.from_text("http://127.0.0.1/upload_secret"), ) ) self.assertEqual(response.code, 400) # With secret, we're good. response = result_of( self.client.request( "GET", DecodedURL.from_text("http://127.0.0.1/upload_secret"), upload_secret=b"MAGIC", ) ) self.assertEqual(response.code, 200) self.assertEqual(result_of(response.content()), b"GOOD SECRET") def test_client_side_schema_validation(self): """ The client validates returned CBOR message against a schema. """ client = StorageClientGeneral(self.client) with self.assertRaises(CDDLValidationError): result_of(client.get_version()) @given(length=st.integers(min_value=1, max_value=1_000_000)) # On Python 3.12 we're getting weird deadline issues in CI, so disabling # for now. @hypothesis_settings(deadline=None) def test_limited_content_fits(self, length): """ ``http_client.limited_content()`` returns the body if it is less than the max length. 
""" for at_least_length in (length, length + 1, length + 1000, length + 100_000): response = result_of( self.client.request( "GET", DecodedURL.from_text(f"http://127.0.0.1/bytes/{length}"), ) ) self.assertEqual( result_of( limited_content(response, self._http_server.clock, at_least_length) ).read(), gen_bytes(length), ) @given(length=st.integers(min_value=10, max_value=1_000_000)) def test_limited_content_does_not_fit(self, length): """ If the body is longer than than max length, ``http_client.limited_content()`` fails with a ``ValueError``. """ for too_short in (length - 1, 5): response = result_of( self.client.request( "GET", DecodedURL.from_text(f"http://127.0.0.1/bytes/{length}"), ) ) with self.assertRaises(ValueError): result_of(limited_content(response, self._http_server.clock, too_short)) def test_limited_content_silence_causes_timeout(self): """ ``http_client.limited_content() times out if it receives no data for 60 seconds. """ response = result_of( self.client.request( "GET", DecodedURL.from_text("http://127.0.0.1/slowly_never_finish_result"), ) ) body_deferred = limited_content(response, self._http_server.clock, 4) result = [] error = [] body_deferred.addCallbacks(result.append, error.append) for i in range(59 + 59 + 60): self.assertEqual((result, error), ([], [])) self._http_server.clock.advance(1) # Push data between in-memory client and in-memory server: self.client._treq._agent.flush() # After 59 (second write) + 59 (third write) + 60 seconds (quiescent # timeout) the limited_content() response times out. self.assertTrue(error) with self.assertRaises(CancelledError): error[0].raiseException() def test_limited_content_cancels_timeout_on_failed_response(self): """ If the response fails somehow, the timeout is still cancelled. """ response = result_of( self.client.request( "GET", DecodedURL.from_text("http://127.0.0.1/die"), ) ) d = limited_content(response, self._http_server.clock, 4) with self.assertRaises(ValueError): result_of(d) self.assertEqual(len(self._http_server.clock.getDelayedCalls()), 0) def test_request_with_no_content_type_same_as_cbor(self): """ If no ``Content-Type`` header is set when sending a body, it is assumed to be CBOR. """ response = result_of( self.client.request( "POST", DecodedURL.from_text("http://127.0.0.1/read_body"), data=dumps({"reason": "test"}), ) ) self.assertEqual( result_of(limited_content(response, self._http_server.clock, 100)).read(), b"test", ) def test_request_with_wrong_content(self): """ If a non-CBOR ``Content-Type`` header is set when sending a body, the server complains appropriatly. """ headers = Headers() headers.setRawHeaders("content-type", ["some/value"]) response = result_of( self.client.request( "POST", DecodedURL.from_text("http://127.0.0.1/read_body"), data=dumps({"reason": "test"}), headers=headers, ) ) self.assertEqual(response.code, http.UNSUPPORTED_MEDIA_TYPE) @implementer(IReactorFromThreads) class Reactor(Clock): """ Fake reactor that supports time APIs and callFromThread. Advancing the clock also runs any callbacks scheduled via callFromThread. """ def __init__(self): Clock.__init__(self) self._queue = Queue() def callFromThread(self, callable, *args, **kwargs): self._queue.put((callable, args, kwargs)) def advance(self, *args, **kwargs): Clock.advance(self, *args, **kwargs) while not self._queue.empty(): f, args, kwargs = self._queue.get() f(*args, **kwargs) class HttpTestFixture(Fixture): """ Setup HTTP tests' infrastructure, the storage server and corresponding client. 
""" def _setUp(self): StorageClientFactory.start_test_mode( lambda pool: self.addCleanup(pool.closeCachedConnections) ) self.addCleanup(StorageClientFactory.stop_test_mode) self.clock = Reactor() self.tempdir = self.useFixture(TempDir()) # The global Cooperator used by Twisted (a) used by pull producers in # twisted.web, (b) is driven by a real reactor. We want to push time # forward ourselves since we rely on pull producers in the HTTP storage # server. self.mock = self.useFixture( MonkeyPatch( "twisted.internet.task._theCooperator", Cooperator(scheduler=lambda c: self.clock.callLater(0.000001, c)), ) ) self.storage_server = StorageServer( self.tempdir.path, b"\x00" * 20, clock=self.clock ) self.http_server = HTTPServer( self.clock, self.storage_server, SWISSNUM_FOR_TEST ) self.treq = StubTreq(self.http_server.get_resource()) self.client = StorageClient( DecodedURL.from_text("http://127.0.0.1"), SWISSNUM_FOR_TEST, treq=self.treq, pool=None, clock=self.clock, analyze_response=response_is_not_html, ) def result_of_with_flush(self, d): """ Like ``result_of``, but supports fake reactor and ``treq`` testing infrastructure necessary to support asynchronous HTTP server endpoints. """ d = ensureDeferred(d) result = [] error = [] d.addCallbacks(result.append, error.append) # Check for synchronous HTTP endpoint handler: if result: return result[0] if error: error[0].raiseException() # OK, no result yet, probably async HTTP endpoint handler, so advance # time, flush treq, and try again: for i in range(10_000): self.clock.advance(0.001) self.treq.flush() if result: break # By putting the sleep at the end, tests that are completely # synchronous and don't use threads will have already broken out of # the loop, and so will finish without any sleeps. This allows them # to run as quickly as possible. # # However, some tests do talk to APIs that use a thread pool on the # backend, so we need to allow actual time to pass for those. time.sleep(0.001) if result: return result[0] if error: error[0].raiseException() raise RuntimeError( "We expected given Deferred to have result already, but it wasn't. " + "This is probably a test design issue." ) class StorageClientWithHeadersOverride(object): """Wrap ``StorageClient`` and override sent headers.""" def __init__(self, storage_client, add_headers): self.storage_client = storage_client self.add_headers = add_headers def __getattr__(self, attr): return getattr(self.storage_client, attr) def request(self, *args, headers=None, **kwargs): if headers is None: headers = Headers() for key, value in self.add_headers.items(): headers.setRawHeaders(key, [value]) return self.storage_client.request(*args, headers=headers, **kwargs) @contextmanager def assert_fails_with_http_code(test_case: SyncTestCase, code: int): """ Context manager that asserts the code fails with the given HTTP response code. """ with test_case.assertRaises(ClientException) as e: try: yield finally: pass test_case.assertEqual(e.exception.code, code) class GenericHTTPAPITests(SyncTestCase): """ Tests of HTTP client talking to the HTTP server, for generic HTTP API endpoints and concerns. """ def setUp(self): super(GenericHTTPAPITests, self).setUp() disable_thread_pool_for_test(self) self.http = self.useFixture(HttpTestFixture()) def test_missing_authentication(self) -> None: """ If nothing is given in the ``Authorization`` header at all an ``Unauthorized`` response is returned. 
""" client = HTTPClient( RequestTraversalAgent(self.http.http_server.get_resource()) ) response = self.http.result_of_with_flush( client.request( "GET", "http://127.0.0.1/storage/v1/version", ), ) self.assertThat(response.code, Equals(http.UNAUTHORIZED)) def test_bad_authentication(self): """ If the wrong swissnum is used, an ``Unauthorized`` response code is returned. """ client = StorageClientGeneral( StorageClient( DecodedURL.from_text("http://127.0.0.1"), b"something wrong", treq=StubTreq(self.http.http_server.get_resource()), pool=None, clock=self.http.clock, analyze_response=response_is_not_html, ) ) with assert_fails_with_http_code(self, http.UNAUTHORIZED): self.http.result_of_with_flush(client.get_version()) def test_unsupported_mime_type(self): """ The client can request mime types other than CBOR, and if they are unsupported a NOT ACCEPTABLE (406) error will be returned. """ client = StorageClientGeneral( StorageClientWithHeadersOverride(self.http.client, {"accept": "image/gif"}) ) with assert_fails_with_http_code(self, http.NOT_ACCEPTABLE): self.http.result_of_with_flush(client.get_version()) def test_version(self): """ The client can return the version. We ignore available disk space and max immutable share size, since that might change across calls. """ client = StorageClientGeneral(self.http.client) version = self.http.result_of_with_flush(client.get_version()) version[b"http://allmydata.org/tahoe/protocols/storage/v1"].pop( b"available-space" ) version[b"http://allmydata.org/tahoe/protocols/storage/v1"].pop( b"maximum-immutable-share-size" ) expected_version = self.http.storage_server.get_version() expected_version[b"http://allmydata.org/tahoe/protocols/storage/v1"].pop( b"available-space" ) expected_version[b"http://allmydata.org/tahoe/protocols/storage/v1"].pop( b"maximum-immutable-share-size" ) self.assertEqual(version, expected_version) def test_server_side_schema_validation(self): """ Ensure that schema validation is happening: invalid CBOR should result in bad request response code (error 400). We don't bother checking every single request, the API on the server-side is designed to require a schema, so it validates everywhere. But we check at least one to ensure we get correct response code on bad input, so we know validation happened. """ upload_secret = urandom(32) lease_secret = urandom(32) storage_index = urandom(16) url = self.http.client.relative_url( "/storage/v1/immutable/" + _encode_si(storage_index) ) message = {"bad-message": "missing expected keys"} response = self.http.result_of_with_flush( self.http.client.request( "POST", url, lease_renew_secret=lease_secret, lease_cancel_secret=lease_secret, upload_secret=upload_secret, message_to_serialize=message, ) ) self.assertEqual(response.code, http.BAD_REQUEST) class ImmutableHTTPAPITests(SyncTestCase): """ Tests for immutable upload/download APIs. 
""" def setUp(self): super(ImmutableHTTPAPITests, self).setUp() disable_thread_pool_for_test(self) self.http = self.useFixture(HttpTestFixture()) self.imm_client = StorageClientImmutables(self.http.client) self.general_client = StorageClientGeneral(self.http.client) def create_upload(self, share_numbers, length): """ Create a write bucket on server, return: (upload_secret, lease_secret, storage_index, result) """ upload_secret = urandom(32) lease_secret = urandom(32) storage_index = urandom(16) created = self.http.result_of_with_flush( self.imm_client.create( storage_index, share_numbers, length, upload_secret, lease_secret, lease_secret, ) ) return (upload_secret, lease_secret, storage_index, created) def test_upload_can_be_downloaded(self): """ A single share can be uploaded in (possibly overlapping) chunks, and then a random chunk can be downloaded, and it will match the original file. We don't exercise the full variation of overlapping chunks because that's already done in test_storage.py. """ length = 100 expected_data = bytes(range(100)) # Create a upload: (upload_secret, _, storage_index, created) = self.create_upload({1}, 100) self.assertEqual( created, ImmutableCreateResult(already_have=set(), allocated={1}) ) remaining = RangeMap() remaining.set(True, 0, 100) # Three writes: 10-19, 30-39, 50-59. This allows for a bunch of holes. def write(offset, length): remaining.empty(offset, offset + length) return self.imm_client.write_share_chunk( storage_index, 1, upload_secret, offset, expected_data[offset : offset + length], ) upload_progress = self.http.result_of_with_flush(write(10, 10)) self.assertEqual( upload_progress, UploadProgress(finished=False, required=remaining) ) upload_progress = self.http.result_of_with_flush(write(30, 10)) self.assertEqual( upload_progress, UploadProgress(finished=False, required=remaining) ) upload_progress = self.http.result_of_with_flush(write(50, 10)) self.assertEqual( upload_progress, UploadProgress(finished=False, required=remaining) ) # Then, an overlapping write with matching data (15-35): upload_progress = self.http.result_of_with_flush(write(15, 20)) self.assertEqual( upload_progress, UploadProgress(finished=False, required=remaining) ) # Now fill in the holes: upload_progress = self.http.result_of_with_flush(write(0, 10)) self.assertEqual( upload_progress, UploadProgress(finished=False, required=remaining) ) upload_progress = self.http.result_of_with_flush(write(40, 10)) self.assertEqual( upload_progress, UploadProgress(finished=False, required=remaining) ) upload_progress = self.http.result_of_with_flush(write(60, 40)) self.assertEqual( upload_progress, UploadProgress(finished=True, required=RangeMap()) ) # We can now read: for offset, length in [(0, 100), (10, 19), (99, 1), (49, 200)]: downloaded = self.http.result_of_with_flush( self.imm_client.read_share_chunk(storage_index, 1, offset, length) ) self.assertEqual(downloaded, expected_data[offset : offset + length]) def test_write_with_wrong_upload_key(self): """ A write with an upload key that is different than the original upload key will fail. 
""" (upload_secret, _, storage_index, _) = self.create_upload({1}, 100) with assert_fails_with_http_code(self, http.UNAUTHORIZED): self.http.result_of_with_flush( self.imm_client.write_share_chunk( storage_index, 1, upload_secret + b"X", 0, b"123", ) ) def test_allocate_buckets_second_time_different_shares(self): """ If allocate buckets endpoint is called second time with different upload key on potentially different shares, that creates the buckets on those shares that are different. """ # Create a upload: (upload_secret, lease_secret, storage_index, created) = self.create_upload( {1, 2, 3}, 100 ) # Write half of share 1 self.http.result_of_with_flush( self.imm_client.write_share_chunk( storage_index, 1, upload_secret, 0, b"a" * 50, ) ) # Add same shares with a different upload key share 1 overlaps with # existing shares, this call shouldn't overwrite the existing # work-in-progress. upload_secret2 = b"x" * 2 created2 = self.http.result_of_with_flush( self.imm_client.create( storage_index, {1, 4, 6}, 100, upload_secret2, lease_secret, lease_secret, ) ) self.assertEqual(created2.allocated, {4, 6}) # Write second half of share 1 self.assertTrue( self.http.result_of_with_flush( self.imm_client.write_share_chunk( storage_index, 1, upload_secret, 50, b"b" * 50, ) ).finished ) # The upload of share 1 succeeded, demonstrating that second create() # call didn't overwrite work-in-progress. downloaded = self.http.result_of_with_flush( self.imm_client.read_share_chunk(storage_index, 1, 0, 100) ) self.assertEqual(downloaded, b"a" * 50 + b"b" * 50) # We can successfully upload the shares created with the second upload secret. self.assertTrue( self.http.result_of_with_flush( self.imm_client.write_share_chunk( storage_index, 4, upload_secret2, 0, b"x" * 100, ) ).finished ) def test_list_shares(self): """ Once a share is finished uploading, it's possible to list it. """ (upload_secret, _, storage_index, created) = self.create_upload({1, 2, 3}, 10) # Initially there are no shares: self.assertEqual( self.http.result_of_with_flush(self.imm_client.list_shares(storage_index)), set(), ) # Upload shares 1 and 3: for share_number in [1, 3]: progress = self.http.result_of_with_flush( self.imm_client.write_share_chunk( storage_index, share_number, upload_secret, 0, b"0123456789", ) ) self.assertTrue(progress.finished) # Now shares 1 and 3 exist: self.assertEqual( self.http.result_of_with_flush(self.imm_client.list_shares(storage_index)), {1, 3}, ) def test_upload_bad_content_range(self): """ Malformed or invalid Content-Range headers to the immutable upload endpoint result in a 416 error. """ (upload_secret, _, storage_index, created) = self.create_upload({1}, 10) def check_invalid(bad_content_range_value): client = StorageClientImmutables( StorageClientWithHeadersOverride( self.http.client, {"content-range": bad_content_range_value} ) ) with assert_fails_with_http_code( self, http.REQUESTED_RANGE_NOT_SATISFIABLE ): self.http.result_of_with_flush( client.write_share_chunk( storage_index, 1, upload_secret, 0, b"0123456789", ) ) check_invalid("not a valid content-range header at all") check_invalid("bytes -1-9/10") check_invalid("bytes 0--9/10") check_invalid("teapots 0-9/10") def test_list_shares_unknown_storage_index(self): """ Listing unknown storage index's shares results in empty list of shares. 
""" storage_index = bytes(range(16)) self.assertEqual( self.http.result_of_with_flush(self.imm_client.list_shares(storage_index)), set(), ) def test_upload_non_existent_storage_index(self): """ Uploading to a non-existent storage index or share number results in 404. """ (upload_secret, _, storage_index, _) = self.create_upload({1}, 10) def unknown_check(storage_index, share_number): with assert_fails_with_http_code(self, http.NOT_FOUND): self.http.result_of_with_flush( self.imm_client.write_share_chunk( storage_index, share_number, upload_secret, 0, b"0123456789", ) ) # Wrong share number: unknown_check(storage_index, 7) # Wrong storage index: unknown_check(b"X" * 16, 7) def test_multiple_shares_uploaded_to_different_place(self): """ If a storage index has multiple shares, uploads to different shares are stored separately and can be downloaded separately. """ (upload_secret, _, storage_index, _) = self.create_upload({1, 2}, 10) self.http.result_of_with_flush( self.imm_client.write_share_chunk( storage_index, 1, upload_secret, 0, b"1" * 10, ) ) self.http.result_of_with_flush( self.imm_client.write_share_chunk( storage_index, 2, upload_secret, 0, b"2" * 10, ) ) self.assertEqual( self.http.result_of_with_flush( self.imm_client.read_share_chunk(storage_index, 1, 0, 10) ), b"1" * 10, ) self.assertEqual( self.http.result_of_with_flush( self.imm_client.read_share_chunk(storage_index, 2, 0, 10) ), b"2" * 10, ) def test_mismatching_upload_fails(self): """ If an uploaded chunk conflicts with an already uploaded chunk, a CONFLICT error is returned. """ (upload_secret, _, storage_index, created) = self.create_upload({1}, 100) # Write: self.http.result_of_with_flush( self.imm_client.write_share_chunk( storage_index, 1, upload_secret, 0, b"0" * 10, ) ) # Conflicting write: with assert_fails_with_http_code(self, http.CONFLICT): self.http.result_of_with_flush( self.imm_client.write_share_chunk( storage_index, 1, upload_secret, 0, b"0123456789", ) ) def test_timed_out_upload_allows_reupload(self): """ If an in-progress upload times out, it is cancelled altogether, allowing a new upload to occur. """ self._test_abort_or_timed_out_upload_to_existing_storage_index( lambda **kwargs: self.http.clock.advance(30 * 60 + 1) ) def test_abort_upload_allows_reupload(self): """ If an in-progress upload is aborted, it is cancelled altogether, allowing a new upload to occur. """ def abort(storage_index, share_number, upload_secret): return self.http.result_of_with_flush( self.imm_client.abort_upload(storage_index, share_number, upload_secret) ) self._test_abort_or_timed_out_upload_to_existing_storage_index(abort) def _test_abort_or_timed_out_upload_to_existing_storage_index(self, cancel_upload): """Start uploading to an existing storage index that then times out or aborts. Re-uploading should work. 
""" # Start an upload: (upload_secret, _, storage_index, _) = self.create_upload({1}, 100) self.http.result_of_with_flush( self.imm_client.write_share_chunk( storage_index, 1, upload_secret, 0, b"123", ) ) # Now, the upload is cancelled somehow: cancel_upload( storage_index=storage_index, upload_secret=upload_secret, share_number=1 ) # Now we can create a new share with the same storage index without # complaint: upload_secret = urandom(32) lease_secret = urandom(32) created = self.http.result_of_with_flush( self.imm_client.create( storage_index, {1}, 100, upload_secret, lease_secret, lease_secret, ) ) self.assertEqual(created.allocated, {1}) # And write to it, too: self.http.result_of_with_flush( self.imm_client.write_share_chunk( storage_index, 1, upload_secret, 0, b"ABC", ) ) def test_unknown_aborts(self): """ Aborting uploads with an unknown storage index or share number will result 404 HTTP response code. """ (upload_secret, _, storage_index, _) = self.create_upload({1}, 100) for si, num in [(storage_index, 3), (b"x" * 16, 1)]: with assert_fails_with_http_code(self, http.NOT_FOUND): self.http.result_of_with_flush( self.imm_client.abort_upload(si, num, upload_secret) ) def test_unauthorized_abort(self): """ An abort with the wrong key will return an unauthorized error, and will not abort the upload. """ (upload_secret, _, storage_index, _) = self.create_upload({1}, 100) # Failed to abort becaues wrong upload secret: with assert_fails_with_http_code(self, http.UNAUTHORIZED): self.http.result_of_with_flush( self.imm_client.abort_upload(storage_index, 1, upload_secret + b"X") ) # We can still write to it: self.http.result_of_with_flush( self.imm_client.write_share_chunk( storage_index, 1, upload_secret, 0, b"ABC", ) ) def test_too_late_abort(self): """ An abort of an already-fully-uploaded immutable will result in 405 error and will not affect the immutable. """ uploaded_data = b"123" (upload_secret, _, storage_index, _) = self.create_upload({0}, 3) self.http.result_of_with_flush( self.imm_client.write_share_chunk( storage_index, 0, upload_secret, 0, uploaded_data, ) ) # Can't abort, we finished upload: with assert_fails_with_http_code(self, http.NOT_ALLOWED): self.http.result_of_with_flush( self.imm_client.abort_upload(storage_index, 0, upload_secret) ) # Abort didn't prevent reading: self.assertEqual( uploaded_data, self.http.result_of_with_flush( self.imm_client.read_share_chunk( storage_index, 0, 0, 3, ) ), ) def test_lease_on_unknown_storage_index(self): """ An attempt to renew an unknown storage index will result in a HTTP 404. """ storage_index = urandom(16) secret = b"A" * 32 with assert_fails_with_http_code(self, http.NOT_FOUND): self.http.result_of_with_flush( self.general_client.add_or_renew_lease(storage_index, secret, secret) ) class MutableHTTPAPIsTests(SyncTestCase): """Tests for mutable APIs.""" def setUp(self): super(MutableHTTPAPIsTests, self).setUp() disable_thread_pool_for_test(self) self.http = self.useFixture(HttpTestFixture()) self.mut_client = StorageClientMutables(self.http.client) def create_upload(self, data=b"abcdef"): """ Utility that creates shares 0 and 1 with bodies ``{data}-{share_number}``. 
""" write_secret = urandom(32) lease_secret = urandom(32) storage_index = urandom(16) self.http.result_of_with_flush( self.mut_client.read_test_write_chunks( storage_index, write_secret, lease_secret, lease_secret, { 0: TestWriteVectors( write_vectors=[WriteVector(offset=0, data=data + b"-0")] ), 1: TestWriteVectors( write_vectors=[ WriteVector(offset=0, data=data), WriteVector(offset=len(data), data=b"-1"), ] ), }, [], ) ) return storage_index, write_secret, lease_secret def test_write_can_be_read_small_data(self): """ Small written data can be read using ``read_share_chunk``. """ self.write_can_be_read(b"abcdef") def test_write_can_be_read_large_data(self): """ Large written data (50MB) can be read using ``read_share_chunk``. """ self.write_can_be_read(b"abcdefghij" * 5 * 1024 * 1024) def write_can_be_read(self, data): """ Written data can be read using ``read_share_chunk``. """ lease_secret = urandom(32) storage_index = urandom(16) self.http.result_of_with_flush( self.mut_client.read_test_write_chunks( storage_index, urandom(32), lease_secret, lease_secret, { 0: TestWriteVectors( write_vectors=[WriteVector(offset=0, data=data)] ), }, [], ) ) read_data = self.http.result_of_with_flush( self.mut_client.read_share_chunk(storage_index, 0, 0, len(data)) ) self.assertEqual(read_data, data) def test_read_before_write(self): """In combo read/test/write operation, reads happen before writes.""" storage_index, write_secret, lease_secret = self.create_upload() result = self.http.result_of_with_flush( self.mut_client.read_test_write_chunks( storage_index, write_secret, lease_secret, lease_secret, { 0: TestWriteVectors( write_vectors=[WriteVector(offset=1, data=b"XYZ")] ), }, [ReadVector(0, 8)], ) ) # Reads are from before the write: self.assertEqual( result, ReadTestWriteResult( success=True, reads={0: [b"abcdef-0"], 1: [b"abcdef-1"]} ), ) # But the write did happen: data0 = self.http.result_of_with_flush( self.mut_client.read_share_chunk(storage_index, 0, 0, 8) ) data1 = self.http.result_of_with_flush( self.mut_client.read_share_chunk(storage_index, 1, 0, 8) ) self.assertEqual((data0, data1), (b"aXYZef-0", b"abcdef-1")) def test_conditional_write(self): """Uploads only happen if the test passes.""" storage_index, write_secret, lease_secret = self.create_upload() result_failed = self.http.result_of_with_flush( self.mut_client.read_test_write_chunks( storage_index, write_secret, lease_secret, lease_secret, { 0: TestWriteVectors( test_vectors=[TestVector(1, 4, b"FAIL")], write_vectors=[WriteVector(offset=1, data=b"XYZ")], ), }, [], ) ) self.assertFalse(result_failed.success) # This time the test matches: result = self.http.result_of_with_flush( self.mut_client.read_test_write_chunks( storage_index, write_secret, lease_secret, lease_secret, { 0: TestWriteVectors( test_vectors=[TestVector(1, 4, b"bcde")], write_vectors=[WriteVector(offset=1, data=b"XYZ")], ), }, [ReadVector(0, 8)], ) ) self.assertTrue(result.success) self.assertEqual( self.http.result_of_with_flush( self.mut_client.read_share_chunk(storage_index, 0, 0, 8) ), b"aXYZef-0", ) def test_list_shares(self): """``list_shares()`` returns the shares for a given storage index.""" storage_index, _, _ = self.create_upload() self.assertEqual( self.http.result_of_with_flush(self.mut_client.list_shares(storage_index)), {0, 1}, ) def test_non_existent_list_shares(self): """A non-existent storage index errors when shares are listed.""" with self.assertRaises(ClientException) as exc: 
self.http.result_of_with_flush(self.mut_client.list_shares(urandom(32))) self.assertEqual(exc.exception.code, http.NOT_FOUND) def test_wrong_write_enabler(self): """Writes with the wrong write enabler fail, and are not processed.""" storage_index, write_secret, lease_secret = self.create_upload() with self.assertRaises(ClientException) as exc: self.http.result_of_with_flush( self.mut_client.read_test_write_chunks( storage_index, urandom(32), lease_secret, lease_secret, { 0: TestWriteVectors( write_vectors=[WriteVector(offset=1, data=b"XYZ")] ), }, [ReadVector(0, 8)], ) ) self.assertEqual(exc.exception.code, http.UNAUTHORIZED) # The write did not happen: self.assertEqual( self.http.result_of_with_flush( self.mut_client.read_share_chunk(storage_index, 0, 0, 8) ), b"abcdef-0", ) class SharedImmutableMutableTestsMixin: """ Shared tests for mutables and immutables where the API is the same. """ KIND: str # either "mutable" or "immutable" general_client: StorageClientGeneral client: Union[StorageClientImmutables, StorageClientMutables] clientFactory: Callable[ [StorageClient], Union[StorageClientImmutables, StorageClientMutables] ] def upload(self, share_number: int, data_length=26) -> Tuple[bytes, bytes, bytes]: """ Create a share, return (storage_index, uploaded_data, lease secret). """ raise NotImplementedError def get_leases(self, storage_index: bytes) -> Iterable[LeaseInfo]: """Get leases for the storage index.""" raise NotImplementedError() def test_advise_corrupt_share(self): """ Advising share was corrupted succeeds from HTTP client's perspective, and calls appropriate method on server. """ corrupted = [] self.http.storage_server.advise_corrupt_share = lambda *args: corrupted.append( args ) storage_index, _, _ = self.upload(13) reason = "OHNO \u1235" self.http.result_of_with_flush( self.client.advise_corrupt_share(storage_index, 13, reason) ) self.assertEqual( corrupted, [(self.KIND.encode("ascii"), storage_index, 13, reason.encode("utf-8"))], ) def test_advise_corrupt_share_unknown(self): """ Advising an unknown share was corrupted results in 404. """ storage_index, _, _ = self.upload(13) reason = "OHNO \u1235" self.http.result_of_with_flush( self.client.advise_corrupt_share(storage_index, 13, reason) ) for si, share_number in [(storage_index, 11), (urandom(16), 13)]: with assert_fails_with_http_code(self, http.NOT_FOUND): self.http.result_of_with_flush( self.client.advise_corrupt_share(si, share_number, reason) ) def test_lease_renew_and_add(self): """ It's possible the renew the lease on an uploaded mutable/immutable, by using the same renewal secret, or add a new lease by choosing a different renewal secret. """ # Create a storage index: storage_index, _, lease_secret = self.upload(0) [lease] = self.get_leases(storage_index) initial_expiration_time = lease.get_expiration_time() # Time passes: self.http.clock.advance(167) # We renew the lease: self.http.result_of_with_flush( self.general_client.add_or_renew_lease( storage_index, lease_secret, lease_secret ) ) # More time passes: self.http.clock.advance(10) # We create a new lease: lease_secret2 = urandom(32) self.http.result_of_with_flush( self.general_client.add_or_renew_lease( storage_index, lease_secret2, lease_secret2 ) ) [lease1, lease2] = self.get_leases(storage_index) self.assertEqual(lease1.get_expiration_time(), initial_expiration_time + 167) self.assertEqual(lease2.get_expiration_time(), initial_expiration_time + 177) def test_read_of_wrong_storage_index_fails(self): """ Reading from unknown storage index results in 404. 
""" with assert_fails_with_http_code(self, http.NOT_FOUND): self.http.result_of_with_flush( self.client.read_share_chunk( b"1" * 16, 1, 0, 10, ) ) def test_read_of_wrong_share_number_fails(self): """ Reading from unknown storage index results in 404. """ storage_index, _, _ = self.upload(1) with assert_fails_with_http_code(self, http.NOT_FOUND): self.http.result_of_with_flush( self.client.read_share_chunk( storage_index, 7, # different share number 0, 10, ) ) def test_read_with_negative_offset_fails(self): """ Malformed or unsupported Range headers result in 416 (requested range not satisfiable) error. """ storage_index, _, _ = self.upload(1) def check_bad_range(bad_range_value): client = self.clientFactory( StorageClientWithHeadersOverride( self.http.client, {"range": bad_range_value} ) ) with assert_fails_with_http_code( self, http.REQUESTED_RANGE_NOT_SATISFIABLE ): self.http.result_of_with_flush( client.read_share_chunk( storage_index, 1, 0, 10, ) ) # Bad unit check_bad_range("molluscs=0-9") # Negative offsets check_bad_range("bytes=-2-9") check_bad_range("bytes=0--10") # Negative offset no endpoint check_bad_range("bytes=-300-") check_bad_range("bytes=") # Multiple ranges are currently unsupported, even if they're # semantically valid under HTTP: check_bad_range("bytes=0-5, 6-7") # Ranges without an end are currently unsupported, even if they're # semantically valid under HTTP. check_bad_range("bytes=0-") def _read_with_no_range_test(self, data_length): """ A read with no range returns the whole mutable/immutable. Actual test is defined in subclasses, to fix complaints from Hypothesis about the method having different executors. """ storage_index, uploaded_data, _ = self.upload(1, data_length) response = self.http.result_of_with_flush( self.http.client.request( "GET", self.http.client.relative_url( "/storage/v1/{}/{}/1".format(self.KIND, _encode_si(storage_index)) ), ) ) self.assertEqual(response.code, http.OK) self.assertEqual( self.http.result_of_with_flush(response.content()), uploaded_data ) def test_validate_content_range_response_to_read(self): """ The server responds to ranged reads with an appropriate Content-Range header. """ storage_index, _, _ = self.upload(1, 26) def check_range(requested_range, expected_response): headers = Headers() headers.setRawHeaders("range", [requested_range]) response = self.http.result_of_with_flush( self.http.client.request( "GET", self.http.client.relative_url( "/storage/v1/{}/{}/1".format( self.KIND, _encode_si(storage_index) ) ), headers=headers, ) ) self.assertEqual( response.headers.getRawHeaders("content-range"), [expected_response] ) check_range("bytes=0-10", "bytes 0-10/*") check_range("bytes=3-17", "bytes 3-17/*") # TODO re-enable in https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3907 # Can't go beyond the end of the mutable/immutable! # check_range("bytes=10-100", "bytes 10-25/*") class ImmutableSharedTests(SharedImmutableMutableTestsMixin, SyncTestCase): """Shared tests, running on immutables.""" KIND = "immutable" clientFactory = StorageClientImmutables def setUp(self): super(ImmutableSharedTests, self).setUp() disable_thread_pool_for_test(self) self.http = self.useFixture(HttpTestFixture()) self.client = self.clientFactory(self.http.client) self.general_client = StorageClientGeneral(self.http.client) def upload(self, share_number, data_length=26): """ Create a share, return (storage_index, uploaded_data, lease_secret). 
""" uploaded_data = (b"abcdefghijklmnopqrstuvwxyz" * ((data_length // 26) + 1))[ :data_length ] upload_secret = urandom(32) lease_secret = urandom(32) storage_index = urandom(16) self.http.result_of_with_flush( self.client.create( storage_index, {share_number}, data_length, upload_secret, lease_secret, lease_secret, ) ) self.http.result_of_with_flush( self.client.write_share_chunk( storage_index, share_number, upload_secret, 0, uploaded_data, ) ) return storage_index, uploaded_data, lease_secret def get_leases(self, storage_index): return self.http.storage_server.get_leases(storage_index) @given(data_length=st.integers(min_value=1, max_value=300000)) def test_read_with_no_range(self, data_length): """ A read with no range returns the whole immutable. """ return self._read_with_no_range_test(data_length) class MutableSharedTests(SharedImmutableMutableTestsMixin, SyncTestCase): """Shared tests, running on mutables.""" KIND = "mutable" clientFactory = StorageClientMutables def setUp(self): super(MutableSharedTests, self).setUp() disable_thread_pool_for_test(self) self.http = self.useFixture(HttpTestFixture()) self.client = self.clientFactory(self.http.client) self.general_client = StorageClientGeneral(self.http.client) def upload(self, share_number, data_length=26): """ Create a share, return (storage_index, uploaded_data, lease_secret). """ data = (b"abcdefghijklmnopqrstuvwxyz" * ((data_length // 26) + 1))[:data_length] write_secret = urandom(32) lease_secret = urandom(32) storage_index = urandom(16) self.http.result_of_with_flush( self.client.read_test_write_chunks( storage_index, write_secret, lease_secret, lease_secret, { share_number: TestWriteVectors( write_vectors=[WriteVector(offset=0, data=data)] ), }, [], ) ) return storage_index, data, lease_secret def get_leases(self, storage_index): return self.http.storage_server.get_slot_leases(storage_index) @given(data_length=st.integers(min_value=1, max_value=300000)) def test_read_with_no_range(self, data_length): """ A read with no range returns the whole mutable. """ return self._read_with_no_range_test(data_length) tahoe_lafs-1.20.0/src/allmydata/test/test_storage_https.py0000644000000000000000000002071513615410400020641 0ustar00""" Tests for the TLS part of the HTTP Storage Protocol. More broadly, these are tests for HTTPS usage as replacement for Foolscap's server authentication logic, which may one day apply outside of HTTP Storage Protocol. 
""" from contextlib import asynccontextmanager from base64 import b64decode from yaml import safe_load from cryptography import x509 from twisted.internet.endpoints import serverFromString from twisted.internet import reactor from twisted.internet.defer import maybeDeferred from twisted.web.server import Site from twisted.web.static import Data from twisted.web.client import Agent, HTTPConnectionPool, ResponseNeverReceived from twisted.python.filepath import FilePath from treq.client import HTTPClient from .common import SyncTestCase, AsyncTestCase, SameProcessStreamEndpointAssigner from .certs import ( generate_certificate, generate_private_key, private_key_to_file, cert_to_file, ) from ..storage.http_common import get_spki, get_spki_hash from ..storage.http_client import _StorageClientHTTPSPolicy from ..storage.http_server import _TLSEndpointWrapper from ..util.deferredutil import async_to_deferred from .common_system import spin_until_cleanup_done spki_test_vectors_path = FilePath(__file__).sibling("data").child("spki-hash-test-vectors.yaml") class HTTPSNurlTests(SyncTestCase): """Tests for HTTPS NURLs.""" def test_spki_hash(self): """ The output of ``get_spki_hash()`` matches the semantics of RFC 7469. The test vector certificates were generated using the openssl command line tool:: openssl req -x509 -newkey rsa:4096 -keyout key.pem -out cert.pem -days 365 The expected hash was generated using Appendix A instructions in the RFC:: openssl x509 -noout -in certificate.pem -pubkey | \ openssl asn1parse -noout -inform pem -out public.key openssl dgst -sha256 -binary public.key | openssl enc -base64 The OpenSSL base64-encoded output was then adjusted into the URL-safe base64 variation: `+` and `/` were replaced with `-` and `_` and the trailing `=` padding was removed. The expected SubjectPublicKeyInfo bytes were extracted from the implementation of `get_spki_hash` after its result matched the expected value generated by the command above. """ spki_cases = safe_load(spki_test_vectors_path.getContent())["vector"] for n, case in enumerate(spki_cases): certificate_text = case["certificate"].encode("ascii") expected_spki = b64decode(case["expected-spki"]) expected_hash = case["expected-hash"].encode("ascii") try: certificate = x509.load_pem_x509_certificate(certificate_text) except Exception as e: self.fail(f"Loading case {n} certificate failed: {e}") self.assertEqual( expected_spki, get_spki(certificate), f"case {n} spki data mismatch", ) self.assertEqual( expected_hash, get_spki_hash(certificate), f"case {n} spki hash mismatch", ) class PinningHTTPSValidation(AsyncTestCase): """ Test client-side validation logic of HTTPS certificates that uses Tahoe-LAFS's pinning-based scheme instead of the traditional certificate authority scheme. https://cryptography.io/en/latest/x509/tutorial/#creating-a-self-signed-certificate """ def setUp(self): self._port_assigner = SameProcessStreamEndpointAssigner() self._port_assigner.setUp() self.addCleanup(self._port_assigner.tearDown) return AsyncTestCase.setUp(self) def tearDown(self): d = maybeDeferred(AsyncTestCase.tearDown, self) return d.addCallback(lambda _: spin_until_cleanup_done()) @asynccontextmanager async def listen(self, private_key_path: FilePath, cert_path: FilePath): """ Context manager that runs a HTTPS server with the given private key and certificate. Returns a URL that will connect to the server. 
""" location_hint, endpoint_string = self._port_assigner.assign(reactor) underlying_endpoint = serverFromString(reactor, endpoint_string) endpoint = _TLSEndpointWrapper.from_paths( underlying_endpoint, private_key_path, cert_path ) root = Data(b"YOYODYNE", "text/plain") root.isLeaf = True listening_port = await endpoint.listen(Site(root)) try: yield f"https://127.0.0.1:{listening_port.getHost().port}/" # type: ignore[attr-defined] finally: result = listening_port.stopListening() if result is not None: await result def request(self, url: str, expected_certificate: x509.Certificate): """ Send a HTTPS request to the given URL, ensuring that the given certificate is the one used via SPKI-hash-based pinning comparison. """ # No persistent connections, so we don't have dirty reactor at the end # of the test. treq_client = HTTPClient( Agent( reactor, _StorageClientHTTPSPolicy( expected_spki_hash=get_spki_hash(expected_certificate) ), pool=HTTPConnectionPool(reactor, persistent=False), ) ) return treq_client.get(url) @async_to_deferred async def test_success(self): """ If all conditions are met, a TLS client using the Tahoe-LAFS policy can connect to the server. """ private_key = generate_private_key() certificate = generate_certificate(private_key) async with self.listen( private_key_to_file(FilePath(self.mktemp()), private_key), cert_to_file(FilePath(self.mktemp()), certificate), ) as url: response = await self.request(url, certificate) self.assertEqual(await response.content(), b"YOYODYNE") @async_to_deferred async def test_server_certificate_has_wrong_hash(self): """ If the server's certificate hash doesn't match the hash the client expects, the request to the server fails. """ private_key1 = generate_private_key() certificate1 = generate_certificate(private_key1) private_key2 = generate_private_key() certificate2 = generate_certificate(private_key2) async with self.listen( private_key_to_file(FilePath(self.mktemp()), private_key1), cert_to_file(FilePath(self.mktemp()), certificate1), ) as url: with self.assertRaises(ResponseNeverReceived): await self.request(url, certificate2) @async_to_deferred async def test_server_certificate_expired(self): """ If the server's certificate has expired, the request to the server succeeds if the hash matches the one the client expects; expiration has no effect. """ private_key = generate_private_key() certificate = generate_certificate(private_key, expires_days=-10) async with self.listen( private_key_to_file(FilePath(self.mktemp()), private_key), cert_to_file(FilePath(self.mktemp()), certificate), ) as url: response = await self.request(url, certificate) self.assertEqual(await response.content(), b"YOYODYNE") @async_to_deferred async def test_server_certificate_not_valid_yet(self): """ If the server's certificate is only valid starting in The Future, the request to the server succeeds if the hash matches the one the client expects; start time has no effect. """ private_key = generate_private_key() certificate = generate_certificate( private_key, expires_days=10, valid_in_days=5 ) async with self.listen( private_key_to_file(FilePath(self.mktemp()), private_key), cert_to_file(FilePath(self.mktemp()), certificate), ) as url: response = await self.request(url, certificate) self.assertEqual(await response.content(), b"YOYODYNE") # A potential attack to test is a private key that doesn't match the # certificate... but OpenSSL (quite rightly) won't let you listen with that # so I don't know how to test that! 
See # https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3884 tahoe_lafs-1.20.0/src/allmydata/test/test_storage_web.py0000644000000000000000000022450313615410400020255 0ustar00""" Tests for twisted.storage that uses Web APIs. Partially ported to Python 3. """ import time import os.path import re import json from unittest import skipIf from io import StringIO from twisted.trial import unittest from twisted.internet import defer from twisted.application import service from twisted.web.template import flattenString from twisted.python.filepath import FilePath from twisted.python.runtime import platform from foolscap.api import fireEventually from allmydata.util import fileutil, hashutil, base32, pollmixin from allmydata.storage.common import storage_index_to_dir, \ UnknownMutableContainerVersionError, UnknownImmutableContainerVersionError from allmydata.storage.server import StorageServer from allmydata.storage.crawler import ( BucketCountingCrawler, _LeaseStateSerializer, ) from allmydata.storage.expirer import ( LeaseCheckingCrawler, _HistorySerializer, ) from allmydata.web.storage import ( StorageStatus, StorageStatusElement, remove_prefix ) from allmydata.scripts.admin import ( migrate_crawler, ) from allmydata.scripts.runner import ( Options, ) from .common_web import ( render, ) def remove_tags(s): s = re.sub(br'<[^>]*>', b' ', s) s = re.sub(br'\s+', b' ', s) return s def renderSynchronously(ss): """ Return fully rendered HTML document. :param _StorageStatus ss: a StorageStatus instance. """ return unittest.TestCase().successResultOf(renderDeferred(ss)) def renderDeferred(ss): """ Return a `Deferred` HTML renderer. :param _StorageStatus ss: a StorageStatus instance. """ elem = StorageStatusElement(ss._storage, ss._nickname) return flattenString(None, elem) def renderJSON(resource): """ Render a JSON from the given resource. """ return render(resource, {b"t": [b"json"]}) class MyBucketCountingCrawler(BucketCountingCrawler): def finished_prefix(self, cycle, prefix): BucketCountingCrawler.finished_prefix(self, cycle, prefix) if self.hook_ds: d = self.hook_ds.pop(0) d.callback(None) class MyStorageServer(StorageServer): def add_bucket_counter(self): statefile = os.path.join(self.storedir, "bucket_counter.state") self.bucket_counter = MyBucketCountingCrawler(self, statefile) self.bucket_counter.setServiceParent(self) class BucketCounter(unittest.TestCase, pollmixin.PollMixin): def setUp(self): self.s = service.MultiService() self.s.startService() def tearDown(self): return self.s.stopService() def test_bucket_counter(self): basedir = "storage/BucketCounter/bucket_counter" fileutil.make_dirs(basedir) ss = StorageServer(basedir, b"\x00" * 20) # to make sure we capture the bucket-counting-crawler in the middle # of a cycle, we reach in and reduce its maximum slice time to 0. We # also make it start sooner than usual. ss.bucket_counter.slow_start = 0 orig_cpu_slice = ss.bucket_counter.cpu_slice ss.bucket_counter.cpu_slice = 0 ss.setServiceParent(self.s) w = StorageStatus(ss) # this sample is before the crawler has started doing anything html = renderSynchronously(w) self.failUnlessIn(b"
<h1>Storage Server Status</h1>
", html) s = remove_tags(html) self.failUnlessIn(b"Accepting new shares: Yes", s) self.failUnlessIn(b"Reserved space: - 0 B (0)", s) self.failUnlessIn(b"Total buckets: Not computed yet", s) self.failUnlessIn(b"Next crawl in", s) # give the bucket-counting-crawler one tick to get started. The # cpu_slice=0 will force it to yield right after it processes the # first prefix d = fireEventually() def _check(ignored): # are we really right after the first prefix? state = ss.bucket_counter.get_state() if state["last-complete-prefix"] is None: d2 = fireEventually() d2.addCallback(_check) return d2 self.failUnlessEqual(state["last-complete-prefix"], ss.bucket_counter.prefixes[0]) ss.bucket_counter.cpu_slice = 100.0 # finish as fast as possible html = renderSynchronously(w) s = remove_tags(html) self.failUnlessIn(b" Current crawl ", s) self.failUnlessIn(b" (next work in ", s) d.addCallback(_check) # now give it enough time to complete a full cycle def _watch(): return not ss.bucket_counter.get_progress()["cycle-in-progress"] d.addCallback(lambda ignored: self.poll(_watch)) def _check2(ignored): ss.bucket_counter.cpu_slice = orig_cpu_slice html = renderSynchronously(w) s = remove_tags(html) self.failUnlessIn(b"Total buckets: 0 (the number of", s) self.failUnless(b"Next crawl in 59 minutes" in s or b"Next crawl in 60 minutes" in s, s) d.addCallback(_check2) return d def test_bucket_counter_cleanup(self): basedir = "storage/BucketCounter/bucket_counter_cleanup" fileutil.make_dirs(basedir) ss = StorageServer(basedir, b"\x00" * 20) # to make sure we capture the bucket-counting-crawler in the middle # of a cycle, we reach in and reduce its maximum slice time to 0. ss.bucket_counter.slow_start = 0 orig_cpu_slice = ss.bucket_counter.cpu_slice ss.bucket_counter.cpu_slice = 0 ss.setServiceParent(self.s) d = fireEventually() def _after_first_prefix(ignored): state = ss.bucket_counter.state if state["last-complete-prefix"] is None: d2 = fireEventually() d2.addCallback(_after_first_prefix) return d2 ss.bucket_counter.cpu_slice = 100.0 # finish as fast as possible # now sneak in and mess with its state, to make sure it cleans up # properly at the end of the cycle self.failUnlessEqual(state["last-complete-prefix"], ss.bucket_counter.prefixes[0]) state["bucket-counts"][-12] = {} state["storage-index-samples"]["bogusprefix!"] = (-12, []) ss.bucket_counter.save_state() d.addCallback(_after_first_prefix) # now give it enough time to complete a cycle def _watch(): return not ss.bucket_counter.get_progress()["cycle-in-progress"] d.addCallback(lambda ignored: self.poll(_watch)) def _check2(ignored): ss.bucket_counter.cpu_slice = orig_cpu_slice s = ss.bucket_counter.get_state() self.failIf(-12 in s["bucket-counts"], list(s["bucket-counts"].keys())) self.failIf("bogusprefix!" in s["storage-index-samples"], list(s["storage-index-samples"].keys())) d.addCallback(_check2) return d def test_bucket_counter_eta(self): basedir = "storage/BucketCounter/bucket_counter_eta" fileutil.make_dirs(basedir) ss = MyStorageServer(basedir, b"\x00" * 20) ss.bucket_counter.slow_start = 0 # these will be fired inside finished_prefix() hooks = ss.bucket_counter.hook_ds = [defer.Deferred() for i in range(3)] w = StorageStatus(ss) d = defer.Deferred() def _check_1(ignored): # no ETA is available yet html = renderSynchronously(w) s = remove_tags(html) self.failUnlessIn(b"complete (next work", s) def _check_2(ignored): # one prefix has finished, so an ETA based upon that elapsed time # should be available. 
html = renderSynchronously(w) s = remove_tags(html) self.failUnlessIn(b"complete (ETA ", s) def _check_3(ignored): # two prefixes have finished html = renderSynchronously(w) s = remove_tags(html) self.failUnlessIn(b"complete (ETA ", s) d.callback("done") hooks[0].addCallback(_check_1).addErrback(d.errback) hooks[1].addCallback(_check_2).addErrback(d.errback) hooks[2].addCallback(_check_3).addErrback(d.errback) ss.setServiceParent(self.s) return d class InstrumentedLeaseCheckingCrawler(LeaseCheckingCrawler): stop_after_first_bucket = False def process_bucket(self, *args, **kwargs): LeaseCheckingCrawler.process_bucket(self, *args, **kwargs) if self.stop_after_first_bucket: self.stop_after_first_bucket = False self.cpu_slice = -1.0 def yielding(self, sleep_time): if not self.stop_after_first_bucket: self.cpu_slice = 500 class BrokenStatResults(object): pass class No_ST_BLOCKS_LeaseCheckingCrawler(LeaseCheckingCrawler): def stat(self, fn): s = os.stat(fn) bsr = BrokenStatResults() for attrname in dir(s): if attrname.startswith("_"): continue if attrname == "st_blocks": continue setattr(bsr, attrname, getattr(s, attrname)) return bsr class InstrumentedStorageServer(StorageServer): LeaseCheckerClass = InstrumentedLeaseCheckingCrawler class No_ST_BLOCKS_StorageServer(StorageServer): LeaseCheckerClass = No_ST_BLOCKS_LeaseCheckingCrawler class LeaseCrawler(unittest.TestCase, pollmixin.PollMixin): def setUp(self): self.s = service.MultiService() self.s.startService() def tearDown(self): return self.s.stopService() def make_shares(self, ss): def make(si): return (si, hashutil.tagged_hash(b"renew", si), hashutil.tagged_hash(b"cancel", si)) def make_mutable(si): return (si, hashutil.tagged_hash(b"renew", si), hashutil.tagged_hash(b"cancel", si), hashutil.tagged_hash(b"write-enabler", si)) def make_extra_lease(si, num): return (hashutil.tagged_hash(b"renew-%d" % num, si), hashutil.tagged_hash(b"cancel-%d" % num, si)) immutable_si_0, rs0, cs0 = make(b"\x00" * 16) immutable_si_1, rs1, cs1 = make(b"\x01" * 16) rs1a, cs1a = make_extra_lease(immutable_si_1, 1) mutable_si_2, rs2, cs2, we2 = make_mutable(b"\x02" * 16) mutable_si_3, rs3, cs3, we3 = make_mutable(b"\x03" * 16) rs3a, cs3a = make_extra_lease(mutable_si_3, 1) sharenums = [0] # note: 'tahoe debug dump-share' will not handle this file, since the # inner contents are not a valid CHK share data = b"\xff" * 1000 a,w = ss.allocate_buckets(immutable_si_0, rs0, cs0, sharenums, 1000) w[0].write(0, data) w[0].close() a,w = ss.allocate_buckets(immutable_si_1, rs1, cs1, sharenums, 1000) w[0].write(0, data) w[0].close() ss.add_lease(immutable_si_1, rs1a, cs1a) writev = ss.slot_testv_and_readv_and_writev writev(mutable_si_2, (we2, rs2, cs2), {0: ([], [(0,data)], len(data))}, []) writev(mutable_si_3, (we3, rs3, cs3), {0: ([], [(0,data)], len(data))}, []) ss.add_lease(mutable_si_3, rs3a, cs3a) self.sis = [immutable_si_0, immutable_si_1, mutable_si_2, mutable_si_3] self.renew_secrets = [rs0, rs1, rs1a, rs2, rs3, rs3a] self.cancel_secrets = [cs0, cs1, cs1a, cs2, cs3, cs3a] def test_basic(self): basedir = "storage/LeaseCrawler/basic" fileutil.make_dirs(basedir) ss = InstrumentedStorageServer(basedir, b"\x00" * 20) # make it start sooner than usual. 
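# (Descriptive note on the knobs set just below: `slow_start` is how long
#  the crawler waits before its first tick and `cpu_slice` bounds how much
#  work it does per tick; `stop_after_first_bucket` is the hook that makes
#  InstrumentedLeaseCheckingCrawler drop cpu_slice to -1.0 after the first
#  bucket, forcing an immediate yield so the test can inspect mid-cycle
#  state.)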
lc = ss.lease_checker lc.slow_start = 0 lc.cpu_slice = 500 lc.stop_after_first_bucket = True webstatus = StorageStatus(ss) # create a few shares, with some leases on them self.make_shares(ss) [immutable_si_0, immutable_si_1, mutable_si_2, mutable_si_3] = self.sis # add a non-sharefile to exercise another code path fn = os.path.join(ss.sharedir, storage_index_to_dir(immutable_si_0), "not-a-share") f = open(fn, "wb") f.write(b"I am not a share.\n") f.close() # this is before the crawl has started, so we're not in a cycle yet initial_state = lc.get_state() self.failIf(lc.get_progress()["cycle-in-progress"]) self.failIfIn("cycle-to-date", initial_state) self.failIfIn("estimated-remaining-cycle", initial_state) self.failIfIn("estimated-current-cycle", initial_state) self.failUnlessIn("history", initial_state) self.failUnlessEqual(initial_state["history"], {}) ss.setServiceParent(self.s) DAY = 24*60*60 d = fireEventually() # now examine the state right after the first bucket has been # processed. def _after_first_bucket(ignored): initial_state = lc.get_state() if "cycle-to-date" not in initial_state: d2 = fireEventually() d2.addCallback(_after_first_bucket) return d2 self.failUnlessIn("cycle-to-date", initial_state) self.failUnlessIn("estimated-remaining-cycle", initial_state) self.failUnlessIn("estimated-current-cycle", initial_state) self.failUnlessIn("history", initial_state) self.failUnlessEqual(initial_state["history"], {}) so_far = initial_state["cycle-to-date"] self.failUnlessEqual(so_far["expiration-enabled"], False) self.failUnlessIn("configured-expiration-mode", so_far) self.failUnlessIn("lease-age-histogram", so_far) lah = so_far["lease-age-histogram"] self.failUnlessEqual(type(lah), list) self.failUnlessEqual(len(lah), 1) self.failUnlessEqual(lah, [ (0.0, DAY, 1) ] ) self.failUnlessEqual(so_far["leases-per-share-histogram"], {"1": 1}) self.failUnlessEqual(so_far["corrupt-shares"], []) sr1 = so_far["space-recovered"] self.failUnlessEqual(sr1["examined-buckets"], 1) self.failUnlessEqual(sr1["examined-shares"], 1) self.failUnlessEqual(sr1["actual-shares"], 0) self.failUnlessEqual(sr1["configured-diskbytes"], 0) self.failUnlessEqual(sr1["original-sharebytes"], 0) left = initial_state["estimated-remaining-cycle"] sr2 = left["space-recovered"] self.failUnless(sr2["examined-buckets"] > 0, sr2["examined-buckets"]) self.failUnless(sr2["examined-shares"] > 0, sr2["examined-shares"]) self.failIfEqual(sr2["actual-shares"], None) self.failIfEqual(sr2["configured-diskbytes"], None) self.failIfEqual(sr2["original-sharebytes"], None) d.addCallback(_after_first_bucket) d.addCallback(lambda ign: renderDeferred(webstatus)) def _check_html_in_cycle(html): s = remove_tags(html) self.failUnlessIn(b"So far, this cycle has examined " b"1 shares in 1 buckets (0 mutable / 1 immutable) ", s) self.failUnlessIn(b"and has recovered: " b"0 shares, 0 buckets (0 mutable / 0 immutable), " b"0 B (0 B / 0 B)", s) self.failUnlessIn(b"If expiration were enabled, " b"we would have recovered: " b"0 shares, 0 buckets (0 mutable / 0 immutable)," b" 0 B (0 B / 0 B) by now", s) self.failUnlessIn(b"and the remainder of this cycle " b"would probably recover: " b"0 shares, 0 buckets (0 mutable / 0 immutable)," b" 0 B (0 B / 0 B)", s) self.failUnlessIn(b"and the whole cycle would probably recover: " b"0 shares, 0 buckets (0 mutable / 0 immutable)," b" 0 B (0 B / 0 B)", s) self.failUnlessIn(b"if we were strictly using each lease's default " b"31-day lease lifetime", s) self.failUnlessIn(b"this cycle would be expected to recover: ", 
s) d.addCallback(_check_html_in_cycle) # wait for the crawler to finish the first cycle. Nothing should have # been removed. def _wait(): return bool(lc.get_state()["last-cycle-finished"] is not None) d.addCallback(lambda ign: self.poll(_wait)) def _after_first_cycle(ignored): s = lc.get_state() self.failIf("cycle-to-date" in s) self.failIf("estimated-remaining-cycle" in s) self.failIf("estimated-current-cycle" in s) last = s["history"]["0"] self.failUnlessIn("cycle-start-finish-times", last) self.failUnlessEqual(type(last["cycle-start-finish-times"]), list) self.failUnlessEqual(last["expiration-enabled"], False) self.failUnlessIn("configured-expiration-mode", last) self.failUnlessIn("lease-age-histogram", last) lah = last["lease-age-histogram"] self.failUnlessEqual(type(lah), list) self.failUnlessEqual(len(lah), 1) self.failUnlessEqual(lah, [ [0.0, DAY, 6] ] ) self.failUnlessEqual(last["leases-per-share-histogram"], {"1": 2, "2": 2}) self.failUnlessEqual(last["corrupt-shares"], []) rec = last["space-recovered"] self.failUnlessEqual(rec["examined-buckets"], 4) self.failUnlessEqual(rec["examined-shares"], 4) self.failUnlessEqual(rec["actual-buckets"], 0) self.failUnlessEqual(rec["original-buckets"], 0) self.failUnlessEqual(rec["configured-buckets"], 0) self.failUnlessEqual(rec["actual-shares"], 0) self.failUnlessEqual(rec["original-shares"], 0) self.failUnlessEqual(rec["configured-shares"], 0) self.failUnlessEqual(rec["actual-diskbytes"], 0) self.failUnlessEqual(rec["original-diskbytes"], 0) self.failUnlessEqual(rec["configured-diskbytes"], 0) self.failUnlessEqual(rec["actual-sharebytes"], 0) self.failUnlessEqual(rec["original-sharebytes"], 0) self.failUnlessEqual(rec["configured-sharebytes"], 0) def _get_sharefile(si): return list(ss._iter_share_files(si))[0] def count_leases(si): return len(list(_get_sharefile(si).get_leases())) self.failUnlessEqual(count_leases(immutable_si_0), 1) self.failUnlessEqual(count_leases(immutable_si_1), 2) self.failUnlessEqual(count_leases(mutable_si_2), 1) self.failUnlessEqual(count_leases(mutable_si_3), 2) d.addCallback(_after_first_cycle) d.addCallback(lambda ign: renderDeferred(webstatus)) def _check_html(html): s = remove_tags(html) self.failUnlessIn(b"recovered: 0 shares, 0 buckets " b"(0 mutable / 0 immutable), 0 B (0 B / 0 B) ", s) self.failUnlessIn(b"and saw a total of 4 shares, 4 buckets " b"(2 mutable / 2 immutable),", s) self.failUnlessIn(b"but expiration was not enabled", s) d.addCallback(_check_html) d.addCallback(lambda ign: renderJSON(webstatus)) def _check_json(raw): data = json.loads(raw) self.failUnlessIn("lease-checker", data) self.failUnlessIn("lease-checker-progress", data) d.addCallback(_check_json) return d def backdate_lease(self, sf, renew_secret, new_expire_time): sf.renew_lease(renew_secret, new_expire_time, allow_backdate=True) def test_expire_age(self): basedir = "storage/LeaseCrawler/expire_age" fileutil.make_dirs(basedir) # setting expiration_time to 2000 means that any lease which is more # than 2000s old will be expired. ss = InstrumentedStorageServer(basedir, b"\x00" * 20, expiration_enabled=True, expiration_mode="age", expiration_override_lease_duration=2000) # make it start sooner than usual. 
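# (Rough sketch of the policy configured above -- an assumption, not a
#  quote from expirer.py: in "age" mode a lease counts as expired once
#  now - <its grant/renew time> exceeds expiration_override_lease_duration,
#  here 2000s instead of the default 31-day lifetime; a share is deleted
#  only when every one of its leases has expired.)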
lc = ss.lease_checker lc.slow_start = 0 lc.stop_after_first_bucket = True webstatus = StorageStatus(ss) # create a few shares, with some leases on them self.make_shares(ss) [immutable_si_0, immutable_si_1, mutable_si_2, mutable_si_3] = self.sis def count_shares(si): return len(list(ss._iter_share_files(si))) def _get_sharefile(si): return list(ss._iter_share_files(si))[0] def count_leases(si): return len(list(_get_sharefile(si).get_leases())) self.failUnlessEqual(count_shares(immutable_si_0), 1) self.failUnlessEqual(count_leases(immutable_si_0), 1) self.failUnlessEqual(count_shares(immutable_si_1), 1) self.failUnlessEqual(count_leases(immutable_si_1), 2) self.failUnlessEqual(count_shares(mutable_si_2), 1) self.failUnlessEqual(count_leases(mutable_si_2), 1) self.failUnlessEqual(count_shares(mutable_si_3), 1) self.failUnlessEqual(count_leases(mutable_si_3), 2) # artificially crank back the expiration time on the first lease of # each share, to make it look like it expired already (age=1000s). # Some shares have an extra lease which is set to expire at the # default time in 31 days from now (age=31days). We then run the # crawler, which will expire the first lease, making some shares get # deleted and others stay alive (with one remaining lease) now = time.time() sf0 = _get_sharefile(immutable_si_0) self.backdate_lease(sf0, self.renew_secrets[0], now - 1000) sf0_size = os.stat(sf0.home).st_size # immutable_si_1 gets an extra lease sf1 = _get_sharefile(immutable_si_1) self.backdate_lease(sf1, self.renew_secrets[1], now - 1000) sf2 = _get_sharefile(mutable_si_2) self.backdate_lease(sf2, self.renew_secrets[3], now - 1000) sf2_size = os.stat(sf2.home).st_size # mutable_si_3 gets an extra lease sf3 = _get_sharefile(mutable_si_3) self.backdate_lease(sf3, self.renew_secrets[4], now - 1000) ss.setServiceParent(self.s) d = fireEventually() # examine the state right after the first bucket has been processed def _after_first_bucket(ignored): p = lc.get_progress() if not p["cycle-in-progress"]: d2 = fireEventually() d2.addCallback(_after_first_bucket) return d2 d.addCallback(_after_first_bucket) d.addCallback(lambda ign: renderDeferred(webstatus)) def _check_html_in_cycle(html): s = remove_tags(html) # the first bucket encountered gets deleted, and its prefix # happens to be about 1/5th of the way through the ring, so the # predictor thinks we'll have 5 shares and that we'll delete them # all. This part of the test depends upon the SIs landing right # where they do now. self.failUnlessIn(b"The remainder of this cycle is expected to " b"recover: 4 shares, 4 buckets", s) self.failUnlessIn(b"The whole cycle is expected to examine " b"5 shares in 5 buckets and to recover: " b"5 shares, 5 buckets", s) d.addCallback(_check_html_in_cycle) # wait for the crawler to finish the first cycle. 
Two shares should # have been removed def _wait(): return bool(lc.get_state()["last-cycle-finished"] is not None) d.addCallback(lambda ign: self.poll(_wait)) def _after_first_cycle(ignored): self.failUnlessEqual(count_shares(immutable_si_0), 0) self.failUnlessEqual(count_shares(immutable_si_1), 1) self.failUnlessEqual(count_leases(immutable_si_1), 1) self.failUnlessEqual(count_shares(mutable_si_2), 0) self.failUnlessEqual(count_shares(mutable_si_3), 1) self.failUnlessEqual(count_leases(mutable_si_3), 1) s = lc.get_state() last = s["history"]["0"] self.failUnlessEqual(last["expiration-enabled"], True) self.failUnlessEqual(last["configured-expiration-mode"], ["age", 2000, None, ["mutable", "immutable"]]) self.failUnlessEqual(last["leases-per-share-histogram"], {"1": 2, "2": 2}) rec = last["space-recovered"] self.failUnlessEqual(rec["examined-buckets"], 4) self.failUnlessEqual(rec["examined-shares"], 4) self.failUnlessEqual(rec["actual-buckets"], 2) self.failUnlessEqual(rec["original-buckets"], 2) self.failUnlessEqual(rec["configured-buckets"], 2) self.failUnlessEqual(rec["actual-shares"], 2) self.failUnlessEqual(rec["original-shares"], 2) self.failUnlessEqual(rec["configured-shares"], 2) size = sf0_size + sf2_size self.failUnlessEqual(rec["actual-sharebytes"], size) self.failUnlessEqual(rec["original-sharebytes"], size) self.failUnlessEqual(rec["configured-sharebytes"], size) # different platforms have different notions of "blocks used by # this file", so merely assert that it's a number self.failUnless(rec["actual-diskbytes"] >= 0, rec["actual-diskbytes"]) self.failUnless(rec["original-diskbytes"] >= 0, rec["original-diskbytes"]) self.failUnless(rec["configured-diskbytes"] >= 0, rec["configured-diskbytes"]) d.addCallback(_after_first_cycle) d.addCallback(lambda ign: renderDeferred(webstatus)) def _check_html(html): s = remove_tags(html) self.failUnlessIn(b"Expiration Enabled: expired leases will be removed", s) self.failUnlessIn(b"Leases created or last renewed more than 33 minutes ago will be considered expired.", s) self.failUnlessIn(b" recovered: 2 shares, 2 buckets (1 mutable / 1 immutable), ", s) d.addCallback(_check_html) return d def test_expire_cutoff_date(self): basedir = "storage/LeaseCrawler/expire_cutoff_date" fileutil.make_dirs(basedir) # setting cutoff-date to 2000 seconds ago means that any lease which # is more than 2000s old will be expired. now = time.time() then = int(now - 2000) ss = InstrumentedStorageServer(basedir, b"\x00" * 20, expiration_enabled=True, expiration_mode="cutoff-date", expiration_cutoff_date=then) # make it start sooner than usual. 
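# (Rough sketch of the policy configured above -- an assumption, not a
#  quote from expirer.py: in "cutoff-date" mode a lease counts as expired
#  if it was created or last renewed before expiration_cutoff_date (`then`,
#  2000s ago); as with "age" mode, a share is deleted only when every one
#  of its leases has expired.)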
lc = ss.lease_checker lc.slow_start = 0 lc.stop_after_first_bucket = True webstatus = StorageStatus(ss) # create a few shares, with some leases on them self.make_shares(ss) [immutable_si_0, immutable_si_1, mutable_si_2, mutable_si_3] = self.sis def count_shares(si): return len(list(ss._iter_share_files(si))) def _get_sharefile(si): return list(ss._iter_share_files(si))[0] def count_leases(si): return len(list(_get_sharefile(si).get_leases())) self.failUnlessEqual(count_shares(immutable_si_0), 1) self.failUnlessEqual(count_leases(immutable_si_0), 1) self.failUnlessEqual(count_shares(immutable_si_1), 1) self.failUnlessEqual(count_leases(immutable_si_1), 2) self.failUnlessEqual(count_shares(mutable_si_2), 1) self.failUnlessEqual(count_leases(mutable_si_2), 1) self.failUnlessEqual(count_shares(mutable_si_3), 1) self.failUnlessEqual(count_leases(mutable_si_3), 2) # artificially crank back the expiration time on the first lease of # each share, to make it look like was renewed 3000s ago. To achieve # this, we need to set the expiration time to now-3000+31days. This # will change when the lease format is improved to contain both # create/renew time and duration. new_expiration_time = now - 3000 + 31*24*60*60 # Some shares have an extra lease which is set to expire at the # default time in 31 days from now (age=31days). We then run the # crawler, which will expire the first lease, making some shares get # deleted and others stay alive (with one remaining lease) sf0 = _get_sharefile(immutable_si_0) self.backdate_lease(sf0, self.renew_secrets[0], new_expiration_time) sf0_size = os.stat(sf0.home).st_size # immutable_si_1 gets an extra lease sf1 = _get_sharefile(immutable_si_1) self.backdate_lease(sf1, self.renew_secrets[1], new_expiration_time) sf2 = _get_sharefile(mutable_si_2) self.backdate_lease(sf2, self.renew_secrets[3], new_expiration_time) sf2_size = os.stat(sf2.home).st_size # mutable_si_3 gets an extra lease sf3 = _get_sharefile(mutable_si_3) self.backdate_lease(sf3, self.renew_secrets[4], new_expiration_time) ss.setServiceParent(self.s) d = fireEventually() # examine the state right after the first bucket has been processed def _after_first_bucket(ignored): p = lc.get_progress() if not p["cycle-in-progress"]: d2 = fireEventually() d2.addCallback(_after_first_bucket) return d2 d.addCallback(_after_first_bucket) d.addCallback(lambda ign: renderDeferred(webstatus)) def _check_html_in_cycle(html): s = remove_tags(html) # the first bucket encountered gets deleted, and its prefix # happens to be about 1/5th of the way through the ring, so the # predictor thinks we'll have 5 shares and that we'll delete them # all. This part of the test depends upon the SIs landing right # where they do now. self.failUnlessIn(b"The remainder of this cycle is expected to " b"recover: 4 shares, 4 buckets", s) self.failUnlessIn(b"The whole cycle is expected to examine " b"5 shares in 5 buckets and to recover: " b"5 shares, 5 buckets", s) d.addCallback(_check_html_in_cycle) # wait for the crawler to finish the first cycle. 
Two shares should # have been removed def _wait(): return bool(lc.get_state()["last-cycle-finished"] is not None) d.addCallback(lambda ign: self.poll(_wait)) def _after_first_cycle(ignored): self.failUnlessEqual(count_shares(immutable_si_0), 0) self.failUnlessEqual(count_shares(immutable_si_1), 1) self.failUnlessEqual(count_leases(immutable_si_1), 1) self.failUnlessEqual(count_shares(mutable_si_2), 0) self.failUnlessEqual(count_shares(mutable_si_3), 1) self.failUnlessEqual(count_leases(mutable_si_3), 1) s = lc.get_state() last = s["history"]["0"] self.failUnlessEqual(last["expiration-enabled"], True) self.failUnlessEqual(last["configured-expiration-mode"], ["cutoff-date", None, then, ["mutable", "immutable"]]) self.failUnlessEqual(last["leases-per-share-histogram"], {"1": 2, "2": 2}) rec = last["space-recovered"] self.failUnlessEqual(rec["examined-buckets"], 4) self.failUnlessEqual(rec["examined-shares"], 4) self.failUnlessEqual(rec["actual-buckets"], 2) self.failUnlessEqual(rec["original-buckets"], 0) self.failUnlessEqual(rec["configured-buckets"], 2) self.failUnlessEqual(rec["actual-shares"], 2) self.failUnlessEqual(rec["original-shares"], 0) self.failUnlessEqual(rec["configured-shares"], 2) size = sf0_size + sf2_size self.failUnlessEqual(rec["actual-sharebytes"], size) self.failUnlessEqual(rec["original-sharebytes"], 0) self.failUnlessEqual(rec["configured-sharebytes"], size) # different platforms have different notions of "blocks used by # this file", so merely assert that it's a number self.failUnless(rec["actual-diskbytes"] >= 0, rec["actual-diskbytes"]) self.failUnless(rec["original-diskbytes"] >= 0, rec["original-diskbytes"]) self.failUnless(rec["configured-diskbytes"] >= 0, rec["configured-diskbytes"]) d.addCallback(_after_first_cycle) d.addCallback(lambda ign: renderDeferred(webstatus)) def _check_html(html): s = remove_tags(html) self.failUnlessIn(b"Expiration Enabled:" b" expired leases will be removed", s) date = time.strftime( u"%Y-%m-%d (%d-%b-%Y) UTC", time.gmtime(then)).encode("ascii") substr =b"Leases created or last renewed before %s will be considered expired." 
% date self.failUnlessIn(substr, s) self.failUnlessIn(b" recovered: 2 shares, 2 buckets (1 mutable / 1 immutable), ", s) d.addCallback(_check_html) return d def test_only_immutable(self): basedir = "storage/LeaseCrawler/only_immutable" fileutil.make_dirs(basedir) now = time.time() then = int(now - 2000) ss = StorageServer(basedir, b"\x00" * 20, expiration_enabled=True, expiration_mode="cutoff-date", expiration_cutoff_date=then, expiration_sharetypes=("immutable",)) lc = ss.lease_checker lc.slow_start = 0 webstatus = StorageStatus(ss) self.make_shares(ss) [immutable_si_0, immutable_si_1, mutable_si_2, mutable_si_3] = self.sis # set all leases to be expirable new_expiration_time = now - 3000 + 31*24*60*60 def count_shares(si): return len(list(ss._iter_share_files(si))) def _get_sharefile(si): return list(ss._iter_share_files(si))[0] def count_leases(si): return len(list(_get_sharefile(si).get_leases())) sf0 = _get_sharefile(immutable_si_0) self.backdate_lease(sf0, self.renew_secrets[0], new_expiration_time) sf1 = _get_sharefile(immutable_si_1) self.backdate_lease(sf1, self.renew_secrets[1], new_expiration_time) self.backdate_lease(sf1, self.renew_secrets[2], new_expiration_time) sf2 = _get_sharefile(mutable_si_2) self.backdate_lease(sf2, self.renew_secrets[3], new_expiration_time) sf3 = _get_sharefile(mutable_si_3) self.backdate_lease(sf3, self.renew_secrets[4], new_expiration_time) self.backdate_lease(sf3, self.renew_secrets[5], new_expiration_time) ss.setServiceParent(self.s) def _wait(): return bool(lc.get_state()["last-cycle-finished"] is not None) d = self.poll(_wait) def _after_first_cycle(ignored): self.failUnlessEqual(count_shares(immutable_si_0), 0) self.failUnlessEqual(count_shares(immutable_si_1), 0) self.failUnlessEqual(count_shares(mutable_si_2), 1) self.failUnlessEqual(count_leases(mutable_si_2), 1) self.failUnlessEqual(count_shares(mutable_si_3), 1) self.failUnlessEqual(count_leases(mutable_si_3), 2) d.addCallback(_after_first_cycle) d.addCallback(lambda ign: renderDeferred(webstatus)) def _check_html(html): s = remove_tags(html) self.failUnlessIn(b"The following sharetypes will be expired: immutable.", s) d.addCallback(_check_html) return d def test_only_mutable(self): basedir = "storage/LeaseCrawler/only_mutable" fileutil.make_dirs(basedir) now = time.time() then = int(now - 2000) ss = StorageServer(basedir, b"\x00" * 20, expiration_enabled=True, expiration_mode="cutoff-date", expiration_cutoff_date=then, expiration_sharetypes=("mutable",)) lc = ss.lease_checker lc.slow_start = 0 webstatus = StorageStatus(ss) self.make_shares(ss) [immutable_si_0, immutable_si_1, mutable_si_2, mutable_si_3] = self.sis # set all leases to be expirable new_expiration_time = now - 3000 + 31*24*60*60 def count_shares(si): return len(list(ss._iter_share_files(si))) def _get_sharefile(si): return list(ss._iter_share_files(si))[0] def count_leases(si): return len(list(_get_sharefile(si).get_leases())) sf0 = _get_sharefile(immutable_si_0) self.backdate_lease(sf0, self.renew_secrets[0], new_expiration_time) sf1 = _get_sharefile(immutable_si_1) self.backdate_lease(sf1, self.renew_secrets[1], new_expiration_time) self.backdate_lease(sf1, self.renew_secrets[2], new_expiration_time) sf2 = _get_sharefile(mutable_si_2) self.backdate_lease(sf2, self.renew_secrets[3], new_expiration_time) sf3 = _get_sharefile(mutable_si_3) self.backdate_lease(sf3, self.renew_secrets[4], new_expiration_time) self.backdate_lease(sf3, self.renew_secrets[5], new_expiration_time) ss.setServiceParent(self.s) def _wait(): return 
bool(lc.get_state()["last-cycle-finished"] is not None) d = self.poll(_wait) def _after_first_cycle(ignored): self.failUnlessEqual(count_shares(immutable_si_0), 1) self.failUnlessEqual(count_leases(immutable_si_0), 1) self.failUnlessEqual(count_shares(immutable_si_1), 1) self.failUnlessEqual(count_leases(immutable_si_1), 2) self.failUnlessEqual(count_shares(mutable_si_2), 0) self.failUnlessEqual(count_shares(mutable_si_3), 0) d.addCallback(_after_first_cycle) d.addCallback(lambda ign: renderDeferred(webstatus)) def _check_html(html): s = remove_tags(html) self.failUnlessIn(b"The following sharetypes will be expired: mutable.", s) d.addCallback(_check_html) return d def test_bad_mode(self): basedir = "storage/LeaseCrawler/bad_mode" fileutil.make_dirs(basedir) e = self.failUnlessRaises(ValueError, StorageServer, basedir, b"\x00" * 20, expiration_mode="bogus") self.failUnlessIn("GC mode 'bogus' must be 'age' or 'cutoff-date'", str(e)) def test_limited_history(self): basedir = "storage/LeaseCrawler/limited_history" fileutil.make_dirs(basedir) ss = StorageServer(basedir, b"\x00" * 20) # make it start sooner than usual. lc = ss.lease_checker lc.slow_start = 0 lc.cpu_slice = 500 # create a few shares, with some leases on them self.make_shares(ss) ss.setServiceParent(self.s) def _wait_until_15_cycles_done(): last = lc.state["last-cycle-finished"] if last is not None and last >= 15: return True if lc.timer: lc.timer.reset(0) return False d = self.poll(_wait_until_15_cycles_done) def _check(ignored): s = lc.get_state() h = s["history"] self.failUnlessEqual(len(h), 10) self.failUnlessEqual(max(int(k) for k in h.keys()), 15) self.failUnlessEqual(min(int(k) for k in h.keys()), 6) d.addCallback(_check) return d def test_unpredictable_future(self): basedir = "storage/LeaseCrawler/unpredictable_future" fileutil.make_dirs(basedir) ss = StorageServer(basedir, b"\x00" * 20) # make it start sooner than usual. lc = ss.lease_checker lc.slow_start = 0 lc.cpu_slice = -1.0 # stop quickly self.make_shares(ss) ss.setServiceParent(self.s) d = fireEventually() def _check(ignored): # this should fire after the first bucket is complete, but before # the first prefix is complete, so the progress-measurer won't # think we've gotten far enough to raise our percent-complete # above 0%, triggering the cannot-predict-the-future code in # expirer.py . This will have to change if/when the # progress-measurer gets smart enough to count buckets (we'll # have to interrupt it even earlier, before it's finished the # first bucket). 
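# (Descriptive note: because the crawler is interrupted at 0% complete,
#  it has nothing to extrapolate from, so every "estimated-..."
#  space-recovered field checked below is expected to be None rather than
#  a number.)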
s = lc.get_state() if "cycle-to-date" not in s: d2 = fireEventually() d2.addCallback(_check) return d2 self.failUnlessIn("cycle-to-date", s) self.failUnlessIn("estimated-remaining-cycle", s) self.failUnlessIn("estimated-current-cycle", s) left = s["estimated-remaining-cycle"]["space-recovered"] self.failUnlessEqual(left["actual-buckets"], None) self.failUnlessEqual(left["original-buckets"], None) self.failUnlessEqual(left["configured-buckets"], None) self.failUnlessEqual(left["actual-shares"], None) self.failUnlessEqual(left["original-shares"], None) self.failUnlessEqual(left["configured-shares"], None) self.failUnlessEqual(left["actual-diskbytes"], None) self.failUnlessEqual(left["original-diskbytes"], None) self.failUnlessEqual(left["configured-diskbytes"], None) self.failUnlessEqual(left["actual-sharebytes"], None) self.failUnlessEqual(left["original-sharebytes"], None) self.failUnlessEqual(left["configured-sharebytes"], None) full = s["estimated-remaining-cycle"]["space-recovered"] self.failUnlessEqual(full["actual-buckets"], None) self.failUnlessEqual(full["original-buckets"], None) self.failUnlessEqual(full["configured-buckets"], None) self.failUnlessEqual(full["actual-shares"], None) self.failUnlessEqual(full["original-shares"], None) self.failUnlessEqual(full["configured-shares"], None) self.failUnlessEqual(full["actual-diskbytes"], None) self.failUnlessEqual(full["original-diskbytes"], None) self.failUnlessEqual(full["configured-diskbytes"], None) self.failUnlessEqual(full["actual-sharebytes"], None) self.failUnlessEqual(full["original-sharebytes"], None) self.failUnlessEqual(full["configured-sharebytes"], None) d.addCallback(_check) return d def test_no_st_blocks(self): basedir = "storage/LeaseCrawler/no_st_blocks" fileutil.make_dirs(basedir) ss = No_ST_BLOCKS_StorageServer(basedir, b"\x00" * 20, expiration_mode="age", expiration_override_lease_duration=-1000) # a negative expiration_time= means the "configured-" # space-recovered counts will be non-zero, since all shares will have # expired by then # make it start sooner than usual. lc = ss.lease_checker lc.slow_start = 0 self.make_shares(ss) ss.setServiceParent(self.s) def _wait(): return bool(lc.get_state()["last-cycle-finished"] is not None) d = self.poll(_wait) def _check(ignored): s = lc.get_state() last = s["history"]["0"] rec = last["space-recovered"] self.failUnlessEqual(rec["configured-buckets"], 4) self.failUnlessEqual(rec["configured-shares"], 4) self.failUnless(rec["configured-sharebytes"] > 0, rec["configured-sharebytes"]) # without the .st_blocks field in os.stat() results, we should be # reporting diskbytes==sharebytes self.failUnlessEqual(rec["configured-sharebytes"], rec["configured-diskbytes"]) d.addCallback(_check) return d def test_share_corruption(self): self._poll_should_ignore_these_errors = [ UnknownMutableContainerVersionError, UnknownImmutableContainerVersionError, ] basedir = "storage/LeaseCrawler/share_corruption" fileutil.make_dirs(basedir) ss = InstrumentedStorageServer(basedir, b"\x00" * 20) w = StorageStatus(ss) # make it start sooner than usual. 
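# (Descriptive note: No_ST_BLOCKS_StorageServer above feeds the crawler
#  stat() results with st_blocks removed, so real on-disk block usage
#  cannot be computed; the final assertion expects the fallback of
#  reporting diskbytes == sharebytes.)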
lc = ss.lease_checker lc.stop_after_first_bucket = True lc.slow_start = 0 lc.cpu_slice = 500 # create a few shares, with some leases on them self.make_shares(ss) # now corrupt one, and make sure the lease-checker keeps going [immutable_si_0, immutable_si_1, mutable_si_2, mutable_si_3] = self.sis first = min(self.sis) first_b32 = base32.b2a(first) fn = os.path.join(ss.sharedir, storage_index_to_dir(first), "0") f = open(fn, "rb+") f.seek(0) f.write(b"BAD MAGIC") f.close() # if get_share_file() doesn't see the correct mutable magic, it # assumes the file is an immutable share, and then # immutable.ShareFile sees a bad version. So regardless of which kind # of share we corrupted, this will trigger an # UnknownImmutableContainerVersionError. # also create an empty bucket empty_si = base32.b2a(b"\x04"*16) empty_bucket_dir = os.path.join(ss.sharedir, storage_index_to_dir(empty_si)) fileutil.make_dirs(empty_bucket_dir) ss.setServiceParent(self.s) d = fireEventually() # now examine the state right after the first bucket has been # processed. def _after_first_bucket(ignored): s = lc.get_state() if "cycle-to-date" not in s: d2 = fireEventually() d2.addCallback(_after_first_bucket) return d2 so_far = s["cycle-to-date"] rec = so_far["space-recovered"] self.failUnlessEqual(rec["examined-buckets"], 1) self.failUnlessEqual(rec["examined-shares"], 0) [(actual_b32, i)] = so_far["corrupt-shares"] actual_b32 = actual_b32.encode("ascii") self.failUnlessEqual((actual_b32, i), (first_b32, 0)) d.addCallback(_after_first_bucket) d.addCallback(lambda ign: renderJSON(w)) def _check_json(raw): data = json.loads(raw) # grr. json turns all dict keys into strings. so_far = data["lease-checker"]["cycle-to-date"] corrupt_shares = so_far["corrupt-shares"] # it also turns all tuples into lists, and result is unicode: [(actual_b32, i)] = corrupt_shares actual_b32 = actual_b32.encode("ascii") self.failUnlessEqual([actual_b32, i], [first_b32, 0]) d.addCallback(_check_json) d.addCallback(lambda ign: renderDeferred(w)) def _check_html(html): s = remove_tags(html) self.failUnlessIn(b"Corrupt shares: SI %s shnum 0" % first_b32, s) d.addCallback(_check_html) def _wait(): return bool(lc.get_state()["last-cycle-finished"] is not None) d.addCallback(lambda ign: self.poll(_wait)) def _after_first_cycle(ignored): s = lc.get_state() last = s["history"]["0"] rec = last["space-recovered"] self.failUnlessEqual(rec["examined-buckets"], 5) self.failUnlessEqual(rec["examined-shares"], 3) [(actual_b32, i)] = last["corrupt-shares"] actual_b32 = actual_b32.encode("ascii") self.failUnlessEqual((actual_b32, i), (first_b32, 0)) d.addCallback(_after_first_cycle) d.addCallback(lambda ign: renderJSON(w)) def _check_json_history(raw): data = json.loads(raw) last = data["lease-checker"]["history"]["0"] [(actual_b32, i)] = last["corrupt-shares"] actual_b32 = actual_b32.encode("ascii") self.failUnlessEqual([actual_b32, i], [first_b32, 0]) d.addCallback(_check_json_history) d.addCallback(lambda ign: renderDeferred(w)) def _check_html_history(html): s = remove_tags(html) self.failUnlessIn(b"Corrupt shares: SI %s shnum 0" % first_b32, s) d.addCallback(_check_html_history) def _cleanup(res): self.flushLoggedErrors(UnknownMutableContainerVersionError, UnknownImmutableContainerVersionError) return res d.addBoth(_cleanup) return d @skipIf(platform.isWindows(), "pickle test-data can't be loaded on windows") def test_deserialize_pickle(self): """ The crawler can read existing state from the old pickle format """ # this file came from an "in the wild" tahoe version 
1.16.0 original_pickle = FilePath(__file__).parent().child("data").child("lease_checker.state.txt") root = FilePath(self.mktemp()) storage = root.child("storage") storage.makedirs() test_pickle = storage.child("lease_checker.state") with test_pickle.open("wb") as local, original_pickle.open("rb") as remote: local.write(remote.read()) # convert from pickle format to JSON top = Options() top.parseOptions([ "admin", "migrate-crawler", "--basedir", storage.parent().path, ]) options = top.subOptions while hasattr(options, "subOptions"): options = options.subOptions options.stdout = StringIO() migrate_crawler(options) # the (existing) state file should have been upgraded to JSON self.assertFalse(test_pickle.exists()) self.assertTrue(test_pickle.siblingExtension(".json").exists()) serial = _LeaseStateSerializer(test_pickle.path) self.assertEqual( serial.load(), { u'last-complete-prefix': None, u'version': 1, u'current-cycle-start-time': 1635003106.611748, u'last-cycle-finished': 312, u'cycle-to-date': { u'leases-per-share-histogram': { u'1': 36793, u'2': 1, }, u'space-recovered': { u'examined-buckets-immutable': 17183, u'configured-buckets-mutable': 0, u'examined-shares-mutable': 1796, u'original-shares-mutable': 1563, u'configured-buckets-immutable': 0, u'original-shares-immutable': 27926, u'original-diskbytes-immutable': 431149056, u'examined-shares-immutable': 34998, u'original-buckets': 14661, u'actual-shares-immutable': 0, u'configured-shares': 0, u'original-buckets-mutable': 899, u'actual-diskbytes': 4096, u'actual-shares-mutable': 0, u'configured-buckets': 1, u'examined-buckets-unknown': 14, u'actual-sharebytes': 0, u'original-shares': 29489, u'actual-buckets-immutable': 0, u'original-sharebytes': 312664812, u'examined-sharebytes-immutable': 383801602, u'actual-shares': 0, u'actual-sharebytes-immutable': 0, u'original-diskbytes': 441643008, u'configured-diskbytes-mutable': 0, u'configured-sharebytes-immutable': 0, u'configured-shares-mutable': 0, u'actual-diskbytes-immutable': 0, u'configured-diskbytes-immutable': 0, u'original-diskbytes-mutable': 10489856, u'actual-sharebytes-mutable': 0, u'configured-sharebytes': 0, u'examined-shares': 36794, u'actual-diskbytes-mutable': 0, u'actual-buckets': 1, u'original-buckets-immutable': 13761, u'configured-sharebytes-mutable': 0, u'examined-sharebytes': 390369660, u'original-sharebytes-immutable': 308125753, u'original-sharebytes-mutable': 4539059, u'actual-buckets-mutable': 0, u'examined-buckets-mutable': 1043, u'configured-shares-immutable': 0, u'examined-diskbytes': 476598272, u'examined-diskbytes-mutable': 9154560, u'examined-sharebytes-mutable': 6568058, u'examined-buckets': 18241, u'configured-diskbytes': 4096, u'examined-diskbytes-immutable': 467443712}, u'corrupt-shares': [ [u'2dn6xnlnsqwtnapwxfdivpm3s4', 4], [u'2dn6xnlnsqwtnapwxfdivpm3s4', 1], [u'2rrzthwsrrxolevmwdvbdy3rqi', 4], [u'2rrzthwsrrxolevmwdvbdy3rqi', 1], [u'2skfngcto6h7eqmn4uo7ntk3ne', 4], [u'2skfngcto6h7eqmn4uo7ntk3ne', 1], [u'32d5swqpqx2mwix7xmqzvhdwje', 4], [u'32d5swqpqx2mwix7xmqzvhdwje', 1], [u'5mmayp66yflmpon3o6unsnbaca', 4], [u'5mmayp66yflmpon3o6unsnbaca', 1], [u'6ixhpvbtre7fnrl6pehlrlflc4', 4], [u'6ixhpvbtre7fnrl6pehlrlflc4', 1], [u'ewzhvswjsz4vp2bqkb6mi3bz2u', 4], [u'ewzhvswjsz4vp2bqkb6mi3bz2u', 1], [u'fu7pazf6ogavkqj6z4q5qqex3u', 4], [u'fu7pazf6ogavkqj6z4q5qqex3u', 1], [u'hbyjtqvpcimwxiyqbcbbdn2i4a', 4], [u'hbyjtqvpcimwxiyqbcbbdn2i4a', 1], [u'pmcjbdkbjdl26k3e6yja77femq', 4], [u'pmcjbdkbjdl26k3e6yja77femq', 1], [u'r6swof4v2uttbiiqwj5pi32cm4', 4], 
[u'r6swof4v2uttbiiqwj5pi32cm4', 1], [u't45v5akoktf53evc2fi6gwnv6y', 4], [u't45v5akoktf53evc2fi6gwnv6y', 1], [u'y6zb4faar3rdvn3e6pfg4wlotm', 4], [u'y6zb4faar3rdvn3e6pfg4wlotm', 1], [u'z3yghutvqoqbchjao4lndnrh3a', 4], [u'z3yghutvqoqbchjao4lndnrh3a', 1], ], u'lease-age-histogram': { "1641600,1728000": 78, "12441600,12528000": 78, "8640000,8726400": 32, "1814400,1900800": 1860, "2764800,2851200": 76, "11491200,11577600": 20, "10713600,10800000": 183, "47865600,47952000": 7, "3110400,3196800": 328, "10627200,10713600": 43, "45619200,45705600": 4, "12873600,12960000": 5, "7430400,7516800": 7228, "1555200,1641600": 492, "38880000,38966400": 3, "12528000,12614400": 193, "7344000,7430400": 12689, "2678400,2764800": 278, "2332800,2419200": 12, "9244800,9331200": 73, "12787200,12873600": 218, "49075200,49161600": 19, "10368000,10454400": 117, "4665600,4752000": 256, "7516800,7603200": 993, "42336000,42422400": 33, "10972800,11059200": 122, "39052800,39139200": 51, "12614400,12700800": 210, "7603200,7689600": 2004, "10540800,10627200": 16, "950400,1036800": 4435, "42076800,42163200": 4, "8812800,8899200": 57, "5788800,5875200": 954, "36374400,36460800": 3, "9331200,9417600": 12, "30499200,30585600": 5, "12700800,12787200": 25, "2073600,2160000": 388, "12960000,13046400": 8, "11923200,12009600": 89, "3369600,3456000": 79, "3196800,3283200": 628, "37497600,37584000": 11, "33436800,33523200": 7, "44928000,45014400": 2, "37929600,38016000": 3, "38966400,39052800": 61, "3283200,3369600": 86, "11750400,11836800": 7, "3801600,3888000": 32, "46310400,46396800": 1, "4838400,4924800": 386, "8208000,8294400": 38, "37411200,37497600": 4, "12009600,12096000": 329, "10454400,10540800": 1239, "40176000,40262400": 1, "3715200,3801600": 104, "44409600,44496000": 13, "38361600,38448000": 5, "12268800,12355200": 2, "28771200,28857600": 6, "41990400,42076800": 10, "2592000,2678400": 40, }, }, 'current-cycle': None, 'last-complete-bucket': None, } ) second_serial = _LeaseStateSerializer(serial._path.path) self.assertEqual( serial.load(), second_serial.load(), ) @skipIf(platform.isWindows(), "pickle test-data can't be loaded on windows") def test_deserialize_history_pickle(self): """ The crawler can read existing history state from the old pickle format """ # this file came from an "in the wild" tahoe version 1.16.0 original_pickle = FilePath(__file__).parent().child("data").child("lease_checker.history.txt") root = FilePath(self.mktemp()) storage = root.child("storage") storage.makedirs() test_pickle = storage.child("lease_checker.history") with test_pickle.open("wb") as local, original_pickle.open("rb") as remote: local.write(remote.read()) # convert from pickle format to JSON top = Options() top.parseOptions([ "admin", "migrate-crawler", "--basedir", storage.parent().path, ]) options = top.subOptions while hasattr(options, "subOptions"): options = options.subOptions options.stdout = StringIO() migrate_crawler(options) serial = _HistorySerializer(test_pickle.path) self.maxDiff = None self.assertEqual( serial.load(), { "363": { 'configured-expiration-mode': ['age', None, None, ['immutable', 'mutable']], 'expiration-enabled': False, 'leases-per-share-histogram': { '1': 39774, }, 'lease-age-histogram': [ [0, 86400, 3125], [345600, 432000, 4175], [950400, 1036800, 141], [1036800, 1123200, 345], [1123200, 1209600, 81], [1296000, 1382400, 1832], [1555200, 1641600, 390], [1728000, 1814400, 12], [2073600, 2160000, 84], [2160000, 2246400, 228], [2246400, 2332800, 75], [2592000, 2678400, 644], [2678400, 2764800, 273], [2764800, 
2851200, 94], [2851200, 2937600, 97], [3196800, 3283200, 143], [3283200, 3369600, 48], [4147200, 4233600, 374], [4320000, 4406400, 534], [5270400, 5356800, 1005], [6739200, 6825600, 8704], [6825600, 6912000, 3986], [6912000, 6998400, 7592], [6998400, 7084800, 2607], [7689600, 7776000, 35], [8035200, 8121600, 33], [8294400, 8380800, 54], [8640000, 8726400, 45], [8726400, 8812800, 27], [8812800, 8899200, 12], [9763200, 9849600, 77], [9849600, 9936000, 91], [9936000, 10022400, 1210], [10022400, 10108800, 45], [10108800, 10195200, 186], [10368000, 10454400, 113], [10972800, 11059200, 21], [11232000, 11318400, 5], [11318400, 11404800, 19], [11404800, 11491200, 238], [11491200, 11577600, 159], [11750400, 11836800, 1], [11836800, 11923200, 32], [11923200, 12009600, 192], [12009600, 12096000, 222], [12096000, 12182400, 18], [12182400, 12268800, 224], [12268800, 12355200, 9], [12355200, 12441600, 9], [12441600, 12528000, 10], [12528000, 12614400, 6], [12614400, 12700800, 6], [12700800, 12787200, 18], [12787200, 12873600, 6], [12873600, 12960000, 62], ], 'cycle-start-finish-times': [1634446505.241972, 1634446666.055401], 'space-recovered': { 'examined-buckets-immutable': 17896, 'configured-buckets-mutable': 0, 'examined-shares-mutable': 2473, 'original-shares-mutable': 1185, 'configured-buckets-immutable': 0, 'original-shares-immutable': 27457, 'original-diskbytes-immutable': 2810982400, 'examined-shares-immutable': 37301, 'original-buckets': 14047, 'actual-shares-immutable': 0, 'configured-shares': 0, 'original-buckets-mutable': 691, 'actual-diskbytes': 4096, 'actual-shares-mutable': 0, 'configured-buckets': 1, 'examined-buckets-unknown': 14, 'actual-sharebytes': 0, 'original-shares': 28642, 'actual-buckets-immutable': 0, 'original-sharebytes': 2695552941, 'examined-sharebytes-immutable': 2754798505, 'actual-shares': 0, 'actual-sharebytes-immutable': 0, 'original-diskbytes': 2818981888, 'configured-diskbytes-mutable': 0, 'configured-sharebytes-immutable': 0, 'configured-shares-mutable': 0, 'actual-diskbytes-immutable': 0, 'configured-diskbytes-immutable': 0, 'original-diskbytes-mutable': 7995392, 'actual-sharebytes-mutable': 0, 'configured-sharebytes': 0, 'examined-shares': 39774, 'actual-diskbytes-mutable': 0, 'actual-buckets': 1, 'original-buckets-immutable': 13355, 'configured-sharebytes-mutable': 0, 'examined-sharebytes': 2763646972, 'original-sharebytes-immutable': 2692076909, 'original-sharebytes-mutable': 3476032, 'actual-buckets-mutable': 0, 'examined-buckets-mutable': 1286, 'configured-shares-immutable': 0, 'examined-diskbytes': 2854801408, 'examined-diskbytes-mutable': 12161024, 'examined-sharebytes-mutable': 8848467, 'examined-buckets': 19197, 'configured-diskbytes': 4096, 'examined-diskbytes-immutable': 2842640384 }, 'corrupt-shares': [ ['2dn6xnlnsqwtnapwxfdivpm3s4', 3], ['2dn6xnlnsqwtnapwxfdivpm3s4', 0], ['2rrzthwsrrxolevmwdvbdy3rqi', 3], ['2rrzthwsrrxolevmwdvbdy3rqi', 0], ['2skfngcto6h7eqmn4uo7ntk3ne', 3], ['2skfngcto6h7eqmn4uo7ntk3ne', 0], ['32d5swqpqx2mwix7xmqzvhdwje', 3], ['32d5swqpqx2mwix7xmqzvhdwje', 0], ['5mmayp66yflmpon3o6unsnbaca', 3], ['5mmayp66yflmpon3o6unsnbaca', 0], ['6ixhpvbtre7fnrl6pehlrlflc4', 3], ['6ixhpvbtre7fnrl6pehlrlflc4', 0], ['ewzhvswjsz4vp2bqkb6mi3bz2u', 3], ['ewzhvswjsz4vp2bqkb6mi3bz2u', 0], ['fu7pazf6ogavkqj6z4q5qqex3u', 3], ['fu7pazf6ogavkqj6z4q5qqex3u', 0], ['hbyjtqvpcimwxiyqbcbbdn2i4a', 3], ['hbyjtqvpcimwxiyqbcbbdn2i4a', 0], ['pmcjbdkbjdl26k3e6yja77femq', 3], ['pmcjbdkbjdl26k3e6yja77femq', 0], ['r6swof4v2uttbiiqwj5pi32cm4', 3], 
['r6swof4v2uttbiiqwj5pi32cm4', 0], ['t45v5akoktf53evc2fi6gwnv6y', 3], ['t45v5akoktf53evc2fi6gwnv6y', 0], ['y6zb4faar3rdvn3e6pfg4wlotm', 3], ['y6zb4faar3rdvn3e6pfg4wlotm', 0], ['z3yghutvqoqbchjao4lndnrh3a', 3], ['z3yghutvqoqbchjao4lndnrh3a', 0], ] } } ) class WebStatus(unittest.TestCase, pollmixin.PollMixin): def setUp(self): self.s = service.MultiService() self.s.startService() def tearDown(self): return self.s.stopService() def test_no_server(self): w = StorageStatus(None) html = renderSynchronously(w) self.failUnlessIn(b"

<h1>No Storage Server Running</h1>

", html) def test_status(self): basedir = "storage/WebStatus/status" fileutil.make_dirs(basedir) nodeid = b"\x00" * 20 ss = StorageServer(basedir, nodeid) ss.setServiceParent(self.s) w = StorageStatus(ss, "nickname") d = renderDeferred(w) def _check_html(html): self.failUnlessIn(b"

<h1>Storage Server Status</h1>

", html) s = remove_tags(html) self.failUnlessIn(b"Server Nickname: nickname", s) self.failUnlessIn(b"Server Nodeid: %s" % base32.b2a(nodeid), s) self.failUnlessIn(b"Accepting new shares: Yes", s) self.failUnlessIn(b"Reserved space: - 0 B (0)", s) d.addCallback(_check_html) d.addCallback(lambda ign: renderJSON(w)) def _check_json(raw): data = json.loads(raw) s = data["stats"] self.failUnlessEqual(s["storage_server.accepting_immutable_shares"], 1) self.failUnlessEqual(s["storage_server.reserved_space"], 0) self.failUnlessIn("bucket-counter", data) self.failUnlessIn("lease-checker", data) d.addCallback(_check_json) return d def test_status_no_disk_stats(self): def call_get_disk_stats(whichdir, reserved_space=0): raise AttributeError() self.patch(fileutil, 'get_disk_stats', call_get_disk_stats) # Some platforms may have no disk stats API. Make sure the code can handle that # (test runs on all platforms). basedir = "storage/WebStatus/status_no_disk_stats" fileutil.make_dirs(basedir) ss = StorageServer(basedir, b"\x00" * 20) ss.setServiceParent(self.s) w = StorageStatus(ss) html = renderSynchronously(w) self.failUnlessIn(b"

<h1>Storage Server Status</h1>

", html) s = remove_tags(html) self.failUnlessIn(b"Accepting new shares: Yes", s) self.failUnlessIn(b"Total disk space: ?", s) self.failUnlessIn(b"Space Available to Tahoe: ?", s) self.failUnless(ss.get_available_space() is None) def test_status_bad_disk_stats(self): def call_get_disk_stats(whichdir, reserved_space=0): raise OSError() self.patch(fileutil, 'get_disk_stats', call_get_disk_stats) # If the API to get disk stats exists but a call to it fails, then the status should # show that no shares will be accepted, and get_available_space() should be 0. basedir = "storage/WebStatus/status_bad_disk_stats" fileutil.make_dirs(basedir) ss = StorageServer(basedir, b"\x00" * 20) ss.setServiceParent(self.s) w = StorageStatus(ss) html = renderSynchronously(w) self.failUnlessIn(b"

<h1>Storage Server Status</h1>

", html) s = remove_tags(html) self.failUnlessIn(b"Accepting new shares: No", s) self.failUnlessIn(b"Total disk space: ?", s) self.failUnlessIn(b"Space Available to Tahoe: ?", s) self.failUnlessEqual(ss.get_available_space(), 0) def test_status_right_disk_stats(self): GB = 1000000000 total = 5*GB free_for_root = 4*GB free_for_nonroot = 3*GB reserved = 1*GB basedir = "storage/WebStatus/status_right_disk_stats" fileutil.make_dirs(basedir) ss = StorageServer(basedir, b"\x00" * 20, reserved_space=reserved) expecteddir = ss.sharedir def call_get_disk_stats(whichdir, reserved_space=0): self.failUnlessEqual(whichdir, expecteddir) self.failUnlessEqual(reserved_space, reserved) used = total - free_for_root avail = max(free_for_nonroot - reserved_space, 0) return { 'total': total, 'free_for_root': free_for_root, 'free_for_nonroot': free_for_nonroot, 'used': used, 'avail': avail, } self.patch(fileutil, 'get_disk_stats', call_get_disk_stats) ss.setServiceParent(self.s) w = StorageStatus(ss) html = renderSynchronously(w) self.failUnlessIn(b"

<h1>Storage Server Status</h1>

", html) s = remove_tags(html) self.failUnlessIn(b"Total disk space: 5.00 GB", s) self.failUnlessIn(b"Disk space used: - 1.00 GB", s) self.failUnlessIn(b"Disk space free (root): 4.00 GB", s) self.failUnlessIn(b"Disk space free (non-root): 3.00 GB", s) self.failUnlessIn(b"Reserved space: - 1.00 GB", s) self.failUnlessIn(b"Space Available to Tahoe: 2.00 GB", s) self.failUnlessEqual(ss.get_available_space(), 2*GB) def test_readonly(self): basedir = "storage/WebStatus/readonly" fileutil.make_dirs(basedir) ss = StorageServer(basedir, b"\x00" * 20, readonly_storage=True) ss.setServiceParent(self.s) w = StorageStatus(ss) html = renderSynchronously(w) self.failUnlessIn(b"

<h1>Storage Server Status</h1>

", html) s = remove_tags(html) self.failUnlessIn(b"Accepting new shares: No", s) def test_reserved(self): basedir = "storage/WebStatus/reserved" fileutil.make_dirs(basedir) ss = StorageServer(basedir, b"\x00" * 20, reserved_space=10e6) ss.setServiceParent(self.s) w = StorageStatus(ss) html = renderSynchronously(w) self.failUnlessIn(b"

<h1>Storage Server Status</h1>

", html) s = remove_tags(html) self.failUnlessIn(b"Reserved space: - 10.00 MB (10000000)", s) def test_huge_reserved(self): basedir = "storage/WebStatus/reserved" fileutil.make_dirs(basedir) ss = StorageServer(basedir, b"\x00" * 20, reserved_space=10e6) ss.setServiceParent(self.s) w = StorageStatus(ss) html = renderSynchronously(w) self.failUnlessIn(b"

<h1>Storage Server Status</h1>

", html) s = remove_tags(html) self.failUnlessIn(b"Reserved space: - 10.00 MB (10000000)", s) def test_util(self): w = StorageStatusElement(None, None) self.failUnlessEqual(w.render_space(None), "?") self.failUnlessEqual(w.render_space(10e6), "10000000") self.failUnlessEqual(w.render_abbrev_space(None), "?") self.failUnlessEqual(w.render_abbrev_space(10e6), "10.00 MB") self.failUnlessEqual(remove_prefix("foo.bar", "foo."), "bar") self.failUnlessEqual(remove_prefix("foo.bar", "baz."), None) tahoe_lafs-1.20.0/src/allmydata/test/test_system.py0000644000000000000000000024743013615410400017304 0ustar00""" Ported to Python 3. """ from __future__ import annotations from past.builtins import chr as byteschr from six import ensure_text import os, re, sys, time, json from typing import Optional from bs4 import BeautifulSoup from twisted.trial import unittest from twisted.internet import defer from allmydata import uri from allmydata.storage.mutable import MutableShareFile from allmydata.storage.immutable import ShareFile from allmydata.storage.server import si_a2b from allmydata.immutable import offloaded, upload from allmydata.immutable.literal import LiteralFileNode from allmydata.immutable.filenode import ImmutableFileNode from allmydata.util import idlib, mathutil from allmydata.util import log, base32 from allmydata.util.encodingutil import quote_output, unicode_to_argv from allmydata.util.fileutil import abspath_expanduser_unicode from allmydata.util.consumer import MemoryConsumer, download_to_data from allmydata.util.deferredutil import async_to_deferred from allmydata.interfaces import IDirectoryNode, IFileNode, \ NoSuchChildError, NoSharesError, SDMF_VERSION, MDMF_VERSION from allmydata.monitor import Monitor from allmydata.mutable.common import NotWriteableError from allmydata.mutable import layout as mutable_layout from allmydata.mutable.publish import MutableData from foolscap.api import DeadReferenceError, fireEventually from twisted.python.failure import Failure from twisted.internet.utils import ( getProcessOutputAndValue, ) from .common_web import do_http as do_http_bytes, Error from .web.common import ( assert_soup_has_tag_with_attributes ) from .common_system import SystemTestMixin from .common_util import run_cli_unicode class RunBinTahoeMixin(object): def run_bintahoe(self, args, stdin=None, python_options:Optional[list[str]]=None, env=None): # test_runner.run_bintahoe has better unicode support but doesn't # support env yet and is also synchronous. If we could get rid of # this in favor of that, though, it would probably be an improvement. if python_options is None: python_options = [] command = sys.executable argv = python_options + ["-b", "-m", "allmydata.scripts.runner"] + args if env is None: env = os.environ d = getProcessOutputAndValue(command, argv, env, stdinBytes=stdin) def fix_signal(result): # Mirror subprocess.Popen.returncode structure (out, err, signal) = result return (out, err, -signal) d.addErrback(fix_signal) return d def run_cli(*args, **kwargs): """ Run a Tahoe-LAFS CLI utility, but inline. Version of run_cli_unicode() that takes any kind of string, and the command-line args inline instead of as verb + list. Backwards compatible version so we don't have to change all the tests that expected this API. 
""" nodeargs = [ensure_text(a) for a in kwargs.pop("nodeargs", [])] kwargs["nodeargs"] = nodeargs return run_cli_unicode( ensure_text(args[0]), [ensure_text(a) for a in args[1:]], **kwargs) def do_http(*args, **kwargs): """Wrapper for do_http() that returns Unicode.""" return do_http_bytes(*args, **kwargs).addCallback( lambda b: str(b, "utf-8")) LARGE_DATA = b""" This is some data to publish to the remote grid.., which needs to be large enough to not fit inside a LIT uri. """ class CountingDataUploadable(upload.Data): bytes_read = 0 interrupt_after = None interrupt_after_d = None def read(self, length): self.bytes_read += length if self.interrupt_after is not None: if self.bytes_read > self.interrupt_after: self.interrupt_after = None self.interrupt_after_d.callback(self) return upload.Data.read(self, length) class SystemTest(SystemTestMixin, RunBinTahoeMixin, unittest.TestCase): """Foolscap integration-y tests.""" FORCE_FOOLSCAP_FOR_STORAGE = True timeout = 300 @property def basedir(self): return "system/SystemTest/{}-foolscap-{}".format( self.id().split(".")[-1], self.FORCE_FOOLSCAP_FOR_STORAGE ) def test_connections(self): d = self.set_up_nodes() self.extra_node = None d.addCallback(lambda res: self.add_extra_node(self.numclients)) def _check(extra_node): self.extra_node = extra_node for c in self.clients: all_peerids = c.get_storage_broker().get_all_serverids() self.failUnlessEqual(len(all_peerids), self.numclients+1) sb = c.storage_broker permuted_peers = sb.get_servers_for_psi("a") self.failUnlessEqual(len(permuted_peers), self.numclients+1) d.addCallback(_check) def _shutdown_extra_node(res): if self.extra_node: return self.extra_node.stopService() return res d.addBoth(_shutdown_extra_node) return d # test_connections is subsumed by test_upload_and_download, and takes # quite a while to run on a slow machine (because of all the TLS # connections that must be established). If we ever rework the introducer # code to such an extent that we're not sure if it works anymore, we can # reinstate this test until it does. del test_connections def test_upload_and_download_random_key(self): return self._test_upload_and_download(convergence=None) def test_upload_and_download_convergent(self): return self._test_upload_and_download(convergence=b"some convergence string") def _test_upload_and_download(self, convergence): # we use 4000 bytes of data, which will result in about 400k written # to disk among all our simulated nodes DATA = b"Some data to upload\n" * 200 d = self.set_up_nodes() def _check_connections(res): for c in self.clients: c.encoding_params['happy'] = 5 all_peerids = c.get_storage_broker().get_all_serverids() self.failUnlessEqual(len(all_peerids), self.numclients) sb = c.storage_broker permuted_peers = sb.get_servers_for_psi(b"a") self.failUnlessEqual(len(permuted_peers), self.numclients) d.addCallback(_check_connections) def _do_upload(res): log.msg("UPLOADING") u = self.clients[0].getServiceNamed("uploader") self.uploader = u # we crank the max segsize down to 1024b for the duration of this # test, so we can exercise multiple segments. It is important # that this is not a multiple of the segment size, so that the # tail segment is not the same length as the others. This actualy # gets rounded up to 1025 to be a multiple of the number of # required shares (since we use 25 out of 100 FEC). 
up = upload.Data(DATA, convergence=convergence) up.max_segment_size = 1024 d1 = u.upload(up) return d1 d.addCallback(_do_upload) def _upload_done(results): theuri = results.get_uri() log.msg("upload finished: uri is %r" % (theuri,)) self.uri = theuri assert isinstance(self.uri, bytes), self.uri self.cap = uri.from_string(self.uri) self.n = self.clients[1].create_node_from_uri(self.uri) d.addCallback(_upload_done) def _upload_again(res): # Upload again. If using convergent encryption then this ought to be # short-circuited, however with the way we currently generate URIs # (i.e. because they include the roothash), we have to do all of the # encoding work, and only get to save on the upload part. log.msg("UPLOADING AGAIN") up = upload.Data(DATA, convergence=convergence) up.max_segment_size = 1024 return self.uploader.upload(up) d.addCallback(_upload_again) def _download_to_data(res): log.msg("DOWNLOADING") return download_to_data(self.n) d.addCallback(_download_to_data) def _download_to_data_done(data): log.msg("download finished") self.failUnlessEqual(data, DATA) d.addCallback(_download_to_data_done) def _test_read(res): n = self.clients[1].create_node_from_uri(self.uri) d = download_to_data(n) def _read_done(data): self.failUnlessEqual(data, DATA) d.addCallback(_read_done) d.addCallback(lambda ign: n.read(MemoryConsumer(), offset=1, size=4)) def _read_portion_done(mc): self.failUnlessEqual(b"".join(mc.chunks), DATA[1:1+4]) d.addCallback(_read_portion_done) d.addCallback(lambda ign: n.read(MemoryConsumer(), offset=2, size=None)) def _read_tail_done(mc): self.failUnlessEqual(b"".join(mc.chunks), DATA[2:]) d.addCallback(_read_tail_done) d.addCallback(lambda ign: n.read(MemoryConsumer(), size=len(DATA)+1000)) def _read_too_much(mc): self.failUnlessEqual(b"".join(mc.chunks), DATA) d.addCallback(_read_too_much) return d d.addCallback(_test_read) def _test_bad_read(res): bad_u = uri.from_string_filenode(self.uri) bad_u.key = self.flip_bit(bad_u.key) bad_n = self.clients[1].create_node_from_uri(bad_u.to_string()) # this should cause an error during download d = self.shouldFail2(NoSharesError, "'download bad node'", None, bad_n.read, MemoryConsumer(), offset=2) return d d.addCallback(_test_bad_read) def _download_nonexistent_uri(res): baduri = self.mangle_uri(self.uri) badnode = self.clients[1].create_node_from_uri(baduri) log.msg("about to download non-existent URI", level=log.UNUSUAL, facility="tahoe.tests") d1 = download_to_data(badnode) def _baduri_should_fail(res): log.msg("finished downloading non-existent URI", level=log.UNUSUAL, facility="tahoe.tests") self.failUnless(isinstance(res, Failure)) self.failUnless(res.check(NoSharesError), "expected NoSharesError, got %s" % res) d1.addBoth(_baduri_should_fail) return d1 d.addCallback(_download_nonexistent_uri) # add a new node, which doesn't accept shares, and only uses the # helper for upload. 
d.addCallback(lambda res: self.add_extra_node(self.numclients, self.helper_furl, add_to_sparent=True)) def _added(extra_node): self.extra_node = extra_node self.extra_node.encoding_params['happy'] = 5 d.addCallback(_added) def _has_helper(): uploader = self.extra_node.getServiceNamed("uploader") furl, connected = uploader.get_helper_info() return connected d.addCallback(lambda ign: self.poll(_has_helper)) HELPER_DATA = b"Data that needs help to upload" * 1000 def _upload_with_helper(res): u = upload.Data(HELPER_DATA, convergence=convergence) d = self.extra_node.upload(u) def _uploaded(results): n = self.clients[1].create_node_from_uri(results.get_uri()) return download_to_data(n) d.addCallback(_uploaded) def _check(newdata): self.failUnlessEqual(newdata, HELPER_DATA) d.addCallback(_check) return d d.addCallback(_upload_with_helper) def _upload_duplicate_with_helper(res): u = upload.Data(HELPER_DATA, convergence=convergence) u.debug_stash_RemoteEncryptedUploadable = True d = self.extra_node.upload(u) def _uploaded(results): n = self.clients[1].create_node_from_uri(results.get_uri()) return download_to_data(n) d.addCallback(_uploaded) def _check(newdata): self.failUnlessEqual(newdata, HELPER_DATA) self.failIf(hasattr(u, "debug_RemoteEncryptedUploadable"), "uploadable started uploading, should have been avoided") d.addCallback(_check) return d if convergence is not None: d.addCallback(_upload_duplicate_with_helper) d.addCallback(fireEventually) def _upload_resumable(res): DATA = b"Data that needs help to upload and gets interrupted" * 1000 u1 = CountingDataUploadable(DATA, convergence=convergence) u2 = CountingDataUploadable(DATA, convergence=convergence) # we interrupt the connection after about 5kB by shutting down # the helper, then restarting it. u1.interrupt_after = 5000 u1.interrupt_after_d = defer.Deferred() bounced_d = defer.Deferred() def _do_bounce(res): d = self.bounce_client(0) d.addBoth(bounced_d.callback) u1.interrupt_after_d.addCallback(_do_bounce) # sneak into the helper and reduce its chunk size, so that our # debug_interrupt will sever the connection on about the fifth # chunk fetched. This makes sure that we've started to write the # new shares before we abandon them, which exercises the # abort/delete-partial-share code. TODO: find a cleaner way to do # this. I know that this will affect later uses of the helper in # this same test run, but I'm not currently worried about it. offloaded.CHKCiphertextFetcher.CHUNK_SIZE = 1000 upload_d = self.extra_node.upload(u1) # The upload will start, and bounce_client() will be called after # about 5kB. bounced_d will fire after bounce_client() finishes # shutting down and restarting the node. d = bounced_d def _bounced(ign): # By this point, the upload should have failed because of the # interruption. upload_d will fire in a moment def _should_not_finish(res): self.fail("interrupted upload should have failed, not" " finished with result %s" % (res,)) def _interrupted(f): f.trap(DeadReferenceError) # make sure we actually interrupted it before finishing # the file self.failUnless(u1.bytes_read < len(DATA), "read %d out of %d total" % (u1.bytes_read, len(DATA))) upload_d.addCallbacks(_should_not_finish, _interrupted) return upload_d d.addCallback(_bounced) def _disconnected(res): # check to make sure the storage servers aren't still hanging # on to the partial share: their incoming/ directories should # now be empty. 
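# Sketch (illustrative only, a hypothetical helper mirroring the loop
# below): collect any incoming/ directories that still hold partially
# written shares, using the same storage/shares/incoming layout the
# assertions below walk via self.getdir().
def _leftover_incoming_dirs(client_basedirs):
    leftovers = []
    for base in client_basedirs:
        incdir = os.path.join(base, "storage", "shares", "incoming")
        if os.path.exists(incdir) and os.listdir(incdir):
            leftovers.append(incdir)
    return leftovers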
log.msg("disconnected", level=log.NOISY, facility="tahoe.test.test_system") for i in range(self.numclients): incdir = os.path.join(self.getdir("client%d" % i), "storage", "shares", "incoming") self.failIf(os.path.exists(incdir) and os.listdir(incdir)) d.addCallback(_disconnected) d.addCallback(lambda res: log.msg("wait_for_helper", level=log.NOISY, facility="tahoe.test.test_system")) # then we need to wait for the extra node to reestablish its # connection to the helper. d.addCallback(lambda ign: self.poll(_has_helper)) d.addCallback(lambda res: log.msg("uploading again", level=log.NOISY, facility="tahoe.test.test_system")) d.addCallback(lambda res: self.extra_node.upload(u2)) def _uploaded(results): cap = results.get_uri() log.msg("Second upload complete", level=log.NOISY, facility="tahoe.test.test_system") # this is really bytes received rather than sent, but it's # convenient and basically measures the same thing bytes_sent = results.get_ciphertext_fetched() self.failUnless(isinstance(bytes_sent, int), bytes_sent) # We currently don't support resumption of upload if the data is # encrypted with a random key. (Because that would require us # to store the key locally and re-use it on the next upload of # this file, which isn't a bad thing to do, but we currently # don't do it.) if convergence is not None: # Make sure we did not have to read the whole file the # second time around . self.failUnless(bytes_sent < len(DATA), "resumption didn't save us any work:" " read %r bytes out of %r total" % (bytes_sent, len(DATA))) else: # Make sure we did have to read the whole file the second # time around -- because the one that we partially uploaded # earlier was encrypted with a different random key. self.failIf(bytes_sent < len(DATA), "resumption saved us some work even though we were using random keys:" " read %r bytes out of %r total" % (bytes_sent, len(DATA))) n = self.clients[1].create_node_from_uri(cap) return download_to_data(n) d.addCallback(_uploaded) def _check(newdata): self.failUnlessEqual(newdata, DATA) # If using convergent encryption, then also check that the # helper has removed the temp file from its directories. 
if convergence is not None: basedir = os.path.join(self.getdir("client0"), "helper") files = os.listdir(os.path.join(basedir, "CHK_encoding")) self.failUnlessEqual(files, []) files = os.listdir(os.path.join(basedir, "CHK_incoming")) self.failUnlessEqual(files, []) d.addCallback(_check) return d d.addCallback(_upload_resumable) def _grab_stats(ignored): stats = self.clients[0].stats_provider.get_stats() s = stats["stats"] self.failUnlessEqual(s["storage_server.accepting_immutable_shares"], 1) c = stats["counters"] self.failUnless("storage_server.allocate" in c) d.addCallback(_grab_stats) return d def _find_all_shares(self, basedir): shares = [] for (dirpath, dirnames, filenames) in os.walk(basedir): if "storage" not in dirpath: continue if not filenames: continue pieces = dirpath.split(os.sep) if (len(pieces) >= 5 and pieces[-4] == "storage" and pieces[-3] == "shares"): # we're sitting in .../storage/shares/$START/$SINDEX , and there # are sharefiles here assert pieces[-5].startswith("client") client_num = int(pieces[-5][-1]) storage_index_s = pieces[-1] storage_index = si_a2b(storage_index_s.encode("ascii")) for sharename in filenames: shnum = int(sharename) filename = os.path.join(dirpath, sharename) data = (client_num, storage_index, filename, shnum) shares.append(data) if not shares: self.fail("unable to find any share files in %s" % basedir) return shares def _corrupt_mutable_share(self, filename, which): msf = MutableShareFile(filename) # Read more than share length: datav = msf.readv([ (0, 10_000_000) ]) final_share = datav[0] assert len(final_share) < 10_000_000 # ought to be truncated pieces = mutable_layout.unpack_share(final_share) (seqnum, root_hash, IV, k, N, segsize, datalen, verification_key, signature, share_hash_chain, block_hash_tree, share_data, enc_privkey) = pieces if which == "seqnum": seqnum = seqnum + 15 elif which == "R": root_hash = self.flip_bit(root_hash) elif which == "IV": IV = self.flip_bit(IV) elif which == "segsize": segsize = segsize + 15 elif which == "pubkey": verification_key = self.flip_bit(verification_key) elif which == "signature": signature = self.flip_bit(signature) elif which == "share_hash_chain": nodenum = list(share_hash_chain.keys())[0] share_hash_chain[nodenum] = self.flip_bit(share_hash_chain[nodenum]) elif which == "block_hash_tree": block_hash_tree[-1] = self.flip_bit(block_hash_tree[-1]) elif which == "share_data": share_data = self.flip_bit(share_data) elif which == "encprivkey": enc_privkey = self.flip_bit(enc_privkey) prefix = mutable_layout.pack_prefix(seqnum, root_hash, IV, k, N, segsize, datalen) final_share = mutable_layout.pack_share(prefix, verification_key, signature, share_hash_chain, block_hash_tree, share_data, enc_privkey) msf.writev( [(0, final_share)], None) def test_mutable_sdmf(self): """SDMF mutables can be uploaded, downloaded, and many other things.""" return self._test_mutable(SDMF_VERSION) def test_mutable_mdmf(self): """MDMF mutables can be uploaded, downloaded, and many other things.""" return self._test_mutable(MDMF_VERSION) def _test_mutable(self, mutable_version): DATA = b"initial contents go here." 
# 25 bytes % 3 != 0 DATA_uploadable = MutableData(DATA) NEWDATA = b"new contents yay" NEWDATA_uploadable = MutableData(NEWDATA) NEWERDATA = b"this is getting old" * 1_000_000 NEWERDATA_uploadable = MutableData(NEWERDATA) d = self.set_up_nodes() def _create_mutable(res): c = self.clients[0] log.msg("starting create_mutable_file") d1 = c.create_mutable_file(DATA_uploadable, mutable_version) def _done(res): log.msg("DONE: %s" % (res,)) self._mutable_node_1 = res d1.addCallback(_done) return d1 d.addCallback(_create_mutable) @defer.inlineCallbacks def _test_debug(res): # find a share. It is important to run this while there is only # one slot in the grid. shares = self._find_all_shares(self.basedir) (client_num, storage_index, filename, shnum) = shares[0] log.msg("test_system.SystemTest.test_mutable._test_debug using %s" % filename) log.msg(" for clients[%d]" % client_num) rc,output,err = yield run_cli("debug", "dump-share", "--offsets", filename) self.failUnlessEqual(rc, 0) try: share_type = 'SDMF' if mutable_version == SDMF_VERSION else 'MDMF' self.failUnless("Mutable slot found:\n" in output) self.assertIn(f"share_type: {share_type}\n", output) peerid = idlib.nodeid_b2a(self.clients[client_num].nodeid) self.failUnless(" WE for nodeid: %s\n" % peerid in output) self.failUnless(" num_extra_leases: 0\n" in output) self.failUnless(" secrets are for nodeid: %s\n" % peerid in output) self.failUnless(f" {share_type} contents:\n" in output) self.failUnless(" seqnum: 1\n" in output) self.failUnless(" required_shares: 3\n" in output) self.failUnless(" total_shares: 10\n" in output) if mutable_version == SDMF_VERSION: self.failUnless(" segsize: 27\n" in output, (output, filename)) self.failUnless(" datalen: 25\n" in output) # the exact share_hash_chain nodes depends upon the sharenum, # and is more of a hassle to compute than I want to deal with # now self.failUnless(" share_hash_chain: " in output) self.failUnless(" block_hash_tree: 1 nodes\n" in output) if mutable_version == SDMF_VERSION: expected = (" verify-cap: URI:SSK-Verifier:%s:" % str(base32.b2a(storage_index), "ascii")) else: expected = (" verify-cap: URI:MDMF-Verifier:%s" % str(base32.b2a(storage_index), "ascii")) self.assertIn(expected, output) except unittest.FailTest: print() print("dump-share output was:") print(output) raise d.addCallback(_test_debug) # test retrieval # first, let's see if we can use the existing node to retrieve the # contents. This allows it to use the cached pubkey and maybe the # latest-known sharemap. d.addCallback(lambda res: self._mutable_node_1.download_best_version()) def _check_download_1(res): self.failUnlessEqual(res, DATA) # now we see if we can retrieve the data from a new node, # constructed using the URI of the original one. We do this test # on the same client that uploaded the data. 
uri = self._mutable_node_1.get_uri() log.msg("starting retrieve1") newnode = self.clients[0].create_node_from_uri(uri) newnode_2 = self.clients[0].create_node_from_uri(uri) self.failUnlessIdentical(newnode, newnode_2) return newnode.download_best_version() d.addCallback(_check_download_1) def _check_download_2(res): self.failUnlessEqual(res, DATA) # same thing, but with a different client uri = self._mutable_node_1.get_uri() newnode = self.clients[1].create_node_from_uri(uri) log.msg("starting retrieve2") d1 = newnode.download_best_version() d1.addCallback(lambda res: (res, newnode)) return d1 d.addCallback(_check_download_2) def _check_download_3(res_and_newnode): (res, newnode) = res_and_newnode self.failUnlessEqual(res, DATA) # replace the data log.msg("starting replace1") d1 = newnode.overwrite(NEWDATA_uploadable) d1.addCallback(lambda res: newnode.download_best_version()) return d1 d.addCallback(_check_download_3) def _check_download_4(res): self.failUnlessEqual(res, NEWDATA) # now create an even newer node and replace the data on it. This # new node has never been used for download before. uri = self._mutable_node_1.get_uri() newnode1 = self.clients[2].create_node_from_uri(uri) newnode2 = self.clients[3].create_node_from_uri(uri) self._newnode3 = self.clients[3].create_node_from_uri(uri) log.msg("starting replace2") d1 = newnode1.overwrite(NEWERDATA_uploadable) d1.addCallback(lambda res: newnode2.download_best_version()) return d1 d.addCallback(_check_download_4) def _check_download_5(res): log.msg("finished replace2") self.failUnlessEqual(res, NEWERDATA) d.addCallback(_check_download_5) # The previous checks upload a complete replacement. This uses a # different API that is supposed to do a partial write at an offset. @async_to_deferred async def _check_write_at_offset(newnode): log.msg("writing at offset") start = b"abcdef" expected = b"abXYef" uri = self._mutable_node_1.get_uri() newnode = self.clients[0].create_node_from_uri(uri) await newnode.overwrite(MutableData(start)) version = await newnode.get_mutable_version() await version.update(MutableData(b"XY"), 2) result = await newnode.download_best_version() self.assertEqual(result, expected) # Revert to previous version await newnode.overwrite(MutableData(NEWERDATA)) d.addCallback(_check_write_at_offset) def _corrupt_shares(_res): # run around and flip bits in all but k of the shares, to test # the hash checks shares = self._find_all_shares(self.basedir) ## sort by share number #shares.sort( lambda a,b: cmp(a[3], b[3]) ) where = dict([ (shnum, filename) for (client_num, storage_index, filename, shnum) in shares ]) assert len(where) == 10 # this test is designed for 3-of-10 for shnum, filename in list(where.items()): # shares 7,8,9 are left alone. read will check # (share_hash_chain, block_hash_tree, share_data). New # seqnum+R pairs will trigger a check of (seqnum, R, IV, # segsize, signature). if shnum == 0: # read: this will trigger "pubkey doesn't match # fingerprint". 
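# Aside (illustrative): most of the corruptions below boil down to
# flip_bit(), defined later in this class, which toggles the low bit of
# a field's last byte (seqnum and segsize are instead bumped by 15).
# A standalone copy of that one-liner, e.g. b"pubkey" -> b"pubkex"
# because 0x79 ^ 0x01 == 0x78:
def _flip_last_bit(good):
    return good[:-1] + byteschr(ord(good[-1:]) ^ 0x01)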
self._corrupt_mutable_share(filename, "pubkey") self._corrupt_mutable_share(filename, "encprivkey") elif shnum == 1: # triggers "signature is invalid" self._corrupt_mutable_share(filename, "seqnum") elif shnum == 2: # triggers "signature is invalid" self._corrupt_mutable_share(filename, "R") elif shnum == 3: # triggers "signature is invalid" self._corrupt_mutable_share(filename, "segsize") elif shnum == 4: self._corrupt_mutable_share(filename, "share_hash_chain") elif shnum == 5: self._corrupt_mutable_share(filename, "block_hash_tree") elif shnum == 6: self._corrupt_mutable_share(filename, "share_data") # other things to correct: IV, signature # 7,8,9 are left alone # note that initial_query_count=5 means that we'll hit the # first 5 servers in effectively random order (based upon # response time), so we won't necessarily ever get a "pubkey # doesn't match fingerprint" error (if we hit shnum>=1 before # shnum=0, we pull the pubkey from there). To get repeatable # specific failures, we need to set initial_query_count=1, # but of course that will change the sequencing behavior of # the retrieval process. TODO: find a reasonable way to make # this a parameter, probably when we expand this test to test # for one failure mode at a time. # when we retrieve this, we should get three signature # failures (where we've mangled seqnum, R, and segsize). The # pubkey mangling if mutable_version == SDMF_VERSION: # TODO Corrupting shares in test_systm doesn't work for MDMF right now d.addCallback(_corrupt_shares) d.addCallback(lambda res: self._newnode3.download_best_version()) d.addCallback(_check_download_5) def _check_empty_file(res): # make sure we can create empty files, this usually screws up the # segsize math d1 = self.clients[2].create_mutable_file(MutableData(b""), mutable_version) d1.addCallback(lambda newnode: newnode.download_best_version()) d1.addCallback(lambda res: self.failUnlessEqual(b"", res)) return d1 d.addCallback(_check_empty_file) d.addCallback(lambda res: self.clients[0].create_dirnode()) def _created_dirnode(dnode): log.msg("_created_dirnode(%s)" % (dnode,)) d1 = dnode.list() d1.addCallback(lambda children: self.failUnlessEqual(children, {})) d1.addCallback(lambda res: dnode.has_child(u"edgar")) d1.addCallback(lambda answer: self.failUnlessEqual(answer, False)) d1.addCallback(lambda res: dnode.set_node(u"see recursive", dnode)) d1.addCallback(lambda res: dnode.has_child(u"see recursive")) d1.addCallback(lambda answer: self.failUnlessEqual(answer, True)) d1.addCallback(lambda res: dnode.build_manifest().when_done()) d1.addCallback(lambda res: self.failUnlessEqual(len(res["manifest"]), 1)) return d1 d.addCallback(_created_dirnode) return d def flip_bit(self, good): return good[:-1] + byteschr(ord(good[-1:]) ^ 0x01) def mangle_uri(self, gooduri): # change the key, which changes the storage index, which means we'll # be asking about the wrong file, so nobody will have any shares u = uri.from_string(gooduri) u2 = uri.CHKFileURI(key=self.flip_bit(u.key), uri_extension_hash=u.uri_extension_hash, needed_shares=u.needed_shares, total_shares=u.total_shares, size=u.size) return u2.to_string() # TODO: add a test which mangles the uri_extension_hash instead, and # should fail due to not being able to get a valid uri_extension block. # Also a test which sneakily mangles the uri_extension block to change # some of the validation data, so it will fail in the post-download phase # when the file's crypttext integrity check fails. 
Do the same thing for # the key, which should cause the download to fail the post-download # plaintext_hash check. def test_filesystem(self): self.data = LARGE_DATA d = self.set_up_nodes(2) def _new_happy_semantics(ign): for c in self.clients: c.encoding_params['happy'] = 1 d.addCallback(_new_happy_semantics) d.addCallback(self.log, "starting publish") d.addCallback(self._do_publish1) d.addCallback(self._test_runner) d.addCallback(self._do_publish2) # at this point, we have the following filesystem (where "R" denotes # self._root_directory_uri): # R # R/subdir1 # R/subdir1/mydata567 # R/subdir1/subdir2/ # R/subdir1/subdir2/mydata992 d.addCallback(lambda res: self.bounce_client(0)) d.addCallback(self.log, "bounced client0") d.addCallback(self._check_publish1) d.addCallback(self.log, "did _check_publish1") d.addCallback(self._check_publish2) d.addCallback(self.log, "did _check_publish2") d.addCallback(self._do_publish_private) d.addCallback(self.log, "did _do_publish_private") # now we also have (where "P" denotes a new dir): # P/personal/sekrit data # P/s2-rw -> /subdir1/subdir2/ # P/s2-ro -> /subdir1/subdir2/ (read-only) d.addCallback(self._check_publish_private) d.addCallback(self.log, "did _check_publish_private") d.addCallback(self._test_web) d.addCallback(self._test_cli) # P now has four top-level children: # P/personal/sekrit data # P/s2-ro/ # P/s2-rw/ # P/test_put/ (empty) d.addCallback(self._test_checker) return d def _do_publish1(self, res): ut = upload.Data(self.data, convergence=None) c0 = self.clients[0] d = c0.create_dirnode() def _made_root(new_dirnode): self._root_directory_uri = new_dirnode.get_uri() return c0.create_node_from_uri(self._root_directory_uri) d.addCallback(_made_root) d.addCallback(lambda root: root.create_subdirectory(u"subdir1")) def _made_subdir1(subdir1_node): self._subdir1_node = subdir1_node d1 = subdir1_node.add_file(u"mydata567", ut) d1.addCallback(self.log, "publish finished") def _stash_uri(filenode): self.uri = filenode.get_uri() assert isinstance(self.uri, bytes), (self.uri, filenode) d1.addCallback(_stash_uri) return d1 d.addCallback(_made_subdir1) return d def _do_publish2(self, res): ut = upload.Data(self.data, convergence=None) d = self._subdir1_node.create_subdirectory(u"subdir2") d.addCallback(lambda subdir2: subdir2.add_file(u"mydata992", ut)) return d def log(self, res, *args, **kwargs): # print("MSG: %s RES: %s" % (msg, args)) log.msg(*args, **kwargs) return res def _do_publish_private(self, res): self.smalldata = b"sssh, very secret stuff" ut = upload.Data(self.smalldata, convergence=None) d = self.clients[0].create_dirnode() d.addCallback(self.log, "GOT private directory") def _got_new_dir(privnode): rootnode = self.clients[0].create_node_from_uri(self._root_directory_uri) d1 = privnode.create_subdirectory(u"personal") d1.addCallback(self.log, "made P/personal") d1.addCallback(lambda node: node.add_file(u"sekrit data", ut)) d1.addCallback(self.log, "made P/personal/sekrit data") d1.addCallback(lambda res: rootnode.get_child_at_path([u"subdir1", u"subdir2"])) def _got_s2(s2node): d2 = privnode.set_uri(u"s2-rw", s2node.get_uri(), s2node.get_readonly_uri()) d2.addCallback(lambda node: privnode.set_uri(u"s2-ro", s2node.get_readonly_uri(), s2node.get_readonly_uri())) return d2 d1.addCallback(_got_s2) d1.addCallback(lambda res: privnode) return d1 d.addCallback(_got_new_dir) return d def _check_publish1(self, res): # this one uses the iterative API c1 = self.clients[1] d = defer.succeed(c1.create_node_from_uri(self._root_directory_uri)) 
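# Aside (a sketch, not part of the original test): "iterative API" here
# means walking the tree one child at a time; _check_publish2 below
# reaches the same file in a single get_child_at_path() call. Roughly,
# with a hypothetical helper:
def _fetch_iteratively(root):
    d2 = root.get(u"subdir1")
    d2.addCallback(lambda subdir1: subdir1.get(u"mydata567"))
    d2.addCallback(download_to_data)
    return d2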
d.addCallback(self.log, "check_publish1 got /") d.addCallback(lambda root: root.get(u"subdir1")) d.addCallback(lambda subdir1: subdir1.get(u"mydata567")) d.addCallback(lambda filenode: download_to_data(filenode)) d.addCallback(self.log, "get finished") def _get_done(data): self.failUnlessEqual(data, self.data) d.addCallback(_get_done) return d def _check_publish2(self, res): # this one uses the path-based API rootnode = self.clients[1].create_node_from_uri(self._root_directory_uri) d = rootnode.get_child_at_path(u"subdir1") d.addCallback(lambda dirnode: self.failUnless(IDirectoryNode.providedBy(dirnode))) d.addCallback(lambda res: rootnode.get_child_at_path(u"subdir1/mydata567")) d.addCallback(lambda filenode: download_to_data(filenode)) d.addCallback(lambda data: self.failUnlessEqual(data, self.data)) d.addCallback(lambda res: rootnode.get_child_at_path(u"subdir1/mydata567")) def _got_filenode(filenode): fnode = self.clients[1].create_node_from_uri(filenode.get_uri()) assert fnode == filenode d.addCallback(_got_filenode) return d def _check_publish_private(self, resnode): # this one uses the path-based API self._private_node = resnode d = self._private_node.get_child_at_path(u"personal") def _got_personal(personal): self._personal_node = personal return personal d.addCallback(_got_personal) d.addCallback(lambda dirnode: self.failUnless(IDirectoryNode.providedBy(dirnode), dirnode)) def get_path(path): return self._private_node.get_child_at_path(path) d.addCallback(lambda res: get_path(u"personal/sekrit data")) d.addCallback(lambda filenode: download_to_data(filenode)) d.addCallback(lambda data: self.failUnlessEqual(data, self.smalldata)) d.addCallback(lambda res: get_path(u"s2-rw")) d.addCallback(lambda dirnode: self.failUnless(dirnode.is_mutable())) d.addCallback(lambda res: get_path(u"s2-ro")) def _got_s2ro(dirnode): self.failUnless(dirnode.is_mutable(), dirnode) self.failUnless(dirnode.is_readonly(), dirnode) d1 = defer.succeed(None) d1.addCallback(lambda res: dirnode.list()) d1.addCallback(self.log, "dirnode.list") d1.addCallback(lambda res: self.shouldFail2(NotWriteableError, "mkdir(nope)", None, dirnode.create_subdirectory, u"nope")) d1.addCallback(self.log, "doing add_file(ro)") ut = upload.Data(b"I will disappear, unrecorded and unobserved. 
The tragedy of my demise is made more poignant by its silence, but this beauty is not for you to ever know.", convergence=b"99i-p1x4-xd4-18yc-ywt-87uu-msu-zo -- completely and totally unguessable string (unless you read this)") d1.addCallback(lambda res: self.shouldFail2(NotWriteableError, "add_file(nope)", None, dirnode.add_file, u"hope", ut)) d1.addCallback(self.log, "doing get(ro)") d1.addCallback(lambda res: dirnode.get(u"mydata992")) d1.addCallback(lambda filenode: self.failUnless(IFileNode.providedBy(filenode))) d1.addCallback(self.log, "doing delete(ro)") d1.addCallback(lambda res: self.shouldFail2(NotWriteableError, "delete(nope)", None, dirnode.delete, u"mydata992")) d1.addCallback(lambda res: self.shouldFail2(NotWriteableError, "set_uri(nope)", None, dirnode.set_uri, u"hopeless", self.uri, self.uri)) d1.addCallback(lambda res: self.shouldFail2(NoSuchChildError, "get(missing)", "missing", dirnode.get, u"missing")) personal = self._personal_node d1.addCallback(lambda res: self.shouldFail2(NotWriteableError, "mv from readonly", None, dirnode.move_child_to, u"mydata992", personal, u"nope")) d1.addCallback(self.log, "doing move_child_to(ro)2") d1.addCallback(lambda res: self.shouldFail2(NotWriteableError, "mv to readonly", None, personal.move_child_to, u"sekrit data", dirnode, u"nope")) d1.addCallback(self.log, "finished with _got_s2ro") return d1 d.addCallback(_got_s2ro) def _got_home(dummy): home = self._private_node personal = self._personal_node d1 = defer.succeed(None) d1.addCallback(self.log, "mv 'P/personal/sekrit data' to P/sekrit") d1.addCallback(lambda res: personal.move_child_to(u"sekrit data",home,u"sekrit")) d1.addCallback(self.log, "mv P/sekrit 'P/sekrit data'") d1.addCallback(lambda res: home.move_child_to(u"sekrit", home, u"sekrit data")) d1.addCallback(self.log, "mv 'P/sekret data' P/personal/") d1.addCallback(lambda res: home.move_child_to(u"sekrit data", personal)) d1.addCallback(lambda res: home.build_manifest().when_done()) d1.addCallback(self.log, "manifest") # five items: # P/ # P/personal/ # P/personal/sekrit data # P/s2-rw (same as P/s2-ro) # P/s2-rw/mydata992 (same as P/s2-rw/mydata992) d1.addCallback(lambda res: self.failUnlessEqual(len(res["manifest"]), 5)) d1.addCallback(lambda res: home.start_deep_stats().when_done()) def _check_stats(stats): expected = {"count-immutable-files": 1, "count-mutable-files": 0, "count-literal-files": 1, "count-files": 2, "count-directories": 3, "size-immutable-files": 112, "size-literal-files": 23, #"size-directories": 616, # varies #"largest-directory": 616, "largest-directory-children": 3, "largest-immutable-file": 112, } for k,v in list(expected.items()): self.failUnlessEqual(stats[k], v, "stats[%s] was %s, not %s" % (k, stats[k], v)) self.failUnless(stats["size-directories"] > 1300, stats["size-directories"]) self.failUnless(stats["largest-directory"] > 800, stats["largest-directory"]) self.failUnlessEqual(stats["size-files-histogram"], [ (11, 31, 1), (101, 316, 1) ]) d1.addCallback(_check_stats) return d1 d.addCallback(_got_home) return d def shouldFail(self, res, expected_failure, which, substring=None): if isinstance(res, Failure): res.trap(expected_failure) if substring: self.failUnless(substring in str(res), "substring '%s' not in '%s'" % (substring, str(res))) else: self.fail("%s was supposed to raise %s, not get '%s'" % (which, expected_failure, res)) def shouldFail2(self, expected_failure, which, substring, callable, *args, **kwargs): assert substring is None or isinstance(substring, str) d = 
defer.maybeDeferred(callable, *args, **kwargs) def done(res): if isinstance(res, Failure): res.trap(expected_failure) if substring: self.failUnless(substring in str(res), "substring '%s' not in '%s'" % (substring, str(res))) else: self.fail("%s was supposed to raise %s, not get '%s'" % (which, expected_failure, res)) d.addBoth(done) return d def PUT(self, urlpath, data): return do_http("put", self.webish_url + urlpath, data=data) def GET(self, urlpath): return do_http("get", self.webish_url + urlpath) def POST(self, urlpath, use_helper=False, **fields): sepbase = b"boogabooga" sep = b"--" + sepbase form = [] form.append(sep) form.append(b'Content-Disposition: form-data; name="_charset"') form.append(b'') form.append(b'UTF-8') form.append(sep) for name, value in fields.items(): if isinstance(value, tuple): filename, value = value form.append(b'Content-Disposition: form-data; name="%s"; ' b'filename="%s"' % (name.encode("utf-8"), filename.encode("utf-8"))) else: form.append(b'Content-Disposition: form-data; name="%s"' % name.encode("utf-8")) form.append(b'') form.append(b"%s" % (value,)) form.append(sep) form[-1] += b"--" body = b"" headers = {} if fields: body = b"\r\n".join(form) + b"\r\n" headers["content-type"] = "multipart/form-data; boundary=%s" % str(sepbase, "ascii") return self.POST2(urlpath, body, headers, use_helper) def POST2(self, urlpath, body=b"", headers=None, use_helper=False): if headers is None: headers = {} if use_helper: url = self.helper_webish_url + urlpath else: url = self.webish_url + urlpath return do_http("post", url, data=body, headers=headers) def _test_web(self, res): public = "uri/" + str(self._root_directory_uri, "ascii") d = self.GET("") def _got_welcome(page): html = page.replace('\n', ' ') connected_re = r'Connected to %d\s*of %d known storage servers' % (self.numclients, self.numclients) self.failUnless(re.search(connected_re, html), "I didn't see the right '%s' message in:\n%s" % (connected_re, page)) # nodeids/tubids don't have any regexp-special characters nodeid_re = r'Node ID:\s*%s' % ( self.clients[0].get_long_tubid(), str(self.clients[0].get_long_nodeid(), "ascii")) self.failUnless(re.search(nodeid_re, html), "I didn't see the right '%s' message in:\n%s" % (nodeid_re, page)) self.failUnless("Helper: 0 active uploads" in page) d.addCallback(_got_welcome) d.addCallback(self.log, "done with _got_welcome") # get the welcome page from the node that uses the helper too d.addCallback(lambda res: do_http("get", self.helper_webish_url)) def _got_welcome_helper(page): soup = BeautifulSoup(page, 'html5lib') assert_soup_has_tag_with_attributes( self, soup, u"img", { u"alt": u"Connected", u"src": u"img/connected-yes.png" } ) self.failUnlessIn("Not running helper", page) d.addCallback(_got_welcome_helper) d.addCallback(lambda res: self.GET(public)) d.addCallback(lambda res: self.GET(public + "/subdir1")) def _got_subdir1(page): # there ought to be an href for our file self.failUnlessIn('%d' % len(self.data), page) self.failUnless(">mydata567" in page) d.addCallback(_got_subdir1) d.addCallback(self.log, "done with _got_subdir1") d.addCallback(lambda res: self.GET(public + "/subdir1/mydata567")) def _got_data(page): self.failUnlessEqual(page.encode("utf-8"), self.data) d.addCallback(_got_data) # download from a URI embedded in a URL d.addCallback(self.log, "_get_from_uri") def _get_from_uri(res): return self.GET("uri/%s?filename=%s" % (str(self.uri, "utf-8"), "mydata567")) d.addCallback(_get_from_uri) def _got_from_uri(page): 
self.failUnlessEqual(page.encode("utf-8"), self.data) d.addCallback(_got_from_uri) # download from a URI embedded in a URL, second form d.addCallback(self.log, "_get_from_uri2") def _get_from_uri2(res): return self.GET("uri?uri=%s" % (str(self.uri, "utf-8"),)) d.addCallback(_get_from_uri2) d.addCallback(_got_from_uri) # download from a bogus URI, make sure we get a reasonable error d.addCallback(self.log, "_get_from_bogus_uri", level=log.UNUSUAL) @defer.inlineCallbacks def _get_from_bogus_uri(res): d1 = self.GET("uri/%s?filename=%s" % (str(self.mangle_uri(self.uri), "utf-8"), "mydata567")) e = yield self.assertFailure(d1, Error) self.assertEquals(e.status, b"410") d.addCallback(_get_from_bogus_uri) d.addCallback(self.log, "_got_from_bogus_uri", level=log.UNUSUAL) # upload a file with PUT d.addCallback(self.log, "about to try PUT") d.addCallback(lambda res: self.PUT(public + "/subdir3/new.txt", b"new.txt contents")) d.addCallback(lambda res: self.GET(public + "/subdir3/new.txt")) d.addCallback(self.failUnlessEqual, "new.txt contents") # and again with something large enough to use multiple segments, # and hopefully trigger pauseProducing too def _new_happy_semantics(ign): for c in self.clients: # these get reset somewhere? Whatever. c.encoding_params['happy'] = 1 d.addCallback(_new_happy_semantics) d.addCallback(lambda res: self.PUT(public + "/subdir3/big.txt", b"big" * 500000)) # 1.5MB d.addCallback(lambda res: self.GET(public + "/subdir3/big.txt")) d.addCallback(lambda res: self.failUnlessEqual(len(res), 1500000)) # can we replace files in place? d.addCallback(lambda res: self.PUT(public + "/subdir3/new.txt", b"NEWER contents")) d.addCallback(lambda res: self.GET(public + "/subdir3/new.txt")) d.addCallback(self.failUnlessEqual, "NEWER contents") # test unlinked POST d.addCallback(lambda res: self.POST("uri", t=b"upload", file=("new.txt", b"data" * 10000))) # and again using the helper, which exercises different upload-status # display code d.addCallback(lambda res: self.POST("uri", use_helper=True, t=b"upload", file=("foo.txt", b"data2" * 10000))) # check that the status page exists d.addCallback(lambda res: self.GET("status")) def _got_status(res): # find an interesting upload and download to look at. LIT files # are not interesting. 
h = self.clients[0].get_history() for ds in h.list_all_download_statuses(): if ds.get_size() > 200: self._down_status = ds.get_counter() for us in h.list_all_upload_statuses(): if us.get_size() > 200: self._up_status = us.get_counter() rs = list(h.list_all_retrieve_statuses())[0] self._retrieve_status = rs.get_counter() ps = list(h.list_all_publish_statuses())[0] self._publish_status = ps.get_counter() us = list(h.list_all_mapupdate_statuses())[0] self._update_status = us.get_counter() # and that there are some upload- and download- status pages return self.GET("status/up-%d" % self._up_status) d.addCallback(_got_status) def _got_up(res): return self.GET("status/down-%d" % self._down_status) d.addCallback(_got_up) def _got_down(res): return self.GET("status/mapupdate-%d" % self._update_status) d.addCallback(_got_down) def _got_update(res): return self.GET("status/publish-%d" % self._publish_status) d.addCallback(_got_update) def _got_publish(res): self.failUnlessIn("Publish Results", res) return self.GET("status/retrieve-%d" % self._retrieve_status) d.addCallback(_got_publish) def _got_retrieve(res): self.failUnlessIn("Retrieve Results", res) d.addCallback(_got_retrieve) # check that the helper status page exists d.addCallback(lambda res: self.GET("helper_status")) def _got_helper_status(res): self.failUnless("Bytes Fetched:" in res) # touch a couple of files in the helper's working directory to # exercise more code paths workdir = os.path.join(self.getdir("client0"), "helper") incfile = os.path.join(workdir, "CHK_incoming", "spurious") f = open(incfile, "wb") f.write(b"small file") f.close() then = time.time() - 86400*3 now = time.time() os.utime(incfile, (now, then)) encfile = os.path.join(workdir, "CHK_encoding", "spurious") f = open(encfile, "wb") f.write(b"less small file") f.close() os.utime(encfile, (now, then)) d.addCallback(_got_helper_status) # and that the json form exists d.addCallback(lambda res: self.GET("helper_status?t=json")) def _got_helper_status_json(res): data = json.loads(res) self.failUnlessEqual(data["chk_upload_helper.upload_need_upload"], 1) self.failUnlessEqual(data["chk_upload_helper.incoming_count"], 1) self.failUnlessEqual(data["chk_upload_helper.incoming_size"], 10) self.failUnlessEqual(data["chk_upload_helper.incoming_size_old"], 10) self.failUnlessEqual(data["chk_upload_helper.encoding_count"], 1) self.failUnlessEqual(data["chk_upload_helper.encoding_size"], 15) self.failUnlessEqual(data["chk_upload_helper.encoding_size_old"], 15) d.addCallback(_got_helper_status_json) # and check that client[3] (which uses a helper but does not run one # itself) doesn't explode when you ask for its status d.addCallback(lambda res: do_http("get", self.helper_webish_url + "status/")) def _got_non_helper_status(res): self.failUnlessIn("Recent and Active Operations", res) d.addCallback(_got_non_helper_status) # or for helper status with t=json d.addCallback(lambda res: do_http("get", self.helper_webish_url + "helper_status?t=json")) def _got_non_helper_status_json(res): data = json.loads(res) self.failUnlessEqual(data, {}) d.addCallback(_got_non_helper_status_json) # see if the statistics page exists d.addCallback(lambda res: self.GET("statistics")) def _got_stats(res): self.failUnlessIn("Operational Statistics", res) self.failUnlessIn(' "downloader.files_downloaded": 5,', res) d.addCallback(_got_stats) d.addCallback(lambda res: self.GET("statistics?t=json")) def _got_stats_json(res): data = json.loads(res) self.failUnlessEqual(data["counters"]["uploader.files_uploaded"], 5) 
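# Aside (a sketch of the JSON shape these assertions rely on, with the
# values taken from the surrounding checks): the statistics page returns
# two top-level maps, roughly cumulative "counters" and point-in-time
# "stats".
_example_stats_shape = {
    "counters": {"uploader.files_uploaded": 5},
    "stats": {"chk_upload_helper.upload_need_upload": 1},
}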
self.failUnlessEqual(data["stats"]["chk_upload_helper.upload_need_upload"], 1) d.addCallback(_got_stats_json) # TODO: mangle the second segment of a file, to test errors that # occur after we've already sent some good data, which uses a # different error path. # TODO: download a URI with a form # TODO: create a directory by using a form # TODO: upload by using a form on the directory page # url = base + "somedir/subdir1/freeform_post!!upload" # TODO: delete a file by using a button on the directory page return d @defer.inlineCallbacks def _test_runner(self, res): # exercise some of the diagnostic tools in runner.py # find a share for (dirpath, dirnames, filenames) in os.walk(ensure_text(self.basedir)): if "storage" not in dirpath: continue if not filenames: continue pieces = dirpath.split(os.sep) if (len(pieces) >= 4 and pieces[-4] == "storage" and pieces[-3] == "shares"): # we're sitting in .../storage/shares/$START/$SINDEX , and there # are sharefiles here filename = os.path.join(dirpath, filenames[0]) # peek at the magic to see if it is a chk share with open(filename, "rb") as f: if ShareFile.is_valid_header(f.read(32)): break else: self.fail("unable to find any uri_extension files in %r" % self.basedir) log.msg("test_system.SystemTest._test_runner using %r" % filename) rc,output,err = yield run_cli("debug", "dump-share", "--offsets", unicode_to_argv(filename)) self.failUnlessEqual(rc, 0) # we only upload a single file, so we can assert some things about # its size and shares. self.failUnlessIn("share filename: %s" % quote_output(abspath_expanduser_unicode(filename)), output) self.failUnlessIn("size: %d\n" % len(self.data), output) self.failUnlessIn("num_segments: 1\n", output) # segment_size is always a multiple of needed_shares self.failUnlessIn("segment_size: %d\n" % mathutil.next_multiple(len(self.data), 3), output) self.failUnlessIn("total_shares: 10\n", output) # keys which are supposed to be present for key in ("size", "num_segments", "segment_size", "needed_shares", "total_shares", "codec_name", "codec_params", "tail_codec_params", #"plaintext_hash", "plaintext_root_hash", "crypttext_hash", "crypttext_root_hash", "share_root_hash", "UEB_hash"): self.failUnlessIn("%s: " % key, output) self.failUnlessIn(" verify-cap: URI:CHK-Verifier:", output) # now use its storage index to find the other shares using the # 'find-shares' tool sharedir, shnum = os.path.split(filename) storagedir, storage_index_s = os.path.split(sharedir) nodedirs = [self.getdir("client%d" % i) for i in range(self.numclients)] rc,out,err = yield run_cli("debug", "find-shares", storage_index_s, *nodedirs) self.failUnlessEqual(rc, 0) sharefiles = [sfn.strip() for sfn in out.splitlines()] self.failUnlessEqual(len(sharefiles), 10) # also exercise the 'catalog-shares' tool nodedirs = [self.getdir("client%d" % i) for i in range(self.numclients)] rc,out,err = yield run_cli("debug", "catalog-shares", *nodedirs) self.failUnlessEqual(rc, 0) descriptions = [sfn.strip() for sfn in out.splitlines()] self.failUnlessEqual(len(descriptions), 30) matching = [line for line in descriptions if line.startswith("CHK %s " % storage_index_s)] self.failUnlessEqual(len(matching), 10) def _test_cli(self, res): # run various CLI commands (in a thread, since they use blocking # network calls) private_uri = self._private_node.get_uri() client0_basedir = self.getdir("client0") nodeargs = [ "--node-directory", client0_basedir, ] d = defer.succeed(None) # for compatibility with earlier versions, private/root_dir.cap is # supposed to be treated as 
an alias named "tahoe:". Start by making # sure that works, before we add other aliases. root_file = os.path.join(client0_basedir, "private", "root_dir.cap") f = open(root_file, "wb") f.write(private_uri) f.close() @defer.inlineCallbacks def run(ignored, verb, *args, **kwargs): rc,out,err = yield run_cli(verb, *args, nodeargs=nodeargs, **kwargs) defer.returnValue((out,err)) def _check_ls(out_and_err, expected_children, unexpected_children=()): (out, err) = out_and_err self.failUnlessEqual(err, "") for s in expected_children: self.failUnless(s in out, (s,out)) for s in unexpected_children: self.failIf(s in out, (s,out)) def _check_ls_root(out_and_err): (out, err) = out_and_err self.failUnless("personal" in out) self.failUnless("s2-ro" in out) self.failUnless("s2-rw" in out) self.failUnlessEqual(err, "") # this should reference private_uri d.addCallback(run, "ls") d.addCallback(_check_ls, ["personal", "s2-ro", "s2-rw"]) d.addCallback(run, "list-aliases") def _check_aliases_1(out_and_err): (out, err) = out_and_err self.failUnlessEqual(err, "") self.failUnlessEqual(out.strip(" \n"), "tahoe: %s" % str(private_uri, "ascii")) d.addCallback(_check_aliases_1) # now that that's out of the way, remove root_dir.cap and work with # new files d.addCallback(lambda res: os.unlink(root_file)) d.addCallback(run, "list-aliases") def _check_aliases_2(out_and_err): (out, err) = out_and_err self.failUnlessEqual(err, "") self.failUnlessEqual(out, "") d.addCallback(_check_aliases_2) d.addCallback(run, "mkdir") def _got_dir(out_and_err ): (out, err) = out_and_err self.failUnless(uri.from_string_dirnode(out.strip())) return out.strip() d.addCallback(_got_dir) d.addCallback(lambda newcap: run(None, "add-alias", "tahoe", newcap)) d.addCallback(run, "list-aliases") def _check_aliases_3(out_and_err): (out, err) = out_and_err self.failUnlessEqual(err, "") self.failUnless("tahoe: " in out) d.addCallback(_check_aliases_3) def _check_empty_dir(out_and_err): (out, err) = out_and_err self.failUnlessEqual(out, "") self.failUnlessEqual(err, "") d.addCallback(run, "ls") d.addCallback(_check_empty_dir) def _check_missing_dir(out_and_err): # TODO: check that rc==2 (out, err) = out_and_err self.failUnlessEqual(out, "") self.failUnlessEqual(err, "No such file or directory\n") d.addCallback(run, "ls", "bogus") d.addCallback(_check_missing_dir) files = [] datas = [] for i in range(10): fn = os.path.join(self.basedir, "file%d" % i) files.append(fn) data = b"data to be uploaded: file%d\n" % i datas.append(data) with open(fn, "wb") as f: f.write(data) def _check_stdout_against(out_and_err, filenum=None, data=None): (out, err) = out_and_err self.failUnlessEqual(err, "") if filenum is not None: self.failUnlessEqual(out, str(datas[filenum], "ascii")) if data is not None: self.failUnlessEqual(out, data) # test all both forms of put: from a file, and from stdin # tahoe put bar FOO d.addCallback(run, "put", files[0], "tahoe-file0") def _put_out(out_and_err): (out, err) = out_and_err self.failUnless("URI:LIT:" in out, out) self.failUnless("201 Created" in err, err) uri0 = out.strip() return run(None, "get", uri0) d.addCallback(_put_out) d.addCallback(lambda out_err: self.failUnlessEqual(out_err[0], str(datas[0], "ascii"))) d.addCallback(run, "put", files[1], "subdir/tahoe-file1") # tahoe put bar tahoe:FOO d.addCallback(run, "put", files[2], "tahoe:file2") d.addCallback(run, "put", "--format=SDMF", files[3], "tahoe:file3") def _check_put_mutable(out_and_err): (out, err) = out_and_err self._mutable_file3_uri = out.strip() 
d.addCallback(_check_put_mutable) d.addCallback(run, "get", "tahoe:file3") d.addCallback(_check_stdout_against, 3) # tahoe put FOO STDIN_DATA = "This is the file to upload from stdin." d.addCallback(run, "put", "-", "tahoe-file-stdin", stdin=STDIN_DATA) # tahoe put tahoe:FOO d.addCallback(run, "put", "-", "tahoe:from-stdin", stdin="Other file from stdin.") d.addCallback(run, "ls") d.addCallback(_check_ls, ["tahoe-file0", "file2", "file3", "subdir", "tahoe-file-stdin", "from-stdin"]) d.addCallback(run, "ls", "subdir") d.addCallback(_check_ls, ["tahoe-file1"]) # tahoe mkdir FOO d.addCallback(run, "mkdir", "subdir2") d.addCallback(run, "ls") # TODO: extract the URI, set an alias with it d.addCallback(_check_ls, ["subdir2"]) # tahoe get: (to stdin and to a file) d.addCallback(run, "get", "tahoe-file0") d.addCallback(_check_stdout_against, 0) d.addCallback(run, "get", "tahoe:subdir/tahoe-file1") d.addCallback(_check_stdout_against, 1) outfile0 = os.path.join(self.basedir, "outfile0") d.addCallback(run, "get", "file2", outfile0) def _check_outfile0(out_and_err): (out, err) = out_and_err data = open(outfile0,"rb").read() self.failUnlessEqual(data, b"data to be uploaded: file2\n") d.addCallback(_check_outfile0) outfile1 = os.path.join(self.basedir, "outfile0") d.addCallback(run, "get", "tahoe:subdir/tahoe-file1", outfile1) def _check_outfile1(out_and_err): (out, err) = out_and_err data = open(outfile1,"rb").read() self.failUnlessEqual(data, b"data to be uploaded: file1\n") d.addCallback(_check_outfile1) d.addCallback(run, "unlink", "tahoe-file0") d.addCallback(run, "unlink", "tahoe:file2") d.addCallback(run, "ls") d.addCallback(_check_ls, [], ["tahoe-file0", "file2"]) d.addCallback(run, "ls", "-l") def _check_ls_l(out_and_err): (out, err) = out_and_err lines = out.split("\n") for l in lines: if "tahoe-file-stdin" in l: self.failUnless(l.startswith("-r-- "), l) self.failUnless(" %d " % len(STDIN_DATA) in l) if "file3" in l: self.failUnless(l.startswith("-rw- "), l) # mutable d.addCallback(_check_ls_l) d.addCallback(run, "ls", "--uri") def _check_ls_uri(out_and_err): (out, err) = out_and_err lines = out.split("\n") for l in lines: if "file3" in l: self.failUnless(self._mutable_file3_uri in l) d.addCallback(_check_ls_uri) d.addCallback(run, "ls", "--readonly-uri") def _check_ls_rouri(out_and_err): (out, err) = out_and_err lines = out.split("\n") for l in lines: if "file3" in l: rw_uri = self._mutable_file3_uri u = uri.from_string_mutable_filenode(rw_uri) ro_uri = str(u.get_readonly().to_string(), "ascii") self.failUnless(ro_uri in l) d.addCallback(_check_ls_rouri) d.addCallback(run, "mv", "tahoe-file-stdin", "tahoe-moved") d.addCallback(run, "ls") d.addCallback(_check_ls, ["tahoe-moved"], ["tahoe-file-stdin"]) d.addCallback(run, "ln", "tahoe-moved", "newlink") d.addCallback(run, "ls") d.addCallback(_check_ls, ["tahoe-moved", "newlink"]) d.addCallback(run, "cp", "tahoe:file3", "tahoe:file3-copy") d.addCallback(run, "ls") d.addCallback(_check_ls, ["file3", "file3-copy"]) d.addCallback(run, "get", "tahoe:file3-copy") d.addCallback(_check_stdout_against, 3) # copy from disk into tahoe d.addCallback(run, "cp", files[4], "tahoe:file4") d.addCallback(run, "ls") d.addCallback(_check_ls, ["file3", "file3-copy", "file4"]) d.addCallback(run, "get", "tahoe:file4") d.addCallback(_check_stdout_against, 4) # copy from tahoe into disk target_filename = os.path.join(self.basedir, "file-out") d.addCallback(run, "cp", "tahoe:file4", target_filename) def _check_cp_out(out_and_err): (out, err) = out_and_err 
self.failUnless(os.path.exists(target_filename)) got = open(target_filename,"rb").read() self.failUnlessEqual(got, datas[4]) d.addCallback(_check_cp_out) # copy from disk to disk (silly case) target2_filename = os.path.join(self.basedir, "file-out-copy") d.addCallback(run, "cp", target_filename, target2_filename) def _check_cp_out2(out_and_err): (out, err) = out_and_err self.failUnless(os.path.exists(target2_filename)) got = open(target2_filename,"rb").read() self.failUnlessEqual(got, datas[4]) d.addCallback(_check_cp_out2) # copy from tahoe into disk, overwriting an existing file d.addCallback(run, "cp", "tahoe:file3", target_filename) def _check_cp_out3(out_and_err): (out, err) = out_and_err self.failUnless(os.path.exists(target_filename)) got = open(target_filename,"rb").read() self.failUnlessEqual(got, datas[3]) d.addCallback(_check_cp_out3) # copy from disk into tahoe, overwriting an existing immutable file d.addCallback(run, "cp", files[5], "tahoe:file4") d.addCallback(run, "ls") d.addCallback(_check_ls, ["file3", "file3-copy", "file4"]) d.addCallback(run, "get", "tahoe:file4") d.addCallback(_check_stdout_against, 5) # copy from disk into tahoe, overwriting an existing mutable file d.addCallback(run, "cp", files[5], "tahoe:file3") d.addCallback(run, "ls") d.addCallback(_check_ls, ["file3", "file3-copy", "file4"]) d.addCallback(run, "get", "tahoe:file3") d.addCallback(_check_stdout_against, 5) # recursive copy: setup dn = os.path.join(self.basedir, "dir1") os.makedirs(dn) with open(os.path.join(dn, "rfile1"), "wb") as f: f.write(b"rfile1") with open(os.path.join(dn, "rfile2"), "wb") as f: f.write(b"rfile2") with open(os.path.join(dn, "rfile3"), "wb") as f: f.write(b"rfile3") sdn2 = os.path.join(dn, "subdir2") os.makedirs(sdn2) with open(os.path.join(sdn2, "rfile4"), "wb") as f: f.write(b"rfile4") with open(os.path.join(sdn2, "rfile5"), "wb") as f: f.write(b"rfile5") # from disk into tahoe d.addCallback(run, "cp", "-r", dn, "tahoe:") d.addCallback(run, "ls") d.addCallback(_check_ls, ["dir1"]) d.addCallback(run, "ls", "dir1") d.addCallback(_check_ls, ["rfile1", "rfile2", "rfile3", "subdir2"], ["rfile4", "rfile5"]) d.addCallback(run, "ls", "tahoe:dir1/subdir2") d.addCallback(_check_ls, ["rfile4", "rfile5"], ["rfile1", "rfile2", "rfile3"]) d.addCallback(run, "get", "dir1/subdir2/rfile4") d.addCallback(_check_stdout_against, data="rfile4") # and back out again dn_copy = os.path.join(self.basedir, "dir1-copy") d.addCallback(run, "cp", "--verbose", "-r", "tahoe:dir1", dn_copy) def _check_cp_r_out(out_and_err): (out, err) = out_and_err def _cmp(name): old = open(os.path.join(dn, name), "rb").read() newfn = os.path.join(dn_copy, "dir1", name) self.failUnless(os.path.exists(newfn)) new = open(newfn, "rb").read() self.failUnlessEqual(old, new) _cmp("rfile1") _cmp("rfile2") _cmp("rfile3") _cmp(os.path.join("subdir2", "rfile4")) _cmp(os.path.join("subdir2", "rfile5")) d.addCallback(_check_cp_r_out) # and copy it a second time, which ought to overwrite the same files d.addCallback(run, "cp", "-r", "tahoe:dir1", dn_copy) # and again, only writing filecaps dn_copy2 = os.path.join(self.basedir, "dir1-copy-capsonly") d.addCallback(run, "cp", "-r", "--caps-only", "tahoe:dir1", dn_copy2) def _check_capsonly(out_and_err): # these should all be LITs (out, err) = out_and_err x = open(os.path.join(dn_copy2, "dir1", "subdir2", "rfile4")).read() y = uri.from_string_filenode(x) self.failUnlessEqual(y.data, b"rfile4") d.addCallback(_check_capsonly) # and tahoe-to-tahoe d.addCallback(run, "cp", "-r", 
"tahoe:dir1", "tahoe:dir1-copy") d.addCallback(run, "ls") d.addCallback(_check_ls, ["dir1", "dir1-copy"]) d.addCallback(run, "ls", "dir1-copy/dir1") d.addCallback(_check_ls, ["rfile1", "rfile2", "rfile3", "subdir2"], ["rfile4", "rfile5"]) d.addCallback(run, "ls", "tahoe:dir1-copy/dir1/subdir2") d.addCallback(_check_ls, ["rfile4", "rfile5"], ["rfile1", "rfile2", "rfile3"]) d.addCallback(run, "get", "dir1-copy/dir1/subdir2/rfile4") d.addCallback(_check_stdout_against, data="rfile4") # and copy it a second time, which ought to overwrite the same files d.addCallback(run, "cp", "-r", "tahoe:dir1", "tahoe:dir1-copy") # tahoe_ls doesn't currently handle the error correctly: it tries to # JSON-parse a traceback. ## def _ls_missing(res): ## argv = nodeargs + ["ls", "bogus"] ## return self._run_cli(argv) ## d.addCallback(_ls_missing) ## def _check_ls_missing((out,err)): ## print("OUT", out) ## print("ERR", err) ## self.failUnlessEqual(err, "") ## d.addCallback(_check_ls_missing) return d # In CI this test can be very slow, so give it a longer timeout: test_filesystem.timeout = 360 # type: ignore[attr-defined] def test_filesystem_with_cli_in_subprocess(self): # We do this in a separate test so that test_filesystem doesn't skip if we can't run bin/tahoe. d = self.set_up_nodes() def _new_happy_semantics(ign): for c in self.clients: c.encoding_params['happy'] = 1 d.addCallback(_new_happy_semantics) def _run_in_subprocess(ignored, verb, *args, **kwargs): stdin = kwargs.get("stdin") # XXX https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3548 env = kwargs.get("env", os.environ) # Python warnings from the child process don't matter. env["PYTHONWARNINGS"] = "ignore" newargs = ["--node-directory", self.getdir("client0"), verb] + list(args) return self.run_bintahoe(newargs, stdin=stdin, env=env) def _check_succeeded(res): out, err, rc_or_sig = res self.failUnlessEqual(rc_or_sig, 0, str(res)) d.addCallback(_run_in_subprocess, "create-alias", "newalias") d.addCallback(_check_succeeded) STDIN_DATA = b"This is the file to upload from stdin." 
d.addCallback(_run_in_subprocess, "put", "-", "newalias:tahoe-file", stdin=STDIN_DATA) d.addCallback(_check_succeeded) def _mv_with_http_proxy(ign): env = os.environ env['http_proxy'] = env['HTTP_PROXY'] = "http://127.0.0.0:12345" # invalid address return _run_in_subprocess(None, "mv", "newalias:tahoe-file", "newalias:tahoe-moved", env=env) d.addCallback(_mv_with_http_proxy) d.addCallback(_check_succeeded) d.addCallback(_run_in_subprocess, "ls", "newalias:") def _check_ls(res): out, err, rc_or_sig = res self.failUnlessEqual(rc_or_sig, 0, str(res)) self.failUnlessIn(b"tahoe-moved", out) self.failIfIn(b"tahoe-file", out) d.addCallback(_check_ls) return d def _test_checker(self, res): ut = upload.Data(b"too big to be literal" * 200, convergence=None) d = self._personal_node.add_file(u"big file", ut) d.addCallback(lambda res: self._personal_node.check(Monitor())) def _check_dirnode_results(r): self.failUnless(r.is_healthy()) d.addCallback(_check_dirnode_results) d.addCallback(lambda res: self._personal_node.check(Monitor(), verify=True)) d.addCallback(_check_dirnode_results) d.addCallback(lambda res: self._personal_node.get(u"big file")) def _got_chk_filenode(n): self.failUnless(isinstance(n, ImmutableFileNode)) d = n.check(Monitor()) def _check_filenode_results(r): self.failUnless(r.is_healthy()) d.addCallback(_check_filenode_results) d.addCallback(lambda res: n.check(Monitor(), verify=True)) d.addCallback(_check_filenode_results) return d d.addCallback(_got_chk_filenode) d.addCallback(lambda res: self._personal_node.get(u"sekrit data")) def _got_lit_filenode(n): self.failUnless(isinstance(n, LiteralFileNode)) d = n.check(Monitor()) def _check_lit_filenode_results(r): self.failUnlessEqual(r, None) d.addCallback(_check_lit_filenode_results) d.addCallback(lambda res: n.check(Monitor(), verify=True)) d.addCallback(_check_lit_filenode_results) return d d.addCallback(_got_lit_filenode) return d class Connections(SystemTestMixin, unittest.TestCase): FORCE_FOOLSCAP_FOR_STORAGE = True def test_rref(self): # The way the listening port is created is via # SameProcessStreamEndpointAssigner (allmydata.test.common), which then # makes an endpoint string parsed by AdoptedServerPort. The latter does # dup(fd), which results in the filedescriptor staying alive _until the # test ends_. That means that when we disown the service, we still have # the listening port there on the OS level! Just the resulting # connections aren't handled. So this test relies on aggressive # timeouts in the HTTP client and presumably some equivalent in # Foolscap, since connection refused does _not_ happen. 
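        # Illustrative sketch (not from the original test) of the os.dup()
        # behaviour described above: a duplicated descriptor keeps the
        # underlying listening socket open until every descriptor referring
        # to it has been closed.
        #
        #   >>> import socket, os
        #   >>> s = socket.socket()
        #   >>> s.bind(("127.0.0.1", 0)); s.listen(1)
        #   >>> fd2 = os.dup(s.fileno())
        #   >>> s.close()       # the OS-level listening port is still open...
        #   >>> os.close(fd2)   # ...until the duplicate is closed too
        #
        # Hence the test below cannot rely on connection-refused; it has to
        # poll until the client notices the server has gone away.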
self.basedir = "system/Connections/rref-foolscap-{}".format( self.FORCE_FOOLSCAP_FOR_STORAGE ) d = self.set_up_nodes(2) def _start(ign): self.c0 = self.clients[0] nonclients = [s for s in self.c0.storage_broker.get_connected_servers() if s.get_serverid() != self.c0.get_long_nodeid()] self.failUnlessEqual(len(nonclients), 1) self.s1 = nonclients[0] # s1 is the server, not c0 self.s1_storage_server = self.s1.get_storage_server() self.assertIsNot(self.s1_storage_server, None) self.assertTrue(self.s1.is_connected()) d.addCallback(_start) # now shut down the server d.addCallback(lambda ign: self.clients[1].disownServiceParent()) # kill any persistent http connections that might continue to work d.addCallback(lambda ign: self.close_idle_http_connections()) # and wait for the client to notice def _poll(): return len(self.c0.storage_broker.get_connected_servers()) == 1 d.addCallback(lambda ign: self.poll(_poll)) def _down(ign): self.assertFalse(self.s1.is_connected()) storage_server = self.s1.get_storage_server() self.assertIsNot(storage_server, None) self.assertEqual(storage_server, self.s1_storage_server) d.addCallback(_down) return d class HTTPSystemTest(SystemTest): """HTTP storage protocol variant of the system tests.""" FORCE_FOOLSCAP_FOR_STORAGE = False class HTTPConnections(Connections): """HTTP storage protocol variant of the connections tests.""" FORCE_FOOLSCAP_FOR_STORAGE = False tahoe_lafs-1.20.0/src/allmydata/test/test_testing.py0000644000000000000000000001064113615410400017425 0ustar00# -*- coding: utf-8 -*- # Tahoe-LAFS -- secure, distributed storage grid # # Copyright © 2020 The Tahoe-LAFS Software Foundation # # This file is part of Tahoe-LAFS. # # See the docs/about.rst file for licensing information. """ Tests for the allmydata.testing helpers """ from twisted.internet.defer import ( inlineCallbacks, ) from allmydata.uri import ( from_string, CHKFileURI, ) from allmydata.testing.web import ( create_tahoe_treq_client, capability_generator, ) from hyperlink import ( DecodedURL, ) from hypothesis import ( given, ) from hypothesis.strategies import ( binary, ) from .common import ( SyncTestCase, ) from testtools.matchers import ( Always, Equals, IsInstance, MatchesStructure, AfterPreprocessing, Contains, ) from testtools.twistedsupport import ( succeeded, ) from twisted.web.http import GONE class FakeWebTest(SyncTestCase): """ Test the WebUI verified-fakes infrastucture """ # Note: do NOT use setUp() because Hypothesis doesn't work # properly with it. You must instead do all fixture-type work # yourself in each test. 
@given( content=binary(), ) def test_create_and_download(self, content): """ Upload some content (via 'PUT /uri') and then download it (via 'GET /uri?uri=...') """ http_client = create_tahoe_treq_client() @inlineCallbacks def do_test(): resp = yield http_client.put("http://example.com/uri", content) self.assertThat(resp.code, Equals(201)) cap_raw = yield resp.content() cap = from_string(cap_raw) self.assertThat(cap, IsInstance(CHKFileURI)) resp = yield http_client.get( b"http://example.com/uri?uri=" + cap.to_string() ) self.assertThat(resp.code, Equals(200)) round_trip_content = yield resp.content() # using the form "/uri/" is also valid resp = yield http_client.get( b"http://example.com/uri?uri=" + cap.to_string() ) self.assertEqual(resp.code, 200) round_trip_content = yield resp.content() self.assertEqual(content, round_trip_content) self.assertThat( do_test(), succeeded(Always()), ) @given( content=binary(), ) def test_duplicate_upload(self, content): """ Upload the same content (via 'PUT /uri') twice """ http_client = create_tahoe_treq_client() @inlineCallbacks def do_test(): resp = yield http_client.put("http://example.com/uri", content) self.assertEqual(resp.code, 201) cap_raw = yield resp.content() self.assertThat( cap_raw, AfterPreprocessing( from_string, IsInstance(CHKFileURI) ) ) resp = yield http_client.put("http://example.com/uri", content) self.assertThat(resp.code, Equals(200)) self.assertThat( do_test(), succeeded(Always()), ) def test_download_missing(self): """ The response to a request to download a capability that doesn't exist is 410 (GONE). """ http_client = create_tahoe_treq_client() cap_gen = capability_generator(b"URI:CHK:") cap = next(cap_gen).decode('ascii') uri = DecodedURL.from_text(u"http://example.com/uri?uri={}".format(cap)) resp = http_client.get(uri.to_uri().to_text()) self.assertThat( resp, succeeded( MatchesStructure( code=Equals(GONE), content=AfterPreprocessing( lambda m: m(), succeeded(Contains(b"No data for")), ), ) ) ) def test_download_no_arg(self): """ Error if we GET from "/uri" with no ?uri= query-arg """ http_client = create_tahoe_treq_client() uri = DecodedURL.from_text(u"http://example.com/uri/") resp = http_client.get(uri.to_uri().to_text()) self.assertThat( resp, succeeded( MatchesStructure( code=Equals(400) ) ) ) tahoe_lafs-1.20.0/src/allmydata/test/test_time_format.py0000644000000000000000000001554713615410400020270 0ustar00""" Tests for allmydata.util.time_format. """ import time from twisted.trial import unittest from allmydata.test.common_util import TimezoneMixin from allmydata.util import time_format class TimeFormat(unittest.TestCase, TimezoneMixin): def test_epoch(self): return self._help_test_epoch() def test_epoch_in_London(self): # Europe/London is a particularly troublesome timezone. Nowadays, its # offset from GMT is 0. But in 1970, its offset from GMT was 1. # (Apparently in 1970 Britain had redefined standard time to be GMT+1 # and stayed in standard time all year round, whereas today # Europe/London standard time is GMT and Europe/London Daylight # Savings Time is GMT+1.) The current implementation of # time_format.iso_utc_time_to_localseconds() breaks if the timezone is # Europe/London. (As soon as this unit test is done then I'll change # that implementation to something that works even in this case...) 
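        # Illustrative sketch (not from the original test) of the quirk being
        # described; the exact result depends on the system's tz database, but
        # on tzdata-based Unix systems:
        #
        #   >>> import os, time
        #   >>> os.environ["TZ"] = "Europe/London"; time.tzset()
        #   >>> time.localtime(0).tm_hour   # the epoch localizes to 01:00, not 00:00
        #   1
        #
        # which is precisely the case that trips a localtime-based ISO parser.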
if not self.have_working_tzset(): raise unittest.SkipTest("This test can't be run on a platform without time.tzset().") self.setTimezone("Europe/London") return self._help_test_epoch() def _help_test_epoch(self): origtzname = time.tzname s = time_format.iso_utc_time_to_seconds("1970-01-01T00:00:01") self.failUnlessEqual(s, 1.0) s = time_format.iso_utc_time_to_seconds("1970-01-01_00:00:01") self.failUnlessEqual(s, 1.0) s = time_format.iso_utc_time_to_seconds("1970-01-01 00:00:01") self.failUnlessEqual(s, 1.0) self.failUnlessEqual(time_format.iso_utc(1.0), "1970-01-01_00:00:01") self.failUnlessEqual(time_format.iso_utc(1.0, sep=" "), "1970-01-01 00:00:01") now = time.time() isostr = time_format.iso_utc(now) timestamp = time_format.iso_utc_time_to_seconds(isostr) self.failUnlessEqual(int(timestamp), int(now)) def my_time(): return 1.0 self.failUnlessEqual(time_format.iso_utc(t=my_time), "1970-01-01_00:00:01") e = self.failUnlessRaises(ValueError, time_format.iso_utc_time_to_seconds, "invalid timestring") self.failUnless("not a complete ISO8601 timestamp" in str(e)) s = time_format.iso_utc_time_to_seconds("1970-01-01_00:00:01.500") self.failUnlessEqual(s, 1.5) # Look for daylight-savings-related errors. thatmomentinmarch = time_format.iso_utc_time_to_seconds("2009-03-20 21:49:02.226536") self.failUnlessEqual(thatmomentinmarch, 1237585742.226536) self.failUnlessEqual(origtzname, time.tzname) def test_iso_utc(self): when = 1266760143.7841301 out = time_format.iso_utc_date(when) self.failUnlessEqual(out, "2010-02-21") out = time_format.iso_utc_date(t=lambda: when) self.failUnlessEqual(out, "2010-02-21") out = time_format.iso_utc(when) self.failUnlessEqual(out, "2010-02-21_13:49:03.784130") out = time_format.iso_utc(when, sep="-") self.failUnlessEqual(out, "2010-02-21-13:49:03.784130") def test_parse_duration(self): p = time_format.parse_duration DAY = 24*60*60 MONTH = 31*DAY YEAR = 365*DAY self.failUnlessEqual(p("1 day"), DAY) self.failUnlessEqual(p("2 days"), 2*DAY) self.failUnlessEqual(p("3 months"), 3*MONTH) self.failUnlessEqual(p("4 mo"), 4*MONTH) self.failUnlessEqual(p("5 years"), 5*YEAR) e = self.failUnlessRaises(ValueError, p, "123") self.failUnlessIn("no unit (like day, month, or year) in '123'", str(e)) self.failUnlessEqual(p("7days"), 7*DAY) self.failUnlessEqual(p("31day"), 31*DAY) self.failUnlessEqual(p("60 days"), 60*DAY) self.failUnlessEqual(p("2mo"), 2*MONTH) self.failUnlessEqual(p("3 month"), 3*MONTH) self.failUnlessEqual(p("2years"), 2*YEAR) e = self.failUnlessRaises(ValueError, p, "2kumquats") self.failUnlessIn("no unit (like day, month, or year) in '2kumquats'", str(e)) def test_parse_date(self): p = time_format.parse_date self.failUnlessEqual(p("2010-02-21"), 1266710400) self.failUnless(isinstance(p("2009-03-18"), int), p("2009-03-18")) self.failUnlessEqual(p("2009-03-18"), 1237334400) def test_format_time(self): self.failUnlessEqual(time_format.format_time(time.gmtime(0)), '1970-01-01 00:00:00') self.failUnlessEqual(time_format.format_time(time.gmtime(60)), '1970-01-01 00:01:00') self.failUnlessEqual(time_format.format_time(time.gmtime(60*60)), '1970-01-01 01:00:00') seconds_per_day = 60*60*24 leap_years_1970_to_2014_inclusive = ((2012 - 1968) // 4) self.failUnlessEqual(time_format.format_time(time.gmtime(seconds_per_day*((2015 - 1970)*365+leap_years_1970_to_2014_inclusive))), '2015-01-01 00:00:00') def test_format_time_y2038(self): seconds_per_day = 60*60*24 leap_years_1970_to_2047_inclusive = ((2044 - 1968) // 4) t = (seconds_per_day* ((2048 - 1970)*365 + 
leap_years_1970_to_2047_inclusive)) try: gm_t = time.gmtime(t) except ValueError: raise unittest.SkipTest("Note: this system cannot handle dates after 2037.") self.failUnlessEqual(time_format.format_time(gm_t), '2048-01-01 00:00:00') def test_format_delta(self): time_1 = 1389812723 time_5s_delta = 1389812728 time_28m7s_delta = 1389814410 time_1h_delta = 1389816323 time_1d21h46m49s_delta = 1389977532 self.failUnlessEqual( time_format.format_delta(time_1, time_1), '0s') self.failUnlessEqual( time_format.format_delta(time_1, time_5s_delta), '5s') self.failUnlessEqual( time_format.format_delta(time_1, time_28m7s_delta), '28m 7s') self.failUnlessEqual( time_format.format_delta(time_1, time_1h_delta), '1h 0m 0s') self.failUnlessEqual( time_format.format_delta(time_1, time_1d21h46m49s_delta), '1d 21h 46m 49s') self.failUnlessEqual( time_format.format_delta(time_1d21h46m49s_delta, time_1), '-') # time_1 with a decimal fraction will make the delta 1s less time_1decimal = 1389812723.383963 self.failUnlessEqual( time_format.format_delta(time_1decimal, time_5s_delta), '4s') self.failUnlessEqual( time_format.format_delta(time_1decimal, time_28m7s_delta), '28m 6s') self.failUnlessEqual( time_format.format_delta(time_1decimal, time_1h_delta), '59m 59s') self.failUnlessEqual( time_format.format_delta(time_1decimal, time_1d21h46m49s_delta), '1d 21h 46m 48s') tahoe_lafs-1.20.0/src/allmydata/test/test_tor_provider.py0000644000000000000000000006445113615410400020476 0ustar00""" Ported to Python 3. """ import os from twisted.trial import unittest from twisted.internet import defer, error from io import StringIO from unittest import mock from ..util import tor_provider from ..scripts import create_node, runner from foolscap.eventual import flushEventualQueue def mock_txtorcon(txtorcon): return mock.patch("allmydata.util.tor_provider._import_txtorcon", return_value=txtorcon) def mock_tor(tor): return mock.patch("allmydata.util.tor_provider._import_tor", return_value=tor) def make_cli_config(basedir, *argv): parent = runner.Options() cli_config = create_node.CreateNodeOptions() cli_config.parent = parent cli_config.parseOptions(argv) cli_config["basedir"] = basedir cli_config.stdout = StringIO() return cli_config class TryToConnect(unittest.TestCase): def test_try(self): reactor = object() txtorcon = mock.Mock() tor_state = object() d = defer.succeed(tor_state) txtorcon.build_tor_connection = mock.Mock(return_value=d) ep = object() stdout = StringIO() with mock.patch("allmydata.util.tor_provider.clientFromString", return_value=ep) as cfs: d = tor_provider._try_to_connect(reactor, "desc", stdout, txtorcon) r = self.successResultOf(d) self.assertIs(r, tor_state) cfs.assert_called_with(reactor, "desc") txtorcon.build_tor_connection.assert_called_with(ep) def test_try_handled_error(self): reactor = object() txtorcon = mock.Mock() d = defer.fail(error.ConnectError("oops")) txtorcon.build_tor_connection = mock.Mock(return_value=d) ep = object() stdout = StringIO() with mock.patch("allmydata.util.tor_provider.clientFromString", return_value=ep) as cfs: d = tor_provider._try_to_connect(reactor, "desc", stdout, txtorcon) r = self.successResultOf(d) self.assertIs(r, None) cfs.assert_called_with(reactor, "desc") txtorcon.build_tor_connection.assert_called_with(ep) self.assertEqual(stdout.getvalue(), "Unable to reach Tor at 'desc': " "An error occurred while connecting: oops.\n") def test_try_unhandled_error(self): reactor = object() txtorcon = mock.Mock() d = defer.fail(ValueError("oops")) txtorcon.build_tor_connection = 
mock.Mock(return_value=d) ep = object() stdout = StringIO() with mock.patch("allmydata.util.tor_provider.clientFromString", return_value=ep) as cfs: d = tor_provider._try_to_connect(reactor, "desc", stdout, txtorcon) f = self.failureResultOf(d) self.assertIsInstance(f.value, ValueError) self.assertEqual(str(f.value), "oops") cfs.assert_called_with(reactor, "desc") txtorcon.build_tor_connection.assert_called_with(ep) self.assertEqual(stdout.getvalue(), "") class LaunchTor(unittest.TestCase): def _do_test_launch(self, tor_executable): reactor = object() private_dir = "private" txtorcon = mock.Mock() tor = mock.Mock txtorcon.launch = mock.Mock(return_value=tor) with mock.patch("allmydata.util.tor_provider.allocate_tcp_port", return_value=999999): d = tor_provider._launch_tor(reactor, tor_executable, private_dir, txtorcon) tor_control_endpoint, tor_result = self.successResultOf(d) self.assertIs(tor_result, tor) def test_launch(self): return self._do_test_launch(None) def test_launch_executable(self): return self._do_test_launch("mytor") class ConnectToTor(unittest.TestCase): def _do_test_connect(self, endpoint, reachable): reactor = object() txtorcon = object() args = [] if endpoint: args = ["--tor-control-port=%s" % endpoint] cli_config = make_cli_config("basedir", "--listen=tor", *args) stdout = cli_config.stdout expected_port = "tcp:127.0.0.1:9151" if endpoint: expected_port = endpoint tor_state = mock.Mock tor_state.protocol = object() tried = [] def _try_to_connect(reactor, port, stdout, txtorcon): tried.append( (reactor, port, stdout, txtorcon) ) if not reachable: return defer.succeed(None) if port == expected_port: # second one on the list return defer.succeed(tor_state) return defer.succeed(None) with mock.patch("allmydata.util.tor_provider._try_to_connect", _try_to_connect): d = tor_provider._connect_to_tor(reactor, cli_config, txtorcon) if not reachable: f = self.failureResultOf(d) self.assertIsInstance(f.value, ValueError) self.assertEqual(str(f.value), "unable to reach any default Tor control port") return successful_port, tor_control_proto = self.successResultOf(d) self.assertEqual(successful_port, expected_port) self.assertIs(tor_control_proto, tor_state.protocol) expected = [(reactor, "unix:/var/run/tor/control", stdout, txtorcon), (reactor, "tcp:127.0.0.1:9051", stdout, txtorcon), (reactor, "tcp:127.0.0.1:9151", stdout, txtorcon), ] if endpoint: expected = [(reactor, endpoint, stdout, txtorcon)] self.assertEqual(tried, expected) def test_connect(self): return self._do_test_connect(None, True) def test_connect_endpoint(self): return self._do_test_connect("tcp:other:port", True) def test_connect_unreachable(self): return self._do_test_connect(None, False) class FakeTor: """Pretends to be a ``txtorcon.Tor`` instance.""" def __init__(self): self.protocol = object() class CreateOnion(unittest.TestCase): def test_no_txtorcon(self): with mock.patch("allmydata.util.tor_provider._import_txtorcon", return_value=None): d = tor_provider.create_config("reactor", "cli_config") f = self.failureResultOf(d) self.assertIsInstance(f.value, ValueError) self.assertEqual(str(f.value), "Cannot create onion without txtorcon. 
" "Please 'pip install tahoe-lafs[tor]' to fix this.") def _do_test_launch(self, executable): basedir = self.mktemp() os.mkdir(basedir) private_dir = os.path.join(basedir, "private") os.mkdir(private_dir) reactor = object() args = ["--listen=tor", "--tor-launch"] if executable: args.append("--tor-executable=%s" % executable) cli_config = make_cli_config(basedir, *args) tor_instance = FakeTor() launch_tor = mock.Mock(return_value=defer.succeed(("control_endpoint", tor_instance))) txtorcon = mock.Mock() ehs = mock.Mock() # This appears to be a native string in the real txtorcon object... ehs.private_key = "privkey" ehs.hostname = "ONION.onion" txtorcon.EphemeralHiddenService = mock.Mock(return_value=ehs) ehs.add_to_tor = mock.Mock(return_value=defer.succeed(None)) ehs.remove_from_tor = mock.Mock(return_value=defer.succeed(None)) with mock_txtorcon(txtorcon): with mock.patch("allmydata.util.tor_provider._launch_tor", launch_tor): with mock.patch("allmydata.util.tor_provider.allocate_tcp_port", return_value=999999): d = tor_provider.create_config(reactor, cli_config) tor_config = self.successResultOf(d) launch_tor.assert_called_with(reactor, executable, os.path.abspath(private_dir), txtorcon) txtorcon.EphemeralHiddenService.assert_called_with("3457 127.0.0.1:999999") ehs.add_to_tor.assert_called_with(tor_instance.protocol) ehs.remove_from_tor.assert_called_with(tor_instance.protocol) expected = {"launch": "true", "onion": "true", "onion.local_port": "999999", "onion.external_port": "3457", "onion.private_key_file": os.path.join("private", "tor_onion.privkey"), } if executable: expected["tor.executable"] = executable self.assertEqual(dict(tor_config.node_config["tor"]), expected) self.assertEqual(tor_config.tub_ports, ["tcp:999999:interface=127.0.0.1"]) self.assertEqual(tor_config.tub_locations, ["tor:ONION.onion:3457"]) fn = os.path.join(basedir, dict(tor_config.node_config["tor"])["onion.private_key_file"]) with open(fn, "rb") as f: privkey = f.read() self.assertEqual(privkey, b"privkey") def test_launch(self): return self._do_test_launch(None) def test_launch_executable(self): return self._do_test_launch("mytor") def test_control_endpoint(self): basedir = self.mktemp() os.mkdir(basedir) private_dir = os.path.join(basedir, "private") os.mkdir(private_dir) reactor = object() cli_config = make_cli_config(basedir, "--listen=tor") protocol = object() connect_to_tor = mock.Mock(return_value=defer.succeed(("goodport", protocol))) txtorcon = mock.Mock() ehs = mock.Mock() ehs.private_key = b"privkey" ehs.hostname = "ONION.onion" txtorcon.EphemeralHiddenService = mock.Mock(return_value=ehs) ehs.add_to_tor = mock.Mock(return_value=defer.succeed(None)) ehs.remove_from_tor = mock.Mock(return_value=defer.succeed(None)) with mock_txtorcon(txtorcon): with mock.patch("allmydata.util.tor_provider._connect_to_tor", connect_to_tor): with mock.patch("allmydata.util.tor_provider.allocate_tcp_port", return_value=999999): d = tor_provider.create_config(reactor, cli_config) tor_config = self.successResultOf(d) connect_to_tor.assert_called_with(reactor, cli_config, txtorcon) txtorcon.EphemeralHiddenService.assert_called_with("3457 127.0.0.1:999999") ehs.add_to_tor.assert_called_with(protocol) ehs.remove_from_tor.assert_called_with(protocol) expected = {"control.port": "goodport", "onion": "true", "onion.local_port": "999999", "onion.external_port": "3457", "onion.private_key_file": os.path.join("private", "tor_onion.privkey"), } self.assertEqual(dict(tor_config.node_config["tor"]), expected) 
self.assertEqual(tor_config.tub_ports, ["tcp:999999:interface=127.0.0.1"]) self.assertEqual(tor_config.tub_locations, ["tor:ONION.onion:3457"]) fn = os.path.join(basedir, dict(tor_config.node_config["tor"])["onion.private_key_file"]) with open(fn, "rb") as f: privkey = f.read() self.assertEqual(privkey, b"privkey") _None = object() class FakeConfig(dict): def get_config(self, section, option, default=_None, boolean=False): if section != "tor": raise ValueError(section) value = self.get(option, default) if value is _None: raise KeyError return value def get_config_path(self, *args): return os.path.join(self.get("basedir", "basedir"), *args) class EmptyContext(object): def __init__(self): pass def __enter__(self): pass def __exit__(self, type, value, traceback): pass class Provider(unittest.TestCase): def test_build(self): tor_provider.create("reactor", FakeConfig()) def test_handler_disabled(self): p = tor_provider.create("reactor", FakeConfig(enabled=False)) self.assertEqual(p.get_tor_handler(), None) def test_handler_no_tor(self): with mock_tor(None): p = tor_provider.create("reactor", FakeConfig()) self.assertEqual(p.get_tor_handler(), None) def test_handler_launch_no_txtorcon(self): with mock_txtorcon(None): p = tor_provider.create("reactor", FakeConfig(launch=True)) self.assertEqual(p.get_tor_handler(), None) @defer.inlineCallbacks def test_handler_launch(self): reactor = object() tor = mock.Mock() txtorcon = mock.Mock() handler = object() tor.control_endpoint_maker = mock.Mock(return_value=handler) tor.add_context = mock.Mock(return_value=EmptyContext()) with mock_tor(tor): with mock_txtorcon(txtorcon): p = tor_provider.create(reactor, FakeConfig(launch=True)) h = p.get_tor_handler() self.assertIs(h, handler) tor.control_endpoint_maker.assert_called_with(p._make_control_endpoint, takes_status=True) # make sure Tor is launched just once, the first time an endpoint is # requested, and never again. The clientFromString() function is # called once each time. ep_desc = object() launch_tor = mock.Mock(return_value=defer.succeed((ep_desc,None))) ep = object() cfs = mock.Mock(return_value=ep) with mock.patch("allmydata.util.tor_provider._launch_tor", launch_tor): with mock.patch("allmydata.util.tor_provider.clientFromString", cfs): d = p._make_control_endpoint(reactor, update_status=lambda status: None) yield flushEventualQueue() self.assertIs(self.successResultOf(d), ep) launch_tor.assert_called_with(reactor, None, os.path.join("basedir", "private"), txtorcon) cfs.assert_called_with(reactor, ep_desc) launch_tor2 = mock.Mock(return_value=defer.succeed((ep_desc,None))) cfs2 = mock.Mock(return_value=ep) with mock.patch("allmydata.util.tor_provider._launch_tor", launch_tor2): with mock.patch("allmydata.util.tor_provider.clientFromString", cfs2): d2 = p._make_control_endpoint(reactor, update_status=lambda status: None) yield flushEventualQueue() self.assertIs(self.successResultOf(d2), ep) self.assertEqual(launch_tor2.mock_calls, []) cfs2.assert_called_with(reactor, ep_desc) def test_handler_socks_endpoint(self): """ If not configured otherwise, the Tor provider returns a Socks-based handler. 
""" tor = mock.Mock() handler = object() tor.socks_endpoint = mock.Mock(return_value=handler) ep = object() cfs = mock.Mock(return_value=ep) reactor = object() with mock_tor(tor): p = tor_provider.create(reactor, FakeConfig(**{"socks.port": "ep_desc"})) with mock.patch("allmydata.util.tor_provider.clientFromString", cfs): h = p.get_tor_handler() cfs.assert_called_with(reactor, "ep_desc") tor.socks_endpoint.assert_called_with(ep) self.assertIs(h, handler) def test_handler_socks_unix_endpoint(self): """ ``socks.port`` can be configured as a UNIX client endpoint. """ tor = mock.Mock() handler = object() tor.socks_endpoint = mock.Mock(return_value=handler) ep = object() cfs = mock.Mock(return_value=ep) reactor = object() with mock_tor(tor): p = tor_provider.create(reactor, FakeConfig(**{"socks.port": "unix:path"})) with mock.patch("allmydata.util.tor_provider.clientFromString", cfs): h = p.get_tor_handler() cfs.assert_called_with(reactor, "unix:path") tor.socks_endpoint.assert_called_with(ep) self.assertIs(h, handler) def test_handler_socks_tcp_endpoint(self): """ ``socks.port`` can be configured as a UNIX client endpoint. """ tor = mock.Mock() handler = object() tor.socks_endpoint = mock.Mock(return_value=handler) ep = object() cfs = mock.Mock(return_value=ep) reactor = object() with mock_tor(tor): p = tor_provider.create(reactor, FakeConfig(**{"socks.port": "tcp:127.0.0.1:1234"})) with mock.patch("allmydata.util.tor_provider.clientFromString", cfs): h = p.get_tor_handler() cfs.assert_called_with(reactor, "tcp:127.0.0.1:1234") tor.socks_endpoint.assert_called_with(ep) self.assertIs(h, handler) def test_handler_control_endpoint(self): tor = mock.Mock() handler = object() tor.control_endpoint = mock.Mock(return_value=handler) ep = object() cfs = mock.Mock(return_value=ep) reactor = object() with mock_tor(tor): p = tor_provider.create(reactor, FakeConfig(**{"control.port": "ep_desc"})) with mock.patch("allmydata.util.tor_provider.clientFromString", cfs): h = p.get_tor_handler() self.assertIs(h, handler) cfs.assert_called_with(reactor, "ep_desc") tor.control_endpoint.assert_called_with(ep) def test_handler_default(self): tor = mock.Mock() handler = object() tor.default_socks = mock.Mock(return_value=handler) with mock_tor(tor): p = tor_provider.create("reactor", FakeConfig()) h = p.get_tor_handler() self.assertIs(h, handler) tor.default_socks.assert_called_with() class ProviderListener(unittest.TestCase): def test_listener(self): """Does the Tor Provider object's get_listener() method correctly convert the [tor] section of tahoe.cfg into an endpoint/descriptor? 
""" tor = mock.Mock() handler = object() tor.socks_endpoint = mock.Mock(return_value=handler) reactor = object() with mock_tor(tor): p = tor_provider.create(reactor, FakeConfig(**{"onion.local_port": "321"})) fake_ep = object() with mock.patch("allmydata.util.tor_provider.TCP4ServerEndpoint", return_value=fake_ep) as e: endpoint_or_description = p.get_listener() self.assertIs(endpoint_or_description, fake_ep) self.assertEqual(e.mock_calls, [mock.call(reactor, 321, interface="127.0.0.1")]) class Provider_CheckOnionConfig(unittest.TestCase): def test_default(self): # default config doesn't start an onion service, so it should be # happy both with and without txtorcon p = tor_provider.create("reactor", FakeConfig()) p.check_onion_config() with mock_txtorcon(None): p = tor_provider.create("reactor", FakeConfig()) p.check_onion_config() def test_no_txtorcon(self): with mock_txtorcon(None): with self.assertRaises(ValueError) as ctx: tor_provider.create("reactor", FakeConfig(onion=True)) self.assertEqual( str(ctx.exception), "Cannot create onion without txtorcon. " "Please 'pip install tahoe-lafs[tor]' to fix." ) def test_no_launch_no_control(self): """ onion=true but no way to launch/connect to tor """ with self.assertRaises(ValueError) as ctx: tor_provider.create("reactor", FakeConfig(onion=True)) self.assertEqual( str(ctx.exception), "[tor] onion = true, but we have neither " "launch=true nor control.port=" ) def test_onion_no_local_port(self): """ onion=true but no local_port configured is an error """ with self.assertRaises(ValueError) as ctx: tor_provider.create("reactor", FakeConfig(onion=True, launch=True)) self.assertEqual( str(ctx.exception), "[tor] onion = true, " "but onion.local_port= is missing" ) def test_onion_no_external_port(self): """ onion=true but no external_port configured is an error """ with self.assertRaises(ValueError) as ctx: tor_provider.create("reactor", FakeConfig(onion=True, launch=True, **{"onion.local_port": "x", })) self.assertEqual( str(ctx.exception), "[tor] onion = true, but onion.external_port= is missing" ) def test_onion_no_private_key_file(self): """ onion=true but no private_key_file configured is an error """ with self.assertRaises(ValueError) as ctx: tor_provider.create("reactor", FakeConfig(onion=True, launch=True, **{"onion.local_port": "x", "onion.external_port": "y", })) self.assertEqual( str(ctx.exception), "[tor] onion = true, but onion.private_key_file= is missing" ) def test_ok(self): p = tor_provider.create("reactor", FakeConfig(onion=True, launch=True, **{"onion.local_port": "x", "onion.external_port": "y", "onion.private_key_file": "z", })) p.check_onion_config() class Provider_Service(unittest.TestCase): def test_no_onion(self): reactor = object() p = tor_provider.create(reactor, FakeConfig(onion=False)) with mock.patch("allmydata.util.tor_provider._Provider._start_onion") as s: p.startService() self.assertEqual(s.mock_calls, []) self.assertEqual(p.running, True) p.stopService() self.assertEqual(p.running, False) @defer.inlineCallbacks def test_launch(self): basedir = self.mktemp() os.mkdir(basedir) fn = os.path.join(basedir, "keyfile") with open(fn, "w") as f: f.write("private key") reactor = object() cfg = FakeConfig(basedir=basedir, onion=True, launch=True, **{"onion.local_port": 123, "onion.external_port": 456, "onion.private_key_file": "keyfile", }) txtorcon = mock.Mock() with mock_txtorcon(txtorcon): p = tor_provider.create(reactor, cfg) tor_instance = FakeTor() tor_state = mock.Mock() tor_state.protocol = tor_instance.protocol ehs = 
mock.Mock() ehs.add_to_tor = mock.Mock(return_value=defer.succeed(None)) ehs.remove_from_tor = mock.Mock(return_value=defer.succeed(None)) txtorcon.EphemeralHiddenService = mock.Mock(return_value=ehs) launch_tor = mock.Mock(return_value=defer.succeed((None,tor_instance))) with mock.patch("allmydata.util.tor_provider._launch_tor", launch_tor): d = p.startService() yield flushEventualQueue() self.successResultOf(d) self.assertIs(p._onion_ehs, ehs) self.assertIs(p._onion_tor_control_proto, tor_state.protocol) launch_tor.assert_called_with(reactor, None, os.path.join(basedir, "private"), txtorcon) txtorcon.EphemeralHiddenService.assert_called_with("456 127.0.0.1:123", b"private key") ehs.add_to_tor.assert_called_with(tor_state.protocol) yield p.stopService() ehs.remove_from_tor.assert_called_with(tor_state.protocol) @defer.inlineCallbacks def test_control_endpoint(self): basedir = self.mktemp() os.mkdir(basedir) fn = os.path.join(basedir, "keyfile") with open(fn, "w") as f: f.write("private key") reactor = object() cfg = FakeConfig(basedir=basedir, onion=True, **{"control.port": "ep_desc", "onion.local_port": 123, "onion.external_port": 456, "onion.private_key_file": "keyfile", }) txtorcon = mock.Mock() with mock_txtorcon(txtorcon): p = tor_provider.create(reactor, cfg) tor_instance = FakeTor() txtorcon.connect = mock.Mock(return_value=tor_instance) ehs = mock.Mock() ehs.add_to_tor = mock.Mock(return_value=defer.succeed(None)) ehs.remove_from_tor = mock.Mock(return_value=defer.succeed(None)) txtorcon.EphemeralHiddenService = mock.Mock(return_value=ehs) tcep = object() cfs = mock.Mock(return_value=tcep) with mock.patch("allmydata.util.tor_provider.clientFromString", cfs): d = p.startService() yield flushEventualQueue() self.successResultOf(d) self.assertIs(p._onion_ehs, ehs) self.assertIs(p._onion_tor_control_proto, tor_instance.protocol) cfs.assert_called_with(reactor, "ep_desc") txtorcon.connect.assert_called_with(reactor, tcep) txtorcon.EphemeralHiddenService.assert_called_with("456 127.0.0.1:123", b"private key") ehs.add_to_tor.assert_called_with(tor_instance.protocol) yield p.stopService() ehs.remove_from_tor.assert_called_with(tor_instance.protocol) tahoe_lafs-1.20.0/src/allmydata/test/test_upload.py0000644000000000000000000025646413615410400017253 0ustar00# -*- coding: utf-8 -*- """ Ported to Python 3. 
""" import os, shutil from io import BytesIO from base64 import ( b64encode, ) from hypothesis import ( given, ) from hypothesis.strategies import ( just, integers, ) from twisted.trial import unittest from twisted.python.failure import Failure from twisted.internet import defer, task from foolscap.api import fireEventually import allmydata # for __full_version__ from allmydata import uri, monitor, client from allmydata.immutable import upload, encode from allmydata.interfaces import FileTooLargeError, UploadUnhappinessError from allmydata.util import log, base32 from allmydata.util.assertutil import precondition from allmydata.util.deferredutil import DeferredListShouldSucceed from allmydata.test.no_network import GridTestMixin from allmydata.storage_client import StorageFarmBroker from allmydata.storage.server import storage_index_to_dir from allmydata.client import _Client from .common import ( EMPTY_CLIENT_CONFIG, ShouldFailMixin, ) from functools import reduce MiB = 1024*1024 def extract_uri(results): return results.get_uri() class Uploadable(unittest.TestCase): def shouldEqual(self, data, expected): self.failUnless(isinstance(data, list)) for e in data: self.failUnless(isinstance(e, bytes)) s = b"".join(data) self.failUnlessEqual(s, expected) def test_filehandle_random_key(self): return self._test_filehandle(convergence=None) def test_filehandle_convergent_encryption(self): return self._test_filehandle(convergence=b"some convergence string") def _test_filehandle(self, convergence): s = BytesIO(b"a"*41) u = upload.FileHandle(s, convergence=convergence) d = u.get_size() d.addCallback(self.failUnlessEqual, 41) d.addCallback(lambda res: u.read(1)) d.addCallback(self.shouldEqual, b"a") d.addCallback(lambda res: u.read(80)) d.addCallback(self.shouldEqual, b"a"*40) d.addCallback(lambda res: u.close()) # this doesn't close the filehandle d.addCallback(lambda res: s.close()) # that privilege is reserved for us return d def test_filename(self): basedir = "upload/Uploadable/test_filename" os.makedirs(basedir) fn = os.path.join(basedir, "file") f = open(fn, "wb") f.write(b"a"*41) f.close() u = upload.FileName(fn, convergence=None) d = u.get_size() d.addCallback(self.failUnlessEqual, 41) d.addCallback(lambda res: u.read(1)) d.addCallback(self.shouldEqual, b"a") d.addCallback(lambda res: u.read(80)) d.addCallback(self.shouldEqual, b"a"*40) d.addCallback(lambda res: u.close()) return d def test_data(self): s = b"a"*41 u = upload.Data(s, convergence=None) d = u.get_size() d.addCallback(self.failUnlessEqual, 41) d.addCallback(lambda res: u.read(1)) d.addCallback(self.shouldEqual, b"a") d.addCallback(lambda res: u.read(80)) d.addCallback(self.shouldEqual, b"a"*40) d.addCallback(lambda res: u.close()) return d class ServerError(Exception): pass class SetDEPMixin(object): def set_encoding_parameters(self, k, happy, n, max_segsize=1*MiB): p = {"k": k, "happy": happy, "n": n, "max_segment_size": max_segsize, } self.node.encoding_params = p # This doesn't actually implement the whole interface, but adding a commented # interface implementation annotation for grepping purposes. #@implementer(RIStorageServer) class FakeStorageServer(object): """ A fake Foolscap remote object, implemented by overriding callRemote() to call local methods. 
""" def __init__(self, mode, reactor=None): self.mode = mode self.allocated = [] self._alloc_queries = 0 self._get_queries = 0 self.version = { b"http://allmydata.org/tahoe/protocols/storage/v1" : { b"maximum-immutable-share-size": 2**32 - 1, }, b"application-version": bytes(allmydata.__full_version__, "ascii"), } if mode == "small": self.version = { b"http://allmydata.org/tahoe/protocols/storage/v1" : { b"maximum-immutable-share-size": 10, }, b"application-version": bytes(allmydata.__full_version__, "ascii"), } def callRemote(self, methname, *args, **kwargs): def _call(): meth = getattr(self, methname) return meth(*args, **kwargs) d = fireEventually() d.addCallback(lambda res: _call()) return d def allocate_buckets(self, storage_index, renew_secret, cancel_secret, sharenums, share_size, canary): # print("FakeStorageServer.allocate_buckets(num=%d, size=%d, mode=%s, queries=%d)" % (len(sharenums), share_size, self.mode, self._alloc_queries)) if self.mode == "timeout": return defer.Deferred() if self.mode == "first-fail": if self._alloc_queries == 0: raise ServerError if self.mode == "second-fail": if self._alloc_queries == 1: raise ServerError self._alloc_queries += 1 if self.mode == "full": return (set(), {},) elif self.mode == "already got them": return (set(sharenums), {},) else: for shnum in sharenums: self.allocated.append( (storage_index, shnum) ) return (set(), dict([( shnum, FakeBucketWriter(share_size) ) for shnum in sharenums]), ) def get_buckets(self, storage_index, **kw): # this should map shnum to a BucketReader but there isn't a # handy FakeBucketReader and we don't actually read the shares # back anyway (just the keys) return { shnum: None for (si, shnum) in self.allocated if si == storage_index } class FakeBucketWriter(object): # a diagnostic version of storageserver.BucketWriter def __init__(self, size): self.data = BytesIO() self.closed = False self._size = size def callRemote(self, methname, *args, **kwargs): def _call(): meth = getattr(self, "remote_" + methname) return meth(*args, **kwargs) d = fireEventually() d.addCallback(lambda res: _call()) return d def callRemoteOnly(self, methname, *args, **kwargs): d = self.callRemote(methname, *args, **kwargs) del d # callRemoteOnly ignores this return None def remote_write(self, offset, data): precondition(not self.closed) precondition(offset >= 0) precondition(offset+len(data) <= self._size, "offset=%d + data=%d > size=%d" % (offset, len(data), self._size)) self.data.seek(offset) self.data.write(data) def remote_close(self): precondition(not self.closed) self.closed = True def remote_abort(self): pass class FakeClient(object): DEFAULT_ENCODING_PARAMETERS = { "k":25, "happy": 25, "n": 100, "max_segment_size": 1 * MiB, } def __init__(self, mode="good", num_servers=50, reactor=None): self.num_servers = num_servers self.encoding_params = self.DEFAULT_ENCODING_PARAMETERS.copy() if isinstance(mode, str): mode = dict([i,mode] for i in range(num_servers)) servers = [ (b"%20d" % fakeid, FakeStorageServer(mode[fakeid], reactor=reactor)) for fakeid in range(self.num_servers) ] self.storage_broker = StorageFarmBroker( permute_peers=True, tub_maker=None, node_config=EMPTY_CLIENT_CONFIG, ) for (serverid, rref) in servers: ann = {"anonymous-storage-FURL": "pb://%s@nowhere/fake" % str(base32.b2a(serverid), "ascii"), "permutation-seed-base32": base32.b2a(serverid) } self.storage_broker.test_add_rref(serverid, rref, ann) self.last_servers = [s[1] for s in servers] def log(self, *args, **kwargs): pass def get_encoding_parameters(self): return 
self.encoding_params def get_storage_broker(self): return self.storage_broker _secret_holder = client.SecretHolder(b"lease secret", b"convergence secret") class GotTooFarError(Exception): pass class GiganticUploadable(upload.FileHandle): def __init__(self, size): self._size = size self._fp = 0 def get_encryption_key(self): return defer.succeed(b"\x00" * 16) def get_size(self): return defer.succeed(self._size) def read(self, length): left = self._size - self._fp length = min(left, length) self._fp += length if self._fp > 1000000: # terminate the test early. raise GotTooFarError("we shouldn't be allowed to get this far") return defer.succeed([b"\x00" * length]) def close(self): pass DATA = b""" Once upon a time, there was a beautiful princess named Buttercup. She lived in a magical land where every file was stored securely among millions of machines, and nobody ever worried about their data being lost ever again. The End. """ assert len(DATA) > upload.Uploader.URI_LIT_SIZE_THRESHOLD SIZE_ZERO = 0 SIZE_SMALL = 16 SIZE_LARGE = len(DATA) def upload_data(uploader, data, reactor=None): u = upload.Data(data, convergence=None) return uploader.upload(u, reactor=reactor) def upload_filename(uploader, filename, reactor=None): u = upload.FileName(filename, convergence=None) return uploader.upload(u, reactor=reactor) def upload_filehandle(uploader, fh, reactor=None): u = upload.FileHandle(fh, convergence=None) return uploader.upload(u, reactor=reactor) class GoodServer(unittest.TestCase, ShouldFailMixin, SetDEPMixin): def setUp(self): self.node = FakeClient(mode="good") self.u = upload.Uploader() self.u.running = True self.u.parent = self.node def _check_small(self, newuri, size): u = uri.from_string(newuri) self.failUnless(isinstance(u, uri.LiteralFileURI)) self.failUnlessEqual(len(u.data), size) def _check_large(self, newuri, size): u = uri.from_string(newuri) self.failUnless(isinstance(u, uri.CHKFileURI)) self.failUnless(isinstance(u.get_storage_index(), bytes)) self.failUnlessEqual(len(u.get_storage_index()), 16) self.failUnless(isinstance(u.key, bytes)) self.failUnlessEqual(len(u.key), 16) self.failUnlessEqual(u.size, size) def get_data(self, size): return DATA[:size] def test_too_large(self): # we've removed the 4GiB share size limit (see ticket #346 for # details), but still have an 8-byte field, so the limit is now # 2**64, so make sure we reject files larger than that. k = 3; happy = 7; n = 10 self.set_encoding_parameters(k, happy, n) big = k*(2**64) data1 = GiganticUploadable(big) d = self.shouldFail(FileTooLargeError, "test_too_large-data1", "This file is too large to be uploaded (data_size)", self.u.upload, data1) data2 = GiganticUploadable(big-3) d.addCallback(lambda res: self.shouldFail(FileTooLargeError, "test_too_large-data2", "This file is too large to be uploaded (offsets)", self.u.upload, data2)) # I don't know where the actual limit is.. it depends upon how large # the hash trees wind up. It's somewhere close to k*4GiB-ln2(size). 
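        # Worked numbers (an editorial sketch; this breakdown is an
        # interpretation of the two error messages above): with k = 3, data1
        # is 3 * 2**64 bytes, so each share's data portion is 2**64 bytes --
        # one more than an 8-byte field can record -- hence the "data_size"
        # error. data2 is three bytes smaller, so the per-share data itself
        # just fits, but the hash trees appended after it push the stored
        # offsets past 2**64, which is why it fails with the "offsets" message.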
return d def test_data_zero(self): data = self.get_data(SIZE_ZERO) d = upload_data(self.u, data) d.addCallback(extract_uri) d.addCallback(self._check_small, SIZE_ZERO) return d def test_data_small(self): data = self.get_data(SIZE_SMALL) d = upload_data(self.u, data) d.addCallback(extract_uri) d.addCallback(self._check_small, SIZE_SMALL) return d def test_data_large(self): data = self.get_data(SIZE_LARGE) d = upload_data(self.u, data) d.addCallback(extract_uri) d.addCallback(self._check_large, SIZE_LARGE) return d def test_data_large_odd_segments(self): data = self.get_data(SIZE_LARGE) segsize = int(SIZE_LARGE / 2.5) # we want 3 segments, since that's not a power of two self.set_encoding_parameters(25, 25, 100, segsize) d = upload_data(self.u, data) d.addCallback(extract_uri) d.addCallback(self._check_large, SIZE_LARGE) return d def test_filehandle_zero(self): data = self.get_data(SIZE_ZERO) d = upload_filehandle(self.u, BytesIO(data)) d.addCallback(extract_uri) d.addCallback(self._check_small, SIZE_ZERO) return d def test_filehandle_small(self): data = self.get_data(SIZE_SMALL) d = upload_filehandle(self.u, BytesIO(data)) d.addCallback(extract_uri) d.addCallback(self._check_small, SIZE_SMALL) return d def test_filehandle_large(self): data = self.get_data(SIZE_LARGE) d = upload_filehandle(self.u, BytesIO(data)) d.addCallback(extract_uri) d.addCallback(self._check_large, SIZE_LARGE) return d def test_filename_zero(self): fn = "Uploader-test_filename_zero.data" f = open(fn, "wb") data = self.get_data(SIZE_ZERO) f.write(data) f.close() d = upload_filename(self.u, fn) d.addCallback(extract_uri) d.addCallback(self._check_small, SIZE_ZERO) return d def test_filename_small(self): fn = "Uploader-test_filename_small.data" f = open(fn, "wb") data = self.get_data(SIZE_SMALL) f.write(data) f.close() d = upload_filename(self.u, fn) d.addCallback(extract_uri) d.addCallback(self._check_small, SIZE_SMALL) return d def test_filename_large(self): fn = "Uploader-test_filename_large.data" f = open(fn, "wb") data = self.get_data(SIZE_LARGE) f.write(data) f.close() d = upload_filename(self.u, fn) d.addCallback(extract_uri) d.addCallback(self._check_large, SIZE_LARGE) return d class ServerErrors(unittest.TestCase, ShouldFailMixin, SetDEPMixin): def make_node(self, mode, num_servers=10): self.node = FakeClient(mode, num_servers) self.u = upload.Uploader() self.u.running = True self.u.parent = self.node def _check_large(self, newuri, size): u = uri.from_string(newuri) self.failUnless(isinstance(u, uri.CHKFileURI)) self.failUnless(isinstance(u.get_storage_index(), bytes)) self.failUnlessEqual(len(u.get_storage_index()), 16) self.failUnless(isinstance(u.key, bytes)) self.failUnlessEqual(len(u.key), 16) self.failUnlessEqual(u.size, size) def test_first_error(self): mode = dict([(0,"good")] + [(i,"first-fail") for i in range(1,10)]) self.make_node(mode) self.set_encoding_parameters(k=25, happy=1, n=50) d = upload_data(self.u, DATA) d.addCallback(extract_uri) d.addCallback(self._check_large, SIZE_LARGE) return d def test_first_error_all(self): self.make_node("first-fail") d = self.shouldFail(UploadUnhappinessError, "first_error_all", "server selection failed", upload_data, self.u, DATA) def _check(f): # for some reason this is passed as a 1-tuple (f,) = f self.failUnlessIn("placed 0 shares out of 100 total", str(f.value)) # there should also be a 'last failure was' message self.failUnlessIn("ServerError", str(f.value)) d.addCallback(_check) return d def test_second_error_all(self): self.make_node("second-fail") d = 
self.shouldFail(UploadUnhappinessError, "second_error_all", "server selection failed", upload_data, self.u, DATA) def _check(f): # for some reason this is passed as a 1-tuple (f,) = f self.failUnlessIn("shares could be placed or found on only 10 server(s)", str(f.value)) d.addCallback(_check) return d def test_allocation_error_some(self): self.make_node({ 0: "good", 1: "good", 2: "good", 3: "good", 4: "good", 5: "first-fail", 6: "first-fail", 7: "first-fail", 8: "first-fail", 9: "first-fail", }) self.set_encoding_parameters(3, 7, 10) d = self.shouldFail(UploadUnhappinessError, "second_error_some", "server selection failed", upload_data, self.u, DATA) def _check(f): # for some reason this is passed as a 1-tuple (f,) = f self.failUnlessIn("shares could be placed on only 5 server(s)", str(f.value)) d.addCallback(_check) return d def test_allocation_error_recovery(self): self.make_node({ 0: "good", 1: "good", 2: "good", 3: "good", 4: "second-fail", 5: "second-fail", 6: "first-fail", 7: "first-fail", 8: "first-fail", 9: "first-fail", }) self.set_encoding_parameters(3, 7, 10) # we placed shares on 0 through 5, which wasn't enough. so # then we looped and only placed on 0-3 (because now 4-9 have # all failed) ... so the error message should say we only # placed on 6 servers (not 4) because those two shares *did* # at some point succeed. d = self.shouldFail(UploadUnhappinessError, "second_error_some", "server selection failed", upload_data, self.u, DATA) def _check(f): # for some reason this is passed as a 1-tuple (f,) = f self.failUnlessIn("shares could be placed on only 6 server(s)", str(f.value)) d.addCallback(_check) return d def test_good_servers_stay_writable(self): self.make_node({ 0: "good", 1: "good", 2: "second-fail", 3: "second-fail", 4: "second-fail", 5: "first-fail", 6: "first-fail", 7: "first-fail", 8: "first-fail", 9: "first-fail", }) self.set_encoding_parameters(3, 7, 10) # we placed shares on 0 through 5, which wasn't enough. so # then we looped and only placed on 0-3 (because now 4-9 have # all failed) ... so the error message should say we only # placed on 6 servers (not 4) because those two shares *did* # at some point succeed. d = self.shouldFail(UploadUnhappinessError, "good_servers_stay_writable", "server selection failed", upload_data, self.u, DATA) def _check(f): # for some reason this is passed as a 1-tuple (f,) = f self.failUnlessIn("shares could be placed on only 5 server(s)", str(f.value)) d.addCallback(_check) return d def test_timeout(self): clock = task.Clock() self.make_node("timeout") self.set_encoding_parameters(k=25, happy=1, n=50) d = self.shouldFail( UploadUnhappinessError, __name__, "server selection failed", upload_data, self.u, DATA, reactor=clock, ) # XXX double-check; it's doing 3 iterations? # XXX should only do 1! 
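        # Sketch (not from the original test): task.Clock is a deterministic
        # fake reactor clock; each advance() below synchronously fires any
        # callLater() timers that fall within the advanced interval, so the
        # per-server timeouts elapse without the test waiting in real time.
        #
        #   >>> from twisted.internet import task
        #   >>> c = task.Clock()
        #   >>> fired = []
        #   >>> _ = c.callLater(10, fired.append, "timed out")
        #   >>> c.advance(15); fired
        #   ['timed out']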
clock.advance(15) clock.advance(15) return d class FullServer(unittest.TestCase): def setUp(self): self.node = FakeClient(mode="full") self.u = upload.Uploader() self.u.running = True self.u.parent = self.node def _should_fail(self, f): self.failUnless(isinstance(f, Failure) and f.check(UploadUnhappinessError), f) def test_data_large(self): data = DATA d = upload_data(self.u, data) d.addBoth(self._should_fail) return d class ServerSelection(unittest.TestCase): def make_client(self, num_servers=50): self.node = FakeClient(mode="good", num_servers=num_servers) self.u = upload.Uploader() self.u.running = True self.u.parent = self.node def get_data(self, size): return DATA[:size] def _check_large(self, newuri, size): u = uri.from_string(newuri) self.failUnless(isinstance(u, uri.CHKFileURI)) self.failUnless(isinstance(u.get_storage_index(), bytes)) self.failUnlessEqual(len(u.get_storage_index()), 16) self.failUnless(isinstance(u.key, bytes)) self.failUnlessEqual(len(u.key), 16) self.failUnlessEqual(u.size, size) def set_encoding_parameters(self, k, happy, n, max_segsize=1*MiB): p = {"k": k, "happy": happy, "n": n, "max_segment_size": max_segsize, } self.node.encoding_params = p def test_one_each(self): # if we have 50 shares, and there are 50 servers, and they all accept # a share, we should get exactly one share per server self.make_client() data = self.get_data(SIZE_LARGE) self.set_encoding_parameters(25, 30, 50) d = upload_data(self.u, data) d.addCallback(extract_uri) d.addCallback(self._check_large, SIZE_LARGE) def _check(res): for s in self.node.last_servers: allocated = s.allocated self.failUnlessEqual(len(allocated), 1) self.failUnlessEqual(s._alloc_queries, 1) d.addCallback(_check) return d def test_two_each(self): # if we have 100 shares, and there are 50 servers, and they all # accept all shares, we should get exactly two shares per server self.make_client() data = self.get_data(SIZE_LARGE) # if there are 50 servers, then happy needs to be <= 50 self.set_encoding_parameters(50, 50, 100) d = upload_data(self.u, data) d.addCallback(extract_uri) d.addCallback(self._check_large, SIZE_LARGE) def _check(res): for s in self.node.last_servers: allocated = s.allocated self.failUnlessEqual(len(allocated), 2) self.failUnlessEqual(s._alloc_queries, 1) d.addCallback(_check) return d def test_one_each_plus_one_extra(self): # if we have 51 shares, and there are 50 servers, then one server # gets two shares and the rest get just one self.make_client() data = self.get_data(SIZE_LARGE) self.set_encoding_parameters(24, 41, 51) d = upload_data(self.u, data) d.addCallback(extract_uri) d.addCallback(self._check_large, SIZE_LARGE) def _check(res): got_one = [] got_two = [] for s in self.node.last_servers: allocated = s.allocated self.failUnless(len(allocated) in (1,2), len(allocated)) if len(allocated) == 1: self.failUnlessEqual(s._alloc_queries, 1) got_one.append(s) else: self.failUnlessEqual(s._alloc_queries, 1) got_two.append(s) self.failUnlessEqual(len(got_one), 49) self.failUnlessEqual(len(got_two), 1) d.addCallback(_check) return d def test_four_each(self): # if we have 200 shares, and there are 50 servers, then each server # gets 4 shares. The design goal is to accomplish this with only two # queries per server. self.make_client() data = self.get_data(SIZE_LARGE) # if there are 50 servers, then happy should be no more than 50 if we # want this to work. 
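# Quick arithmetic for the parameters chosen below (illustrative only):
# 200 shares spread over 50 servers is an even split of four shares per
# server, and "happy" cannot usefully exceed the number of distinct
# servers, so it is capped at 50 here.
_demo_num_servers, _demo_total_shares = 50, 200
assert _demo_total_shares // _demo_num_servers == 4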
self.set_encoding_parameters(100, 50, 200) d = upload_data(self.u, data) d.addCallback(extract_uri) d.addCallback(self._check_large, SIZE_LARGE) def _check(res): for s in self.node.last_servers: allocated = s.allocated self.failUnlessEqual(len(allocated), 4) self.failUnlessEqual(s._alloc_queries, 1) d.addCallback(_check) return d def test_three_of_ten(self): # if we have 10 shares and 3 servers, I want to see 3+3+4 rather than # 4+4+2 self.make_client(3) data = self.get_data(SIZE_LARGE) self.set_encoding_parameters(3, 3, 10) d = upload_data(self.u, data) d.addCallback(extract_uri) d.addCallback(self._check_large, SIZE_LARGE) def _check(res): counts = {} for s in self.node.last_servers: allocated = s.allocated counts[len(allocated)] = counts.get(len(allocated), 0) + 1 histogram = [counts.get(i, 0) for i in range(5)] self.failUnlessEqual(histogram, [0,0,0,2,1]) d.addCallback(_check) return d def test_some_big_some_small(self): # 10 shares, 20 servers, but half the servers don't support a # share-size large enough for our file mode = dict([(i,{0:"good",1:"small"}[i%2]) for i in range(20)]) self.node = FakeClient(mode, num_servers=20) self.u = upload.Uploader() self.u.running = True self.u.parent = self.node data = self.get_data(SIZE_LARGE) self.set_encoding_parameters(3, 5, 10) d = upload_data(self.u, data) d.addCallback(extract_uri) d.addCallback(self._check_large, SIZE_LARGE) def _check(res): # we should have put one share each on the big servers, and zero # shares on the small servers total_allocated = 0 for p in self.node.last_servers: if p.mode == "good": self.failUnlessEqual(len(p.allocated), 1) elif p.mode == "small": self.failUnlessEqual(len(p.allocated), 0) total_allocated += len(p.allocated) self.failUnlessEqual(total_allocated, 10) d.addCallback(_check) return d def test_number_of_servers_contacted(self): # This tests ensures that Tahoe only contacts 2n servers # during peer selection self.make_client(40) self.set_encoding_parameters(3, 7, 10) data = self.get_data(SIZE_LARGE) d = upload_data(self.u, data) def _check(res): servers_contacted = [] for s in self.node.last_servers: if(s._alloc_queries != 0): servers_contacted.append(s) self.failUnless(len(servers_contacted), 20) d.addCallback(_check) return d class StorageIndex(unittest.TestCase): def test_params_must_matter(self): DATA = b"I am some data" PARAMS = _Client.DEFAULT_ENCODING_PARAMETERS u = upload.Data(DATA, convergence=b"") u.set_default_encoding_parameters(PARAMS) eu = upload.EncryptAnUploadable(u) d1 = eu.get_storage_index() # CHK means the same data should encrypt the same way u = upload.Data(DATA, convergence=b"") u.set_default_encoding_parameters(PARAMS) eu = upload.EncryptAnUploadable(u) d1a = eu.get_storage_index() # but if we use a different convergence string it should be different u = upload.Data(DATA, convergence=b"wheee!") u.set_default_encoding_parameters(PARAMS) eu = upload.EncryptAnUploadable(u) d1salt1 = eu.get_storage_index() # and if we add yet a different convergence it should be different again u = upload.Data(DATA, convergence=b"NOT wheee!") u.set_default_encoding_parameters(PARAMS) eu = upload.EncryptAnUploadable(u) d1salt2 = eu.get_storage_index() # and if we use the first string again it should be the same as last time u = upload.Data(DATA, convergence=b"wheee!") u.set_default_encoding_parameters(PARAMS) eu = upload.EncryptAnUploadable(u) d1salt1a = eu.get_storage_index() # and if we change the encoding parameters, it should be different (from the same convergence string with different encoding 
parameters) u = upload.Data(DATA, convergence=b"") u.set_default_encoding_parameters(PARAMS) u.encoding_param_k = u.default_encoding_param_k + 1 eu = upload.EncryptAnUploadable(u) d2 = eu.get_storage_index() # and if we use a random key, it should be different than the CHK u = upload.Data(DATA, convergence=None) u.set_default_encoding_parameters(PARAMS) eu = upload.EncryptAnUploadable(u) d3 = eu.get_storage_index() # and different from another instance u = upload.Data(DATA, convergence=None) u.set_default_encoding_parameters(PARAMS) eu = upload.EncryptAnUploadable(u) d4 = eu.get_storage_index() d = DeferredListShouldSucceed([d1,d1a,d1salt1,d1salt2,d1salt1a,d2,d3,d4]) def _done(res): si1, si1a, si1salt1, si1salt2, si1salt1a, si2, si3, si4 = res self.failUnlessEqual(si1, si1a) self.failIfEqual(si1, si2) self.failIfEqual(si1, si3) self.failIfEqual(si1, si4) self.failIfEqual(si3, si4) self.failIfEqual(si1salt1, si1) self.failIfEqual(si1salt1, si1salt2) self.failIfEqual(si1salt2, si1) self.failUnlessEqual(si1salt1, si1salt1a) d.addCallback(_done) return d # copied from python docs because itertools.combinations was added in # python 2.6 and we support >= 2.4. def combinations(iterable, r): # combinations('ABCD', 2) --> AB AC AD BC BD CD # combinations(range(4), 3) --> 012 013 023 123 pool = tuple(iterable) n = len(pool) if r > n: return indices = list(range(r)) yield tuple(pool[i] for i in indices) while True: for i in reversed(list(range(r))): if indices[i] != i + n - r: break else: return indices[i] += 1 for j in range(i+1, r): indices[j] = indices[j-1] + 1 yield tuple(pool[i] for i in indices) def is_happy_enough(servertoshnums, h, k): """ I calculate whether servertoshnums achieves happiness level h. I do this with a naïve "brute force search" approach. (See src/allmydata/util/happinessutil.py for a better algorithm.) """ if len(servertoshnums) < h: return False for happysetcombo in combinations(iter(servertoshnums.keys()), h): for subsetcombo in combinations(happysetcombo, k): shnums = reduce(set.union, [ servertoshnums[s] for s in subsetcombo ]) if len(shnums) < k: return False return True class FileHandleTests(unittest.TestCase): """ Tests for ``FileHandle``. """ def test_get_encryption_key_convergent(self): """ When ``FileHandle`` is initialized with a convergence secret, ``FileHandle.get_encryption_key`` returns a deterministic result that is a function of that secret. """ secret = b"\x42" * 16 handle = upload.FileHandle(BytesIO(b"hello world"), secret) handle.set_default_encoding_parameters({ "k": 3, "happy": 5, "n": 10, # Remember this is the *max* segment size. In reality, the data # size is much smaller so the actual segment size incorporated # into the encryption key is also smaller. "max_segment_size": 128 * 1024, }) self.assertEqual( b64encode(self.successResultOf(handle.get_encryption_key())), b"oBcuR/wKdCgCV2GKKXqiNg==", ) class EncodingParameters(GridTestMixin, unittest.TestCase, SetDEPMixin, ShouldFailMixin): def setUp(self): d = super(EncodingParameters, self).setUp() self._curdir = os.path.abspath(os.path.curdir) return d def tearDown(self): d = super(EncodingParameters, self).tearDown() self.assertEqual( os.path.abspath(os.path.curdir), self._curdir, ) return d def find_all_shares(self, unused=None): """Locate shares on disk. Returns a dict that maps server to set of sharenums. 
""" assert self.g, "I tried to find a grid at self.g, but failed" servertoshnums = {} # k: server, v: set(shnum) for i, c in self.g.servers_by_number.items(): for (dirp, dirns, fns) in os.walk(c.sharedir): for fn in fns: try: sharenum = int(fn) except TypeError: # Whoops, I guess that's not a share file then. pass else: servertoshnums.setdefault(i, set()).add(sharenum) return servertoshnums def _do_upload_with_broken_servers(self, servers_to_break): """ I act like a normal upload, but before I send the results of Tahoe2ServerSelector to the Encoder, I break the first servers_to_break ServerTrackers in the upload_servers part of the return result. """ assert self.g, "I tried to find a grid at self.g, but failed" broker = self.g.clients[0].storage_broker sh = self.g.clients[0]._secret_holder data = upload.Data(b"data" * 10000, convergence=b"") data.set_default_encoding_parameters({'k': 3, 'happy': 4, 'n': 10}) uploadable = upload.EncryptAnUploadable(data) encoder = encode.Encoder() encoder.set_encrypted_uploadable(uploadable) status = upload.UploadStatus() selector = upload.Tahoe2ServerSelector("dglev", "test", status) storage_index = encoder.get_param("storage_index") share_size = encoder.get_param("share_size") block_size = encoder.get_param("block_size") num_segments = encoder.get_param("num_segments") d = selector.get_shareholders(broker, sh, storage_index, share_size, block_size, num_segments, 10, 3, 4, encoder.get_uri_extension_size()) def _have_shareholders(upload_trackers_and_already_servers): (upload_trackers, already_servers) = upload_trackers_and_already_servers assert servers_to_break <= len(upload_trackers) for index in range(servers_to_break): tracker = list(upload_trackers)[index] for share in list(tracker.buckets.keys()): tracker.buckets[share].abort() buckets = {} servermap = already_servers.copy() for tracker in upload_trackers: buckets.update(tracker.buckets) for bucket in tracker.buckets: servermap.setdefault(bucket, set()).add(tracker.get_serverid()) encoder.set_shareholders(buckets, servermap) d = encoder.start() return d d.addCallback(_have_shareholders) return d def _has_happy_share_distribution(self): servertoshnums = self.find_all_shares() k = self.g.clients[0].encoding_params['k'] h = self.g.clients[0].encoding_params['happy'] return is_happy_enough(servertoshnums, h, k) def _add_server(self, server_number, readonly=False): assert self.g, "I tried to find a grid at self.g, but failed" ss = self.g.make_server(server_number, readonly) log.msg("just created a server, number: %s => %s" % (server_number, ss,)) self.g.add_server(server_number, ss) self.g.rebuild_serverlist() def _add_server_with_share(self, server_number, share_number=None, readonly=False): self._add_server(server_number, readonly) if share_number is not None: self._copy_share_to_server(share_number, server_number) def _copy_share_to_server(self, share_number, server_number): ss = self.g.servers_by_number[server_number] # Copy share i from the directory associated with the first # storage server to the directory associated with this one. 
assert self.g, "I tried to find a grid at self.g, but failed" assert self.shares, "I tried to find shares at self.shares, but failed" old_share_location = self.shares[share_number][2] new_share_location = os.path.join(ss.storedir, "shares") si = uri.from_string(self.uri).get_storage_index() new_share_location = os.path.join(new_share_location, storage_index_to_dir(si)) if not os.path.exists(new_share_location): os.makedirs(new_share_location) new_share_location = os.path.join(new_share_location, str(share_number)) if old_share_location != new_share_location: shutil.copy(old_share_location, new_share_location) shares = self.find_uri_shares(self.uri) # Make sure that the storage server has the share. self.failUnless((share_number, ss.my_nodeid, new_share_location) in shares) def _setup_grid(self): """ I set up a NoNetworkGrid with a single server and client. """ self.set_up_grid(num_clients=1, num_servers=1) def _setup_and_upload(self, **kwargs): """ I set up a NoNetworkGrid with a single server and client, upload a file to it, store its uri in self.uri, and store its sharedata in self.shares. """ self._setup_grid() client = self.g.clients[0] client.encoding_params['happy'] = 1 if "n" in kwargs and "k" in kwargs: client.encoding_params['k'] = kwargs['k'] client.encoding_params['n'] = kwargs['n'] data = upload.Data(b"data" * 10000, convergence=b"") self.data = data d = client.upload(data) def _store_uri(ur): self.uri = ur.get_uri() d.addCallback(_store_uri) d.addCallback(lambda ign: self.find_uri_shares(self.uri)) def _store_shares(shares): self.shares = shares d.addCallback(_store_shares) return d def test_configure_parameters(self): self.basedir = self.mktemp() hooks = {0: self._set_up_nodes_extra_config} self.set_up_grid(client_config_hooks=hooks) c0 = self.g.clients[0] DATA = b"data" * 100 u = upload.Data(DATA, convergence=b"") d = c0.upload(u) d.addCallback(lambda ur: c0.create_node_from_uri(ur.get_uri())) m = monitor.Monitor() d.addCallback(lambda fn: fn.check(m)) def _check(cr): self.failUnlessEqual(cr.get_encoding_needed(), 7) self.failUnlessEqual(cr.get_encoding_expected(), 12) d.addCallback(_check) return d def _setUp(self, ns): # Used by test_happy_semantics and test_preexisting_share_behavior # to set up the grid. self.node = FakeClient(mode="good", num_servers=ns) self.u = upload.Uploader() self.u.running = True self.u.parent = self.node def test_happy_semantics(self): self._setUp(2) DATA = upload.Data(b"kittens" * 10000, convergence=b"") # These parameters are unsatisfiable with only 2 servers. self.set_encoding_parameters(k=3, happy=5, n=10) d = self.shouldFail(UploadUnhappinessError, "test_happy_semantics", "shares could be placed or found on only 2 " "server(s). We were asked to place shares on " "at least 5 server(s) such that any 3 of them " "have enough shares to recover the file", self.u.upload, DATA) # Let's reset the client to have 10 servers d.addCallback(lambda ign: self._setUp(10)) # These parameters are satisfiable with 10 servers. d.addCallback(lambda ign: self.set_encoding_parameters(k=3, happy=5, n=10)) d.addCallback(lambda ign: self.u.upload(DATA)) # Let's reset the client to have 7 servers # (this is less than n, but more than h) d.addCallback(lambda ign: self._setUp(7)) # These parameters are satisfiable with 7 servers. 
d.addCallback(lambda ign: self.set_encoding_parameters(k=3, happy=5, n=10)) d.addCallback(lambda ign: self.u.upload(DATA)) return d def test_aborted_shares(self): self.basedir = "upload/EncodingParameters/aborted_shares" self.set_up_grid(num_servers=4) c = self.g.clients[0] DATA = upload.Data(100 * b"kittens", convergence=b"") # These parameters are unsatisfiable with only 4 servers, but should # work with 5, as long as the original 4 are not stuck in the open # BucketWriter state (open() but not parms = {"k":2, "happy":5, "n":5, "max_segment_size": 1*MiB} c.encoding_params = parms d = self.shouldFail(UploadUnhappinessError, "test_aborted_shares", "shares could be placed on only 4 " "server(s) such that any 2 of them have enough " "shares to recover the file, but we were asked " "to place shares on at least 5 such servers", c.upload, DATA) # now add the 5th server d.addCallback(lambda ign: self._add_server(4, False)) # and this time the upload ought to succeed d.addCallback(lambda ign: c.upload(DATA)) d.addCallback(lambda ign: self.failUnless(self._has_happy_share_distribution())) return d def test_problem_layout_comment_52(self): def _basedir(): self.basedir = self.mktemp() _basedir() # This scenario is at # http://allmydata.org/trac/tahoe-lafs/ticket/778#comment:52 # # The scenario in comment:52 proposes that we have a layout # like: # server 0: shares 1 - 9 # server 1: share 0, read-only # server 2: share 0, read-only # server 3: share 0, read-only # To get access to the shares, we will first upload to one # server, which will then have shares 0 - 9. We'll then # add three new servers, configure them to not accept any new # shares, then write share 0 directly into the serverdir of each, # and then remove share 0 from server 0 in the same way. # Then each of servers 1 - 3 will report that they have share 0, # and will not accept any new share, while server 0 will report that # it has shares 1 - 9 and will accept new shares. # We'll then set 'happy' = 4, and see that an upload fails # (as it should) d = self._setup_and_upload() d.addCallback(lambda ign: self._add_server_with_share(server_number=1, share_number=0, readonly=True)) d.addCallback(lambda ign: self._add_server_with_share(server_number=2, share_number=0, readonly=True)) d.addCallback(lambda ign: self._add_server_with_share(server_number=3, share_number=0, readonly=True)) # Remove the first share from server 0. def _remove_share_0_from_server_0(): share_location = self.shares[0][2] os.remove(share_location) d.addCallback(lambda ign: _remove_share_0_from_server_0()) # Set happy = 4 in the client. def _prepare(): client = self.g.clients[0] client.encoding_params['happy'] = 4 return client d.addCallback(lambda ign: _prepare()) # Uploading data should fail d.addCallback(lambda client: self.shouldFail(UploadUnhappinessError, "test_problem_layout_comment_52_test_1", "shares could be placed or found on 4 server(s), " "but they are not spread out evenly enough to " "ensure that any 3 of these servers would have " "enough shares to recover the file. 
" "We were asked to place shares on at " "least 4 servers such that any 3 of them have " "enough shares to recover the file", client.upload, upload.Data(b"data" * 10000, convergence=b""))) # Do comment:52, but like this: # server 2: empty # server 3: share 0, read-only # server 1: share 0, read-only # server 0: shares 0-9 d.addCallback(lambda ign: _basedir()) d.addCallback(lambda ign: self._setup_and_upload()) d.addCallback(lambda ign: self._add_server(server_number=2)) d.addCallback(lambda ign: self._add_server_with_share(server_number=3, share_number=0, readonly=True)) d.addCallback(lambda ign: self._add_server_with_share(server_number=1, share_number=0, readonly=True)) def _prepare2(): client = self.g.clients[0] client.encoding_params['happy'] = 4 return client d.addCallback(lambda ign: _prepare2()) d.addCallback(lambda client: self.shouldFail(UploadUnhappinessError, "test_problem_layout_comment_52_test_2", "shares could be placed on only 3 server(s) such " "that any 3 of them have enough shares to recover " "the file, but we were asked to place shares on " "at least 4 such servers.", client.upload, upload.Data(b"data" * 10000, convergence=b""))) return d def test_problem_layout_comment_53(self): # This scenario is at # http://allmydata.org/trac/tahoe-lafs/ticket/778#comment:53 # # Set up the grid to have one server def _change_basedir(ign): self.basedir = self.mktemp() _change_basedir(None) # We start by uploading all of the shares to one server. # Next, we'll add three new servers to our NoNetworkGrid. We'll add # one share from our initial upload to each of these. # The counterintuitive ordering of the share numbers is to deal with # the permuting of these servers -- distributing the shares this # way ensures that the Tahoe2ServerSelector sees them in the order # described below. d = self._setup_and_upload() d.addCallback(lambda ign: self._add_server_with_share(server_number=1, share_number=2)) d.addCallback(lambda ign: self._add_server_with_share(server_number=2, share_number=0)) d.addCallback(lambda ign: self._add_server_with_share(server_number=3, share_number=1)) # So, we now have the following layout: # server 0: shares 0 - 9 # server 1: share 2 # server 2: share 0 # server 3: share 1 # We change the 'happy' parameter in the client to 4. # The Tahoe2ServerSelector will see the servers permuted as: # 2, 3, 1, 0 # Ideally, a reupload of our original data should work. def _reset_encoding_parameters(ign, happy=4): client = self.g.clients[0] client.encoding_params['happy'] = happy return client d.addCallback(_reset_encoding_parameters) d.addCallback(lambda client: client.upload(upload.Data(b"data" * 10000, convergence=b""))) d.addCallback(lambda ign: self.failUnless(self._has_happy_share_distribution())) # This scenario is basically comment:53, but changed so that the # Tahoe2ServerSelector sees the server with all of the shares before # any of the other servers. # The layout is: # server 2: shares 0 - 9 # server 3: share 0 # server 1: share 1 # server 4: share 2 # The Tahoe2ServerSelector sees the servers permuted as: # 2, 3, 1, 4 # Note that server 0 has been replaced by server 4; this makes it # easier to ensure that the last server seen by Tahoe2ServerSelector # has only one share. 
d.addCallback(_change_basedir) d.addCallback(lambda ign: self._setup_and_upload()) d.addCallback(lambda ign: self._add_server_with_share(server_number=2, share_number=0)) d.addCallback(lambda ign: self._add_server_with_share(server_number=3, share_number=1)) d.addCallback(lambda ign: self._add_server_with_share(server_number=1, share_number=2)) # Copy all of the other shares to server number 2 def _copy_shares(ign): for i in range(0, 10): self._copy_share_to_server(i, 2) d.addCallback(_copy_shares) # Remove the first server, and add a placeholder with share 0 d.addCallback(lambda ign: self.g.remove_server(self.g.servers_by_number[0].my_nodeid)) d.addCallback(lambda ign: self._add_server_with_share(server_number=4, share_number=0)) # Now try uploading. d.addCallback(_reset_encoding_parameters) d.addCallback(lambda client: client.upload(upload.Data(b"data" * 10000, convergence=b""))) d.addCallback(lambda ign: self.failUnless(self._has_happy_share_distribution())) # Try the same thing, but with empty servers after the first one # We want to make sure that Tahoe2ServerSelector will redistribute # shares as necessary, not simply discover an existing layout. # The layout is: # server 2: shares 0 - 9 # server 3: empty # server 1: empty # server 4: empty d.addCallback(_change_basedir) d.addCallback(lambda ign: self._setup_and_upload()) d.addCallback(lambda ign: self._add_server(server_number=2)) d.addCallback(lambda ign: self._add_server(server_number=3)) d.addCallback(lambda ign: self._add_server(server_number=1)) d.addCallback(lambda ign: self._add_server(server_number=4)) d.addCallback(_copy_shares) d.addCallback(lambda ign: self.g.remove_server(self.g.servers_by_number[0].my_nodeid)) d.addCallback(_reset_encoding_parameters) d.addCallback(lambda client: client.upload(upload.Data(b"data" * 10000, convergence=b""))) # Make sure that only as many shares as necessary to satisfy # servers of happiness were pushed. d.addCallback(lambda results: self.failUnlessEqual(results.get_pushed_shares(), 3)) d.addCallback(lambda ign: self.failUnless(self._has_happy_share_distribution())) return d def test_problem_layout_ticket_1124(self): self.basedir = self.mktemp() d = self._setup_and_upload(k=2, n=4) # server 0: shares 0, 1, 2, 3 # server 1: shares 0, 3 # server 2: share 1 # server 3: share 2 # With this layout, an upload should just be satisfied that the current distribution is good enough, right? 
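# Reasoning note: with k=2 and happy=4, the layout sketched above already
# satisfies the happiness criterion -- four distinct servers can each
# contribute a distinct share (for example server 0 -> share 3, server 1 ->
# share 0, server 2 -> share 1, server 3 -> share 2), so any 2 of those 4
# servers hold at least 2 distinct shares between them.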
def _setup(ign): self._add_server_with_share(server_number=0, share_number=None) self._add_server_with_share(server_number=1, share_number=0) self._add_server_with_share(server_number=2, share_number=1) self._add_server_with_share(server_number=3, share_number=2) # Copy shares self._copy_share_to_server(3, 1) client = self.g.clients[0] client.encoding_params['happy'] = 4 return client d.addCallback(_setup) d.addCallback(lambda client: client.upload(upload.Data(b"data" * 10000, convergence=b""))) d.addCallback(lambda ign: self.failUnless(self._has_happy_share_distribution())) return d def test_happiness_with_some_readonly_servers(self): # Try the following layout # server 2: shares 0-9 # server 4: share 0, read-only # server 3: share 1, read-only # server 1: share 2, read-only self.basedir = self.mktemp() d = self._setup_and_upload() d.addCallback(lambda ign: self._add_server_with_share(server_number=2, share_number=0)) d.addCallback(lambda ign: self._add_server_with_share(server_number=3, share_number=1, readonly=True)) d.addCallback(lambda ign: self._add_server_with_share(server_number=1, share_number=2, readonly=True)) # Copy all of the other shares to server number 2 def _copy_shares(ign): for i in range(1, 10): self._copy_share_to_server(i, 2) d.addCallback(_copy_shares) # Remove server 0, and add another in its place d.addCallback(lambda ign: self.g.remove_server(self.g.servers_by_number[0].my_nodeid)) d.addCallback(lambda ign: self._add_server_with_share(server_number=4, share_number=0, readonly=True)) def _reset_encoding_parameters(ign, happy=4): client = self.g.clients[0] client.encoding_params['happy'] = happy return client d.addCallback(_reset_encoding_parameters) d.addCallback(lambda client: client.upload(upload.Data(b"data" * 10000, convergence=b""))) d.addCallback(lambda ign: self.failUnless(self._has_happy_share_distribution())) return d def test_happiness_with_all_readonly_servers(self): # server 3: share 1, read-only # server 1: share 2, read-only # server 2: shares 0-9, read-only # server 4: share 0, read-only # The idea with this test is to make sure that the survey of # read-only servers doesn't undercount servers of happiness self.basedir = self.mktemp() d = self._setup_and_upload() d.addCallback(lambda ign: self._add_server_with_share(server_number=4, share_number=0, readonly=True)) d.addCallback(lambda ign: self._add_server_with_share(server_number=3, share_number=1, readonly=True)) d.addCallback(lambda ign: self._add_server_with_share(server_number=1, share_number=2, readonly=True)) d.addCallback(lambda ign: self._add_server_with_share(server_number=2, share_number=0, readonly=True)) def _copy_shares(ign): for i in range(1, 10): self._copy_share_to_server(i, 2) d.addCallback(_copy_shares) d.addCallback(lambda ign: self.g.remove_server(self.g.servers_by_number[0].my_nodeid)) def _reset_encoding_parameters(ign, happy=4): client = self.g.clients[0] client.encoding_params['happy'] = happy return client d.addCallback(_reset_encoding_parameters) d.addCallback(lambda client: client.upload(upload.Data(b"data" * 10000, convergence=b""))) d.addCallback(lambda ign: self.failUnless(self._has_happy_share_distribution())) return d def test_dropped_servers_in_encoder(self): # The Encoder does its own "servers_of_happiness" check if it # happens to lose a bucket during an upload (it assumes that # the layout presented to it satisfies "servers_of_happiness" # until a failure occurs) # # This test simulates an upload where servers break after server # selection, but before they are 
written to. def _set_basedir(ign=None): self.basedir = self.mktemp() _set_basedir() d = self._setup_and_upload(); # Add 5 servers def _do_server_setup(ign): self._add_server(server_number=1) self._add_server(server_number=2) self._add_server(server_number=3) self._add_server(server_number=4) self._add_server(server_number=5) d.addCallback(_do_server_setup) # remove the original server # (necessary to ensure that the Tahoe2ServerSelector will distribute # all the shares) def _remove_server(ign): server = self.g.servers_by_number[0] self.g.remove_server(server.my_nodeid) d.addCallback(_remove_server) # This should succeed; we still have 4 servers, and the # happiness of the upload is 4. d.addCallback(lambda ign: self._do_upload_with_broken_servers(1)) # Now, do the same thing over again, but drop 2 servers instead # of 1. This should fail, because servers_of_happiness is 4 and # we can't satisfy that. d.addCallback(_set_basedir) d.addCallback(lambda ign: self._setup_and_upload()) d.addCallback(_do_server_setup) d.addCallback(_remove_server) d.addCallback(lambda ign: self.shouldFail(UploadUnhappinessError, "test_dropped_servers_in_encoder", "shares could be placed on only 3 server(s) " "such that any 3 of them have enough shares to " "recover the file, but we were asked to place " "shares on at least 4", self._do_upload_with_broken_servers, 2)) # Now do the same thing over again, but make some of the servers # readonly, break some of the ones that aren't, and make sure that # happiness accounting is preserved. d.addCallback(_set_basedir) d.addCallback(lambda ign: self._setup_and_upload()) def _do_server_setup_2(ign): self._add_server(1) self._add_server(2) self._add_server(3) self._add_server_with_share(4, 7, readonly=True) self._add_server_with_share(5, 8, readonly=True) d.addCallback(_do_server_setup_2) d.addCallback(_remove_server) d.addCallback(lambda ign: self._do_upload_with_broken_servers(1)) d.addCallback(_set_basedir) d.addCallback(lambda ign: self._setup_and_upload()) d.addCallback(_do_server_setup_2) d.addCallback(_remove_server) d.addCallback(lambda ign: self.shouldFail(UploadUnhappinessError, "test_dropped_servers_in_encoder", "shares could be placed on only 3 server(s) " "such that any 3 of them have enough shares to " "recover the file, but we were asked to place " "shares on at least 4", self._do_upload_with_broken_servers, 2)) return d def test_existing_share_detection(self): self.basedir = self.mktemp() d = self._setup_and_upload() # Our final setup should look like this: # server 1: shares 0 - 9, read-only # server 2: empty # server 3: empty # server 4: empty # The purpose of this test is to make sure that the server selector # knows about the shares on server 1, even though it is read-only. # It used to simply filter these out, which would cause the test # to fail when servers_of_happiness = 4. 
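# Descriptive note: the positional call below is
# _add_server_with_share(server_number=1, share_number=0, readonly=True),
# matching the helper's signature defined earlier in this class.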
d.addCallback(lambda ign: self._add_server_with_share(1, 0, True)) d.addCallback(lambda ign: self._add_server(2)) d.addCallback(lambda ign: self._add_server(3)) d.addCallback(lambda ign: self._add_server(4)) def _copy_shares(ign): for i in range(1, 10): self._copy_share_to_server(i, 1) d.addCallback(_copy_shares) d.addCallback(lambda ign: self.g.remove_server(self.g.servers_by_number[0].my_nodeid)) def _prepare_client(ign): client = self.g.clients[0] client.encoding_params['happy'] = 4 return client d.addCallback(_prepare_client) d.addCallback(lambda client: client.upload(upload.Data(b"data" * 10000, convergence=b""))) d.addCallback(lambda ign: self.failUnless(self._has_happy_share_distribution())) return d def test_query_counting(self): # If server selection fails, Tahoe2ServerSelector prints out a lot # of helpful diagnostic information, including query stats. # This test helps make sure that that information is accurate. self.basedir = self.mktemp() d = self._setup_and_upload() def _setup(ign): for i in range(1, 11): self._add_server(server_number=i) self.g.remove_server(self.g.servers_by_number[0].my_nodeid) c = self.g.clients[0] # We set happy to an unsatisfiable value so that we can check the # counting in the exception message. The same progress message # is also used when the upload is successful, but in that case it # only gets written to a log, so we can't see what it says. c.encoding_params['happy'] = 45 return c d.addCallback(_setup) d.addCallback(lambda c: self.shouldFail(UploadUnhappinessError, "test_query_counting", "0 queries placed some shares", c.upload, upload.Data(b"data" * 10000, convergence=b""))) # Now try with some readonly servers. We want to make sure that # the readonly server share discovery phase is counted correctly. def _reset(ign): self.basedir = self.mktemp() self.g = None d.addCallback(_reset) d.addCallback(lambda ign: self._setup_and_upload()) def _then(ign): for i in range(1, 11): self._add_server(server_number=i) self._add_server(server_number=11, readonly=True) self._add_server(server_number=12, readonly=True) self.g.remove_server(self.g.servers_by_number[0].my_nodeid) c = self.g.clients[0] c.encoding_params['happy'] = 45 return c d.addCallback(_then) d.addCallback(lambda c: self.shouldFail(UploadUnhappinessError, "test_query_counting", "4 placed none (of which 4 placed none due to " "the server being full", c.upload, upload.Data(b"data" * 10000, convergence=b""))) # Now try the case where the upload process finds a bunch of the # shares that it wants to place on the first server, including # the one that it wanted to allocate there. Though no shares will # be allocated in this request, it should still be called # productive, since it caused some homeless shares to be # removed. d.addCallback(_reset) d.addCallback(lambda ign: self._setup_and_upload()) def _next(ign): for i in range(1, 11): self._add_server(server_number=i) # Copy all of the shares to server 9, since that will be # the first one that the selector sees. 
for i in range(10): self._copy_share_to_server(i, 9) # Remove server 0, and its contents self.g.remove_server(self.g.servers_by_number[0].my_nodeid) # Make happiness unsatisfiable c = self.g.clients[0] c.encoding_params['happy'] = 45 return c d.addCallback(_next) d.addCallback(lambda c: self.shouldFail(UploadUnhappinessError, "test_query_counting", "0 queries placed some shares", c.upload, upload.Data(b"data" * 10000, convergence=b""))) return d def test_upper_limit_on_readonly_queries(self): self.basedir = self.mktemp() d = self._setup_and_upload() def _then(ign): for i in range(1, 11): self._add_server(server_number=i, readonly=True) self.g.remove_server(self.g.servers_by_number[0].my_nodeid) c = self.g.clients[0] c.encoding_params['k'] = 2 c.encoding_params['happy'] = 4 c.encoding_params['n'] = 4 return c d.addCallback(_then) d.addCallback(lambda client: self.shouldFail(UploadUnhappinessError, "test_upper_limit_on_readonly_queries", "sent 8 queries to 8 servers", client.upload, upload.Data(b'data' * 10000, convergence=b""))) return d def test_exception_messages_during_server_selection(self): # server 1: read-only, no shares # server 2: read-only, no shares # server 3: read-only, no shares # server 4: read-only, no shares # server 5: read-only, no shares # This will fail, but we want to make sure that the log messages # are informative about why it has failed. self.basedir = self.mktemp() d = self._setup_and_upload() d.addCallback(lambda ign: self._add_server(server_number=1, readonly=True)) d.addCallback(lambda ign: self._add_server(server_number=2, readonly=True)) d.addCallback(lambda ign: self._add_server(server_number=3, readonly=True)) d.addCallback(lambda ign: self._add_server(server_number=4, readonly=True)) d.addCallback(lambda ign: self._add_server(server_number=5, readonly=True)) d.addCallback(lambda ign: self.g.remove_server(self.g.servers_by_number[0].my_nodeid)) def _reset_encoding_parameters(ign, happy=4): client = self.g.clients[0] client.encoding_params['happy'] = happy return client d.addCallback(_reset_encoding_parameters) d.addCallback(lambda client: self.shouldFail(UploadUnhappinessError, "test_selection_exceptions", "placed 0 shares out of 10 " "total (10 homeless), want to place shares on at " "least 4 servers such that any 3 of them have " "enough shares to recover the file, " "sent 5 queries to 5 servers, 0 queries placed " "some shares, 5 placed none " "(of which 5 placed none due to the server being " "full and 0 placed none due to an error)", client.upload, upload.Data(b"data" * 10000, convergence=b""))) # server 1: read-only, no shares # server 2: broken, no shares # server 3: read-only, no shares # server 4: read-only, no shares # server 5: read-only, no shares def _reset(ign): self.basedir = self.mktemp() d.addCallback(_reset) d.addCallback(lambda ign: self._setup_and_upload()) d.addCallback(lambda ign: self._add_server(server_number=1, readonly=True)) d.addCallback(lambda ign: self._add_server(server_number=2)) def _break_server_2(ign): serverid = self.g.servers_by_number[2].my_nodeid self.g.break_server(serverid) d.addCallback(_break_server_2) d.addCallback(lambda ign: self._add_server(server_number=3, readonly=True)) d.addCallback(lambda ign: self._add_server(server_number=4, readonly=True)) d.addCallback(lambda ign: self._add_server(server_number=5, readonly=True)) d.addCallback(lambda ign: self.g.remove_server(self.g.servers_by_number[0].my_nodeid)) d.addCallback(_reset_encoding_parameters) d.addCallback(lambda client: 
self.shouldFail(UploadUnhappinessError, "test_selection_exceptions", "placed 0 shares out of 10 " "total (10 homeless), want to place shares on at " "least 4 servers such that any 3 of them have " "enough shares to recover the file, " "sent 5 queries to 5 servers, 0 queries placed " "some shares, 5 placed none " "(of which 4 placed none due to the server being " "full and 1 placed none due to an error)", client.upload, upload.Data(b"data" * 10000, convergence=b""))) # server 0, server 1 = empty, accepting shares # This should place all of the shares, but still fail with happy=4. # We want to make sure that the exception message is worded correctly. d.addCallback(_reset) d.addCallback(lambda ign: self._setup_grid()) d.addCallback(lambda ign: self._add_server(server_number=1)) d.addCallback(_reset_encoding_parameters) d.addCallback(lambda client: self.shouldFail(UploadUnhappinessError, "test_selection_exceptions", "shares could be placed or found on only 2 " "server(s). We were asked to place shares on at " "least 4 server(s) such that any 3 of them have " "enough shares to recover the file.", client.upload, upload.Data(b"data" * 10000, convergence=b""))) # servers 0 - 4 = empty, accepting shares # This too should place all the shares, and this too should fail, # but since the effective happiness is more than the k encoding # parameter, it should trigger a different error message than the one # above. d.addCallback(_reset) d.addCallback(lambda ign: self._setup_grid()) d.addCallback(lambda ign: self._add_server(server_number=1)) d.addCallback(lambda ign: self._add_server(server_number=2)) d.addCallback(lambda ign: self._add_server(server_number=3)) d.addCallback(lambda ign: self._add_server(server_number=4)) d.addCallback(_reset_encoding_parameters, happy=7) d.addCallback(lambda client: self.shouldFail(UploadUnhappinessError, "test_selection_exceptions", "shares could be placed on only 5 server(s) such " "that any 3 of them have enough shares to recover " "the file, but we were asked to place shares on " "at least 7 such servers.", client.upload, upload.Data(b"data" * 10000, convergence=b""))) # server 0: shares 0 - 9 # server 1: share 0, read-only # server 2: share 0, read-only # server 3: share 0, read-only # This should place all of the shares, but fail with happy=4. # Since the number of servers with shares is more than the number # necessary to reconstitute the file, this will trigger a different # error message than either of those above. d.addCallback(_reset) d.addCallback(lambda ign: self._setup_and_upload()) d.addCallback(lambda ign: self._add_server_with_share(server_number=1, share_number=0, readonly=True)) d.addCallback(lambda ign: self._add_server_with_share(server_number=2, share_number=0, readonly=True)) d.addCallback(lambda ign: self._add_server_with_share(server_number=3, share_number=0, readonly=True)) d.addCallback(_reset_encoding_parameters, happy=7) d.addCallback(lambda client: self.shouldFail(UploadUnhappinessError, "test_selection_exceptions", "shares could be placed or found on 4 server(s), " "but they are not spread out evenly enough to " "ensure that any 3 of these servers would have " "enough shares to recover the file. We were asked " "to place shares on at least 7 servers such that " "any 3 of them have enough shares to recover the " "file", client.upload, upload.Data(b"data" * 10000, convergence=b""))) return d def test_problem_layout_comment_187(self): # #778 comment 187 broke an initial attempt at a share # redistribution algorithm. 
This test is here to demonstrate the # breakage, and to test that subsequent algorithms don't also # break in the same way. self.basedir = self.mktemp() d = self._setup_and_upload(k=2, n=3) # server 1: shares 0, 1, 2, readonly # server 2: share 0, readonly # server 3: share 0 def _setup(ign): self._add_server_with_share(server_number=1, share_number=0, readonly=True) self._add_server_with_share(server_number=2, share_number=0, readonly=True) self._add_server_with_share(server_number=3, share_number=0) # Copy shares self._copy_share_to_server(1, 1) self._copy_share_to_server(2, 1) # Remove server 0 self.g.remove_server(self.g.servers_by_number[0].my_nodeid) client = self.g.clients[0] client.encoding_params['happy'] = 3 return client d.addCallback(_setup) d.addCallback(lambda client: client.upload(upload.Data(b"data" * 10000, convergence=b""))) d.addCallback(lambda ign: self.failUnless(self._has_happy_share_distribution())) return d def test_problem_layout_ticket_1118(self): # #1118 includes a report from a user who hit an assertion in # the upload code with this layout. # Note that 'servers of happiness' lets this test work now self.basedir = self.mktemp() d = self._setup_and_upload(k=2, n=4) # server 0: no shares # server 1: shares 0, 3 # server 3: share 1 # server 2: share 2 # The order that they get queries is 0, 1, 3, 2 def _setup(ign): self._add_server(server_number=0) self._add_server_with_share(server_number=1, share_number=0) self._add_server_with_share(server_number=2, share_number=2) self._add_server_with_share(server_number=3, share_number=1) # Copy shares self._copy_share_to_server(3, 1) self.delete_all_shares(self.get_serverdir(0)) client = self.g.clients[0] client.encoding_params['happy'] = 4 return client d.addCallback(_setup) return d def test_problem_layout_ticket_1128(self): # #1118 includes a report from a user who hit an assertion in # the upload code with this layout. self.basedir = self.mktemp() d = self._setup_and_upload(k=2, n=4) # server 0: no shares # server 1: shares 0, 3 # server 3: share 1 # server 2: share 2 # The order that they get queries is 0, 1, 3, 2 def _setup(ign): self._add_server(server_number=0) self._add_server_with_share(server_number=1, share_number=0) self._add_server_with_share(server_number=2, share_number=2) self._add_server_with_share(server_number=3, share_number=1) # Copy shares self._copy_share_to_server(3, 1) #Remove shares from server 0 self.delete_all_shares(self.get_serverdir(0)) client = self.g.clients[0] client.encoding_params['happy'] = 4 return client d.addCallback(_setup) d.addCallback(lambda client: client.upload(upload.Data(b"data" * 10000, convergence=b""))) d.addCallback(lambda ign: self.failUnless(self._has_happy_share_distribution())) return d def test_upload_succeeds_with_some_homeless_shares(self): # If the upload is forced to stop trying to place shares before # it has placed (or otherwise accounted) for all of them, but it # has placed enough to satisfy the upload health criteria that # we're using, it should still succeed. self.basedir = self.mktemp() d = self._setup_and_upload() def _server_setup(ign): # Add four servers so that we have a layout like this: # server 1: share 0, read-only # server 2: share 1, read-only # server 3: share 2, read-only # server 4: share 3, read-only # If we set happy = 4, the upload will manage to satisfy # servers of happiness, but not place all of the shares; we # want to test that the upload is declared successful in # this case. 
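# Descriptive note: after this setup there are four read-only servers, each
# holding exactly one distinct share (0 through 3), which is enough to
# satisfy happy=4 even though the remaining shares never find a home --
# hence the "some homeless shares" in the test name.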
self._add_server_with_share(server_number=1, share_number=0, readonly=True) self._add_server_with_share(server_number=2, share_number=1, readonly=True) self._add_server_with_share(server_number=3, share_number=2, readonly=True) self._add_server_with_share(server_number=4, share_number=3, readonly=True) # Remove server 0. self.g.remove_server(self.g.servers_by_number[0].my_nodeid) # Set the client appropriately c = self.g.clients[0] c.encoding_params['happy'] = 4 return c d.addCallback(_server_setup) d.addCallback(lambda client: client.upload(upload.Data(b"data" * 10000, convergence=b""))) d.addCallback(lambda ign: self.failUnless(self._has_happy_share_distribution())) return d def test_uploader_skips_over_servers_with_only_one_share(self): # We want to make sure that the redistribution logic ignores # servers with only one share, since placing these shares # elsewhere will at best keep happiness the same as it was, and # at worst hurt it. self.basedir = self.mktemp() d = self._setup_and_upload() def _server_setup(ign): # Add some servers so that the upload will need to # redistribute, but will first pass over a couple of servers # that don't have enough shares to redistribute before # finding one that does have shares to redistribute. self._add_server_with_share(server_number=1, share_number=0) self._add_server_with_share(server_number=2, share_number=2) self._add_server_with_share(server_number=3, share_number=1) self._add_server_with_share(server_number=8, share_number=4) self._add_server_with_share(server_number=5, share_number=5) self._add_server_with_share(server_number=10, share_number=7) for i in range(4): self._copy_share_to_server(i, 2) return self.g.clients[0] d.addCallback(_server_setup) d.addCallback(lambda client: client.upload(upload.Data(b"data" * 10000, convergence=b""))) d.addCallback(lambda ign: self.failUnless(self._has_happy_share_distribution())) return d def test_server_selector_bucket_abort(self): # If server selection for an upload fails due to an unhappy # layout, the server selection process should abort the buckets it # allocates before failing, so that the space can be re-used. self.basedir = self.mktemp() self.set_up_grid(num_servers=5) # Try to upload a file with happy=7, which is unsatisfiable with # the current grid. This will fail, but should not take up any # space on the storage servers after it fails. client = self.g.clients[0] client.encoding_params['happy'] = 7 d = defer.succeed(None) d.addCallback(lambda ignored: self.shouldFail(UploadUnhappinessError, "test_server_selection_bucket_abort", "", client.upload, upload.Data(b"data" * 10000, convergence=b""))) # wait for the abort messages to get there. def _turn_barrier(res): return fireEventually(res) d.addCallback(_turn_barrier) def _then(ignored): for server in list(self.g.servers_by_number.values()): self.failUnlessEqual(server.allocated_size(), 0) d.addCallback(_then) return d def test_encoder_bucket_abort(self): # If enough servers die in the process of encoding and uploading # a file to make the layout unhappy, we should cancel the # newly-allocated buckets before dying. 
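# Descriptive note on the _turn_barrier helpers used in these two abort
# tests: fireEventually(res) returns a Deferred that fires on a later turn,
# which gives the asynchronous bucket-abort messages a chance to be delivered
# before _then() checks that allocated_size() has returned to zero.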
self.basedir = self.mktemp() self.set_up_grid(num_servers=4) client = self.g.clients[0] client.encoding_params['happy'] = 7 d = defer.succeed(None) d.addCallback(lambda ignored: self.shouldFail(UploadUnhappinessError, "test_encoder_bucket_abort", "", self._do_upload_with_broken_servers, 1)) def _turn_barrier(res): return fireEventually(res) d.addCallback(_turn_barrier) def _then(ignored): for server in list(self.g.servers_by_number.values()): self.failUnlessEqual(server.allocated_size(), 0) d.addCallback(_then) return d def _set_up_nodes_extra_config(self, clientdir): cfgfn = os.path.join(clientdir, "tahoe.cfg") oldcfg = open(cfgfn, "r").read() f = open(cfgfn, "wt") f.write(oldcfg) f.write("\n") f.write("[client]\n") f.write("shares.needed = 7\n") f.write("shares.total = 12\n") f.write("\n") f.close() return None class EncryptAnUploadableTests(unittest.TestCase): """ Tests for ``EncryptAnUploadable``. """ def test_same_length(self): """ ``EncryptAnUploadable.read_encrypted`` returns ciphertext of the same length as the underlying plaintext. """ plaintext = b"hello world" uploadable = upload.FileHandle(BytesIO(plaintext), None) uploadable.set_default_encoding_parameters({ # These values shouldn't matter. "k": 3, "happy": 5, "n": 10, "max_segment_size": 128 * 1024, }) encrypter = upload.EncryptAnUploadable(uploadable) ciphertext = b"".join(self.successResultOf(encrypter.read_encrypted(1024, False))) self.assertEqual(len(ciphertext), len(plaintext)) @given(just(b"hello world"), integers(min_value=0, max_value=len(b"hello world"))) def test_known_result(self, plaintext, split_at): """ ``EncryptAnUploadable.read_encrypted`` returns a known-correct ciphertext string for certain inputs. The ciphertext is independent of the read sizes. """ convergence = b"\x42" * 16 uploadable = upload.FileHandle(BytesIO(plaintext), convergence) uploadable.set_default_encoding_parameters({ # The convergence key is a function of k, n, and max_segment_size # (among other things). The value for happy doesn't matter # though. "k": 3, "happy": 5, "n": 10, "max_segment_size": 128 * 1024, }) encrypter = upload.EncryptAnUploadable(uploadable) def read(n): return b"".join(self.successResultOf(encrypter.read_encrypted(n, False))) # Read the string in one or two pieces to make sure underlying state # is maintained properly. first = read(split_at) second = read(len(plaintext) - split_at) third = read(1) ciphertext = first + second + third self.assertEqual( b"Jd2LHCRXozwrEJc=", b64encode(ciphertext), ) def test_large_read(self): """ ``EncryptAnUploadable.read_encrypted`` succeeds even when the requested data length is much larger than the chunk size. """ convergence = b"\x42" * 16 # 4kB of plaintext plaintext = b"\xde\xad\xbe\xef" * 1024 uploadable = upload.FileHandle(BytesIO(plaintext), convergence) uploadable.set_default_encoding_parameters({ "k": 3, "happy": 5, "n": 10, "max_segment_size": 128 * 1024, }) # Make the chunk size very small so we don't have to operate on a huge # amount of data to exercise the relevant codepath. encrypter = upload.EncryptAnUploadable(uploadable, chunk_size=1) d = encrypter.read_encrypted(len(plaintext), False) ciphertext = self.successResultOf(d) self.assertEqual( list(map(len, ciphertext)), # Chunk size was specified as 1 above so we will get the whole # plaintext in one byte chunks. 
[1] * len(plaintext), ) # TODO: # upload with exactly 75 servers (shares_of_happiness) # have a download fail # cancel a download (need to implement more cancel stuff) # from test_encode: # NoNetworkGrid, upload part of ciphertext, kill server, continue upload # check with Kevan, they want to live in test_upload, existing tests might cover # def test_lost_one_shareholder(self): # these are upload-side tests # def test_lost_one_shareholder_early(self): # def test_lost_many_shareholders(self): # def test_lost_all_shareholders(self): tahoe_lafs-1.20.0/src/allmydata/test/test_uri.py0000644000000000000000000007543013615410400016556 0ustar00""" Tests for allmydata.uri. Ported to Python 3. """ import os from twisted.trial import unittest from allmydata import uri from allmydata.util import hashutil, base32 from allmydata.interfaces import IURI, IFileURI, IDirnodeURI, IMutableFileURI, \ IVerifierURI, CapConstraintError import allmydata.test.common_util as testutil class Literal(testutil.ReallyEqualMixin, unittest.TestCase): def _help_test(self, data): u = uri.LiteralFileURI(data) self.failUnless(IURI.providedBy(u)) self.failUnless(IFileURI.providedBy(u)) self.failIf(IDirnodeURI.providedBy(u)) self.failUnlessReallyEqual(u.data, data) self.failUnlessReallyEqual(u.get_size(), len(data)) self.failUnless(u.is_readonly()) self.failIf(u.is_mutable()) u2 = uri.from_string(u.to_string()) self.failUnless(IURI.providedBy(u2)) self.failUnless(IFileURI.providedBy(u2)) self.failIf(IDirnodeURI.providedBy(u2)) self.failUnlessReallyEqual(u2.data, data) self.failUnlessReallyEqual(u2.get_size(), len(data)) self.failUnless(u2.is_readonly()) self.failIf(u2.is_mutable()) u2i = uri.from_string(u.to_string(), deep_immutable=True) self.failUnless(IFileURI.providedBy(u2i)) self.failIf(IDirnodeURI.providedBy(u2i)) self.failUnlessReallyEqual(u2i.data, data) self.failUnlessReallyEqual(u2i.get_size(), len(data)) self.failUnless(u2i.is_readonly()) self.failIf(u2i.is_mutable()) u3 = u.get_readonly() self.failUnlessIdentical(u, u3) self.failUnlessReallyEqual(u.get_verify_cap(), None) def test_empty(self): data = b"" # This data is some *very* small data! return self._help_test(data) def test_pack(self): data = b"This is some small data" return self._help_test(data) def test_nonascii(self): data = b"This contains \x00 and URI:LIT: and \n, oh my." 
return self._help_test(data) class Compare(testutil.ReallyEqualMixin, unittest.TestCase): def test_compare(self): lit1 = uri.LiteralFileURI(b"some data") fileURI = b'URI:CHK:f5ahxa25t4qkktywz6teyfvcx4:opuioq7tj2y6idzfp6cazehtmgs5fdcebcz3cygrxyydvcozrmeq:3:10:345834' chk1 = uri.CHKFileURI.init_from_string(fileURI) chk2 = uri.CHKFileURI.init_from_string(fileURI) unk = uri.UnknownURI(b"lafs://from_the_future") self.failIfEqual(lit1, chk1) self.failUnlessReallyEqual(chk1, chk2) self.failIfEqual(chk1, "not actually a URI") # these should be hashable too s = set([lit1, chk1, chk2, unk]) self.failUnlessReallyEqual(len(s), 3) # since chk1==chk2 def test_is_uri(self): lit1 = uri.LiteralFileURI(b"some data").to_string() self.failUnless(uri.is_uri(lit1)) self.failIf(uri.is_uri(None)) def test_is_literal_file_uri(self): lit1 = uri.LiteralFileURI(b"some data").to_string() self.failUnless(uri.is_literal_file_uri(lit1)) self.failIf(uri.is_literal_file_uri(None)) self.failIf(uri.is_literal_file_uri("foo")) self.failIf(uri.is_literal_file_uri("ro.foo")) self.failIf(uri.is_literal_file_uri(b"URI:LITfoo")) self.failUnless(uri.is_literal_file_uri("ro.URI:LIT:foo")) self.failUnless(uri.is_literal_file_uri("imm.URI:LIT:foo")) def test_has_uri_prefix(self): self.failUnless(uri.has_uri_prefix(b"URI:foo")) self.failUnless(uri.has_uri_prefix(b"ro.URI:foo")) self.failUnless(uri.has_uri_prefix(b"imm.URI:foo")) self.failUnless(uri.has_uri_prefix("URI:foo")) self.failUnless(uri.has_uri_prefix("ro.URI:foo")) self.failUnless(uri.has_uri_prefix("imm.URI:foo")) self.failIf(uri.has_uri_prefix(None)) self.failIf(uri.has_uri_prefix("foo")) class CHKFile(testutil.ReallyEqualMixin, unittest.TestCase): def test_pack(self): key = b"\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" storage_index = hashutil.storage_index_hash(key) uri_extension_hash = hashutil.uri_extension_hash(b"stuff") needed_shares = 25 total_shares = 100 size = 1234 u = uri.CHKFileURI(key=key, uri_extension_hash=uri_extension_hash, needed_shares=needed_shares, total_shares=total_shares, size=size) self.failUnlessReallyEqual(u.get_storage_index(), storage_index) self.failUnlessReallyEqual(u.key, key) self.failUnlessReallyEqual(u.uri_extension_hash, uri_extension_hash) self.failUnlessReallyEqual(u.needed_shares, needed_shares) self.failUnlessReallyEqual(u.total_shares, total_shares) self.failUnlessReallyEqual(u.size, size) self.failUnless(u.is_readonly()) self.failIf(u.is_mutable()) self.failUnless(IURI.providedBy(u)) self.failUnless(IFileURI.providedBy(u)) self.failIf(IDirnodeURI.providedBy(u)) self.failUnlessReallyEqual(u.get_size(), 1234) u_ro = u.get_readonly() self.failUnlessIdentical(u, u_ro) u2 = uri.from_string(u.to_string()) self.failUnlessReallyEqual(u2.get_storage_index(), storage_index) self.failUnlessReallyEqual(u2.key, key) self.failUnlessReallyEqual(u2.uri_extension_hash, uri_extension_hash) self.failUnlessReallyEqual(u2.needed_shares, needed_shares) self.failUnlessReallyEqual(u2.total_shares, total_shares) self.failUnlessReallyEqual(u2.size, size) self.failUnless(u2.is_readonly()) self.failIf(u2.is_mutable()) self.failUnless(IURI.providedBy(u2)) self.failUnless(IFileURI.providedBy(u2)) self.failIf(IDirnodeURI.providedBy(u2)) self.failUnlessReallyEqual(u2.get_size(), 1234) u2i = uri.from_string(u.to_string(), deep_immutable=True) self.failUnlessReallyEqual(u.to_string(), u2i.to_string()) u2ro = uri.from_string(uri.ALLEGED_READONLY_PREFIX + u.to_string()) self.failUnlessReallyEqual(u.to_string(), u2ro.to_string()) u2imm = 
uri.from_string(uri.ALLEGED_IMMUTABLE_PREFIX + u.to_string()) self.failUnlessReallyEqual(u.to_string(), u2imm.to_string()) v = u.get_verify_cap() self.failUnless(isinstance(v.to_string(), bytes)) self.failUnless(v.is_readonly()) self.failIf(v.is_mutable()) v2 = uri.from_string(v.to_string()) self.failUnlessReallyEqual(v, v2) v3 = uri.CHKFileVerifierURI(storage_index=b"\x00"*16, uri_extension_hash=b"\x00"*32, needed_shares=3, total_shares=10, size=1234) self.failUnless(isinstance(v3.to_string(), bytes)) self.failUnless(v3.is_readonly()) self.failIf(v3.is_mutable()) def test_pack_badly(self): key = b"\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" storage_index = hashutil.storage_index_hash(key) uri_extension_hash = hashutil.uri_extension_hash(b"stuff") needed_shares = 25 total_shares = 100 size = 1234 self.failUnlessRaises(TypeError, uri.CHKFileURI, key=key, uri_extension_hash=uri_extension_hash, needed_shares=needed_shares, total_shares=total_shares, size=size, bogus_extra_argument="reject me", ) self.failUnlessRaises(TypeError, uri.CHKFileVerifierURI, bogus="bogus") self.failUnlessRaises(TypeError, uri.CHKFileVerifierURI, storage_index=storage_index, uri_extension_hash=uri_extension_hash, needed_shares=3, total_shares=10, # leave size= missing ) class Extension(testutil.ReallyEqualMixin, unittest.TestCase): def test_pack(self): data = {b"stuff": b"value", b"size": 12, b"needed_shares": 3, b"big_hash": hashutil.tagged_hash(b"foo", b"bar"), } ext = uri.pack_extension(data) d = uri.unpack_extension(ext) self.failUnlessReallyEqual(d["stuff"], b"value") self.failUnlessReallyEqual(d["size"], 12) self.failUnlessReallyEqual(d["big_hash"], hashutil.tagged_hash(b"foo", b"bar")) readable = uri.unpack_extension_readable(ext) self.failUnlessReallyEqual(readable["needed_shares"], 3) self.failUnlessReallyEqual(readable["stuff"], b"value") self.failUnlessReallyEqual(readable["size"], 12) self.failUnlessReallyEqual(readable["big_hash"], base32.b2a(hashutil.tagged_hash(b"foo", b"bar"))) self.failUnlessReallyEqual(readable["UEB_hash"], base32.b2a(hashutil.uri_extension_hash(ext))) class Unknown(testutil.ReallyEqualMixin, unittest.TestCase): def test_from_future(self): # any URI type that we don't recognize should be treated as unknown future_uri = b"I am a URI from the future. Whatever you do, don't " u = uri.from_string(future_uri) self.failUnless(isinstance(u, uri.UnknownURI)) self.failUnlessReallyEqual(u.to_string(), future_uri) self.failUnless(u.get_readonly() is None) self.failUnless(u.get_error() is None) future_uri_unicode = future_uri.decode("utf-8") self.assertEqual(future_uri, uri.from_string(future_uri_unicode).to_string()) u2 = uri.UnknownURI(future_uri, error=CapConstraintError("...")) self.failUnlessReallyEqual(u.to_string(), future_uri) self.failUnless(u2.get_readonly() is None) self.failUnless(isinstance(u2.get_error(), CapConstraintError)) # Future caps might have non-ASCII chars in them. (Or maybe not, who can tell about the future?) future_uri = u"I am a cap from the \u263A future. 
Whatever you ".encode("utf-8") u = uri.from_string(future_uri) self.failUnless(isinstance(u, uri.UnknownURI)) self.failUnlessReallyEqual(u.to_string(), future_uri) self.failUnless(u.get_readonly() is None) self.failUnless(u.get_error() is None) u2 = uri.UnknownURI(future_uri, error=CapConstraintError("...")) self.failUnlessReallyEqual(u.to_string(), future_uri) self.failUnless(u2.get_readonly() is None) self.failUnless(isinstance(u2.get_error(), CapConstraintError)) class Constraint(testutil.ReallyEqualMixin, unittest.TestCase): def test_constraint(self): bad = b"http://127.0.0.1:3456/uri/URI%3ADIR2%3Agh3l5rbvnv2333mrfvalmjfr4i%3Alz6l7u3z3b7g37s4zkdmfpx5ly4ib4m6thrpbusi6ys62qtc6mma/" self.failUnlessRaises(uri.BadURIError, uri.DirectoryURI.init_from_string, bad) fileURI = b'URI:CHK:gh3l5rbvnv2333mrfvalmjfr4i:lz6l7u3z3b7g37s4zkdmfpx5ly4ib4m6thrpbusi6ys62qtc6mma:3:10:345834' uri.CHKFileURI.init_from_string(fileURI) class Mutable(testutil.ReallyEqualMixin, unittest.TestCase): def setUp(self): self.writekey = b"\x01" * 16 self.fingerprint = b"\x02" * 32 self.readkey = hashutil.ssk_readkey_hash(self.writekey) self.storage_index = hashutil.ssk_storage_index_hash(self.readkey) def test_pack(self): u = uri.WriteableSSKFileURI(self.writekey, self.fingerprint) self.failUnlessReallyEqual(u.writekey, self.writekey) self.failUnlessReallyEqual(u.fingerprint, self.fingerprint) self.failIf(u.is_readonly()) self.failUnless(u.is_mutable()) self.failUnless(IURI.providedBy(u)) self.failUnless(IMutableFileURI.providedBy(u)) self.failIf(IDirnodeURI.providedBy(u)) self.failUnless("WriteableSSKFileURI" in str(u)) u2 = uri.from_string(u.to_string()) self.failUnlessReallyEqual(u2.writekey, self.writekey) self.failUnlessReallyEqual(u2.fingerprint, self.fingerprint) self.failIf(u2.is_readonly()) self.failUnless(u2.is_mutable()) self.failUnless(IURI.providedBy(u2)) self.failUnless(IMutableFileURI.providedBy(u2)) self.failIf(IDirnodeURI.providedBy(u2)) u2i = uri.from_string(u.to_string(), deep_immutable=True) self.failUnless(isinstance(u2i, uri.UnknownURI), u2i) u2ro = uri.from_string(uri.ALLEGED_READONLY_PREFIX + u.to_string()) self.failUnless(isinstance(u2ro, uri.UnknownURI), u2ro) u2imm = uri.from_string(uri.ALLEGED_IMMUTABLE_PREFIX + u.to_string()) self.failUnless(isinstance(u2imm, uri.UnknownURI), u2imm) u3 = u2.get_readonly() readkey = hashutil.ssk_readkey_hash(self.writekey) self.failUnlessReallyEqual(u3.fingerprint, self.fingerprint) self.failUnlessReallyEqual(u3.readkey, readkey) self.failUnless(u3.is_readonly()) self.failUnless(u3.is_mutable()) self.failUnless(IURI.providedBy(u3)) self.failUnless(IMutableFileURI.providedBy(u3)) self.failIf(IDirnodeURI.providedBy(u3)) u3i = uri.from_string(u3.to_string(), deep_immutable=True) self.failUnless(isinstance(u3i, uri.UnknownURI), u3i) u3ro = uri.from_string(uri.ALLEGED_READONLY_PREFIX + u3.to_string()) self.failUnlessReallyEqual(u3.to_string(), u3ro.to_string()) u3imm = uri.from_string(uri.ALLEGED_IMMUTABLE_PREFIX + u3.to_string()) self.failUnless(isinstance(u3imm, uri.UnknownURI), u3imm) u4 = uri.ReadonlySSKFileURI(readkey, self.fingerprint) self.failUnlessReallyEqual(u4.fingerprint, self.fingerprint) self.failUnlessReallyEqual(u4.readkey, readkey) self.failUnless(u4.is_readonly()) self.failUnless(u4.is_mutable()) self.failUnless(IURI.providedBy(u4)) self.failUnless(IMutableFileURI.providedBy(u4)) self.failIf(IDirnodeURI.providedBy(u4)) u4i = uri.from_string(u4.to_string(), deep_immutable=True) self.failUnless(isinstance(u4i, uri.UnknownURI), u4i) u4ro = 
uri.from_string(uri.ALLEGED_READONLY_PREFIX + u4.to_string()) self.failUnlessReallyEqual(u4.to_string(), u4ro.to_string()) u4imm = uri.from_string(uri.ALLEGED_IMMUTABLE_PREFIX + u4.to_string()) self.failUnless(isinstance(u4imm, uri.UnknownURI), u4imm) u4a = uri.from_string(u4.to_string()) self.failUnlessReallyEqual(u4a, u4) self.failUnless("ReadonlySSKFileURI" in str(u4a)) self.failUnlessIdentical(u4a.get_readonly(), u4a) u5 = u4.get_verify_cap() self.failUnless(IVerifierURI.providedBy(u5)) self.failUnlessReallyEqual(u5.get_storage_index(), u.get_storage_index()) u7 = u.get_verify_cap() self.failUnless(IVerifierURI.providedBy(u7)) self.failUnlessReallyEqual(u7.get_storage_index(), u.get_storage_index()) def test_writeable_mdmf_cap(self): u1 = uri.WriteableMDMFFileURI(self.writekey, self.fingerprint) cap = u1.to_string() u = uri.WriteableMDMFFileURI.init_from_string(cap) self.failUnless(IMutableFileURI.providedBy(u)) self.failUnlessReallyEqual(u.fingerprint, self.fingerprint) self.failUnlessReallyEqual(u.writekey, self.writekey) self.failUnless(u.is_mutable()) self.failIf(u.is_readonly()) self.failUnlessEqual(cap, u.to_string()) # Now get a readonly cap from the writeable cap, and test that it # degrades gracefully. ru = u.get_readonly() self.failUnlessReallyEqual(self.readkey, ru.readkey) self.failUnlessReallyEqual(self.fingerprint, ru.fingerprint) self.failUnless(ru.is_mutable()) self.failUnless(ru.is_readonly()) # Now get a verifier cap. vu = ru.get_verify_cap() self.failUnlessReallyEqual(self.storage_index, vu.storage_index) self.failUnlessReallyEqual(self.fingerprint, vu.fingerprint) self.failUnless(IVerifierURI.providedBy(vu)) def test_readonly_mdmf_cap(self): u1 = uri.ReadonlyMDMFFileURI(self.readkey, self.fingerprint) cap = u1.to_string() u2 = uri.ReadonlyMDMFFileURI.init_from_string(cap) self.failUnlessReallyEqual(u2.fingerprint, self.fingerprint) self.failUnlessReallyEqual(u2.readkey, self.readkey) self.failUnless(u2.is_readonly()) self.failUnless(u2.is_mutable()) vu = u2.get_verify_cap() self.failUnlessEqual(vu.storage_index, self.storage_index) self.failUnlessEqual(vu.fingerprint, self.fingerprint) def test_create_writeable_mdmf_cap_from_readcap(self): # we shouldn't be able to create a writeable MDMF cap given only a # readcap. 
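# The assertions above, and the tests that follow, pin down Tahoe-LAFS cap
# attenuation: a writecap can always be degraded to a readcap, and a readcap
# to a verify cap, but never the other way around.  A minimal sketch of that
# chain, using only constructors and hash helpers that already appear in this
# module (the fixture byte strings are illustrative):
def _sketch_mdmf_attenuation():
    writekey = b"\x01" * 16
    fingerprint = b"\x02" * 32
    writecap = uri.WriteableMDMFFileURI(writekey, fingerprint)
    readcap = writecap.get_readonly()        # readkey derived from the writekey
    verifycap = readcap.get_verify_cap()     # storage index + fingerprint only
    assert readcap.readkey == hashutil.ssk_readkey_hash(writekey)
    assert verifycap.storage_index == hashutil.ssk_storage_index_hash(readcap.readkey)
    # Recovering write authority from a readcap is impossible by design; the
    # test below checks that the parser refuses to do it.
    return writecap, readcap, verifycap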
u1 = uri.ReadonlyMDMFFileURI(self.readkey, self.fingerprint) cap = u1.to_string() self.failUnlessRaises(uri.BadURIError, uri.WriteableMDMFFileURI.init_from_string, cap) def test_create_writeable_mdmf_cap_from_verifycap(self): u1 = uri.MDMFVerifierURI(self.storage_index, self.fingerprint) cap = u1.to_string() self.failUnlessRaises(uri.BadURIError, uri.WriteableMDMFFileURI.init_from_string, cap) def test_create_readonly_mdmf_cap_from_verifycap(self): u1 = uri.MDMFVerifierURI(self.storage_index, self.fingerprint) cap = u1.to_string() self.failUnlessRaises(uri.BadURIError, uri.ReadonlyMDMFFileURI.init_from_string, cap) def test_mdmf_verifier_cap(self): u1 = uri.MDMFVerifierURI(self.storage_index, self.fingerprint) self.failUnless(u1.is_readonly()) self.failIf(u1.is_mutable()) self.failUnlessReallyEqual(self.storage_index, u1.storage_index) self.failUnlessReallyEqual(self.fingerprint, u1.fingerprint) cap = u1.to_string() u2 = uri.MDMFVerifierURI.init_from_string(cap) self.failUnless(u2.is_readonly()) self.failIf(u2.is_mutable()) self.failUnlessReallyEqual(self.storage_index, u2.storage_index) self.failUnlessReallyEqual(self.fingerprint, u2.fingerprint) u3 = u2.get_readonly() self.failUnlessReallyEqual(u3, u2) u4 = u2.get_verify_cap() self.failUnlessReallyEqual(u4, u2) def test_mdmf_cap_ignore_extensions(self): # MDMF caps can be arbitrarily extended after the fingerprint and # key/storage index fields. tahoe-1.9 is supposed to ignore any # extensions, and not add any itself. u1 = uri.WriteableMDMFFileURI(self.writekey, self.fingerprint) cap = u1.to_string() cap2 = cap+b":I COME FROM THE FUTURE" u2 = uri.WriteableMDMFFileURI.init_from_string(cap2) self.failUnlessReallyEqual(self.writekey, u2.writekey) self.failUnlessReallyEqual(self.fingerprint, u2.fingerprint) self.failIf(u2.is_readonly()) self.failUnless(u2.is_mutable()) cap3 = cap+b":" + os.urandom(40) u3 = uri.WriteableMDMFFileURI.init_from_string(cap3) self.failUnlessReallyEqual(self.writekey, u3.writekey) self.failUnlessReallyEqual(self.fingerprint, u3.fingerprint) self.failIf(u3.is_readonly()) self.failUnless(u3.is_mutable()) cap4 = u1.get_readonly().to_string()+b":ooh scary future stuff" u4 = uri.from_string_mutable_filenode(cap4) self.failUnlessReallyEqual(self.readkey, u4.readkey) self.failUnlessReallyEqual(self.fingerprint, u4.fingerprint) self.failUnless(u4.is_readonly()) self.failUnless(u4.is_mutable()) cap5 = u1.get_verify_cap().to_string()+b":spoilers!" u5 = uri.from_string(cap5) self.failUnlessReallyEqual(self.storage_index, u5.storage_index) self.failUnlessReallyEqual(self.fingerprint, u5.fingerprint) self.failUnless(u5.is_readonly()) self.failIf(u5.is_mutable()) def test_mdmf_from_string(self): # Make sure that the from_string utility function works with # MDMF caps. 
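# The comment above introduces the generic uri.from_string() dispatcher, and
# an earlier comment in this class notes that MDMF caps must ignore unknown
# trailing extension fields.  A compact sketch of both properties, with
# illustrative fixture bytes:
def _sketch_mdmf_parsing():
    writecap = uri.WriteableMDMFFileURI(b"\x01" * 16, b"\x02" * 32)
    cap = writecap.to_string()
    # from_string() recognises the MDMF prefix and round-trips the cap.
    assert uri.from_string(cap) == writecap
    # Unknown trailing fields are ignored rather than rejected.
    extended = uri.WriteableMDMFFileURI.init_from_string(cap + b":future-field")
    assert extended.writekey == writecap.writekey
    assert extended.fingerprint == writecap.fingerprint
    return extended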
u1 = uri.WriteableMDMFFileURI(self.writekey, self.fingerprint) cap = u1.to_string() self.failUnless(uri.is_uri(cap)) u2 = uri.from_string(cap) self.failUnlessReallyEqual(u1, u2) u3 = uri.from_string_mutable_filenode(cap) self.failUnlessEqual(u3, u1) u1 = uri.ReadonlyMDMFFileURI(self.readkey, self.fingerprint) cap = u1.to_string() self.failUnless(uri.is_uri(cap)) u2 = uri.from_string(cap) self.failUnlessReallyEqual(u1, u2) u3 = uri.from_string_mutable_filenode(cap) self.failUnlessEqual(u3, u1) u1 = uri.MDMFVerifierURI(self.storage_index, self.fingerprint) cap = u1.to_string() self.failUnless(uri.is_uri(cap)) u2 = uri.from_string(cap) self.failUnlessReallyEqual(u1, u2) u3 = uri.from_string_verifier(cap) self.failUnlessEqual(u3, u1) class Dirnode(testutil.ReallyEqualMixin, unittest.TestCase): def test_pack(self): writekey = b"\x01" * 16 fingerprint = b"\x02" * 32 n = uri.WriteableSSKFileURI(writekey, fingerprint) u1 = uri.DirectoryURI(n) self.failIf(u1.is_readonly()) self.failUnless(u1.is_mutable()) self.failUnless(IURI.providedBy(u1)) self.failIf(IFileURI.providedBy(u1)) self.failUnless(IDirnodeURI.providedBy(u1)) self.failUnless("DirectoryURI" in str(u1)) u1_filenode = u1.get_filenode_cap() self.failUnless(u1_filenode.is_mutable()) self.failIf(u1_filenode.is_readonly()) u2 = uri.from_string(u1.to_string()) self.failUnlessReallyEqual(u1.to_string(), u2.to_string()) self.failIf(u2.is_readonly()) self.failUnless(u2.is_mutable()) self.failUnless(IURI.providedBy(u2)) self.failIf(IFileURI.providedBy(u2)) self.failUnless(IDirnodeURI.providedBy(u2)) u2i = uri.from_string(u1.to_string(), deep_immutable=True) self.failUnless(isinstance(u2i, uri.UnknownURI)) u3 = u2.get_readonly() self.failUnless(u3.is_readonly()) self.failUnless(u3.is_mutable()) self.failUnless(IURI.providedBy(u3)) self.failIf(IFileURI.providedBy(u3)) self.failUnless(IDirnodeURI.providedBy(u3)) u3i = uri.from_string(u2.to_string(), deep_immutable=True) self.failUnless(isinstance(u3i, uri.UnknownURI)) u3n = u3._filenode_uri self.failUnless(u3n.is_readonly()) self.failUnless(u3n.is_mutable()) u3_filenode = u3.get_filenode_cap() self.failUnless(u3_filenode.is_mutable()) self.failUnless(u3_filenode.is_readonly()) u3a = uri.from_string(u3.to_string()) self.failUnlessIdentical(u3a, u3a.get_readonly()) u4 = uri.ReadonlyDirectoryURI(u2._filenode_uri.get_readonly()) self.failUnlessReallyEqual(u4.to_string(), u3.to_string()) self.failUnless(u4.is_readonly()) self.failUnless(u4.is_mutable()) self.failUnless(IURI.providedBy(u4)) self.failIf(IFileURI.providedBy(u4)) self.failUnless(IDirnodeURI.providedBy(u4)) u4_verifier = u4.get_verify_cap() u4_verifier_filenode = u4_verifier.get_filenode_cap() self.failUnless(isinstance(u4_verifier_filenode, uri.SSKVerifierURI)) verifiers = [u1.get_verify_cap(), u2.get_verify_cap(), u3.get_verify_cap(), u4.get_verify_cap(), uri.DirectoryURIVerifier(n.get_verify_cap()), ] for v in verifiers: self.failUnless(IVerifierURI.providedBy(v)) self.failUnlessReallyEqual(v._filenode_uri, u1.get_verify_cap()._filenode_uri) def test_immutable(self): readkey = b"\x01" * 16 uri_extension_hash = hashutil.uri_extension_hash(b"stuff") needed_shares = 3 total_shares = 10 size = 1234 fnuri = uri.CHKFileURI(key=readkey, uri_extension_hash=uri_extension_hash, needed_shares=needed_shares, total_shares=total_shares, size=size) fncap = fnuri.to_string() self.failUnlessReallyEqual(fncap, b"URI:CHK:aeaqcaibaeaqcaibaeaqcaibae:nf3nimquen7aeqm36ekgxomalstenpkvsdmf6fplj7swdatbv5oa:3:10:1234") u1 = uri.ImmutableDirectoryURI(fnuri) 
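# The fncap literal asserted just above makes the CHK cap layout visible:
#   URI:CHK:<base32 read key>:<base32 UEB hash>:<k>:<N>:<size>
# where k/N are the erasure-coding parameters and size is the file length in
# bytes; the storage index is derived from the read key rather than stored in
# the cap.  A short sketch that re-parses that exact string with the same
# constructor the Compare/Constraint tests use (illustrative only):
def _sketch_chk_cap_fields():
    fncap = b"URI:CHK:aeaqcaibaeaqcaibaeaqcaibae:nf3nimquen7aeqm36ekgxomalstenpkvsdmf6fplj7swdatbv5oa:3:10:1234"
    chk = uri.CHKFileURI.init_from_string(fncap)
    assert (chk.needed_shares, chk.total_shares, chk.size) == (3, 10, 1234)
    assert chk.get_storage_index() == hashutil.storage_index_hash(chk.key)
    return chk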
self.failUnless(u1.is_readonly()) self.failIf(u1.is_mutable()) self.failUnless(IURI.providedBy(u1)) self.failIf(IFileURI.providedBy(u1)) self.failUnless(IDirnodeURI.providedBy(u1)) self.failUnless("DirectoryURI" in str(u1)) u1_filenode = u1.get_filenode_cap() self.failIf(u1_filenode.is_mutable()) self.failUnless(u1_filenode.is_readonly()) self.failUnlessReallyEqual(u1_filenode.to_string(), fncap) self.failUnless(str(u1)) u2 = uri.from_string(u1.to_string()) self.failUnlessReallyEqual(u1.to_string(), u2.to_string()) self.failUnless(u2.is_readonly()) self.failIf(u2.is_mutable()) self.failUnless(IURI.providedBy(u2)) self.failIf(IFileURI.providedBy(u2)) self.failUnless(IDirnodeURI.providedBy(u2)) u2i = uri.from_string(u1.to_string(), deep_immutable=True) self.failUnlessReallyEqual(u1.to_string(), u2i.to_string()) u3 = u2.get_readonly() self.failUnlessReallyEqual(u3.to_string(), u2.to_string()) self.failUnless(str(u3)) u3i = uri.from_string(u2.to_string(), deep_immutable=True) self.failUnlessReallyEqual(u2.to_string(), u3i.to_string()) u2_verifier = u2.get_verify_cap() self.failUnless(isinstance(u2_verifier, uri.ImmutableDirectoryURIVerifier), u2_verifier) self.failUnless(IVerifierURI.providedBy(u2_verifier)) u2vs = u2_verifier.to_string() # URI:DIR2-CHK-Verifier:$key:$ueb:$k:$n:$size self.failUnless(u2vs.startswith(b"URI:DIR2-CHK-Verifier:"), u2vs) u2_verifier_fileuri = u2_verifier.get_filenode_cap() self.failUnless(IVerifierURI.providedBy(u2_verifier_fileuri)) u2vfs = u2_verifier_fileuri.to_string() # URI:CHK-Verifier:$key:$ueb:$k:$n:$size self.failUnlessReallyEqual(u2vfs, fnuri.get_verify_cap().to_string()) self.failUnlessReallyEqual(u2vs[len(b"URI:DIR2-"):], u2vfs[len(b"URI:"):]) self.failUnless(str(u2_verifier)) def test_literal(self): u0 = uri.LiteralFileURI(b"data") u1 = uri.LiteralDirectoryURI(u0) self.failUnless(str(u1)) self.failUnlessReallyEqual(u1.to_string(), b"URI:DIR2-LIT:mrqxiyi") self.failUnless(u1.is_readonly()) self.failIf(u1.is_mutable()) self.failUnless(IURI.providedBy(u1)) self.failIf(IFileURI.providedBy(u1)) self.failUnless(IDirnodeURI.providedBy(u1)) self.failUnlessReallyEqual(u1.get_verify_cap(), None) self.failUnlessReallyEqual(u1.get_storage_index(), None) self.failUnlessReallyEqual(u1.abbrev_si(), b"") def test_mdmf(self): writekey = b"\x01" * 16 fingerprint = b"\x02" * 32 uri1 = uri.WriteableMDMFFileURI(writekey, fingerprint) d1 = uri.MDMFDirectoryURI(uri1) self.failIf(d1.is_readonly()) self.failUnless(d1.is_mutable()) self.failUnless(IURI.providedBy(d1)) self.failUnless(IDirnodeURI.providedBy(d1)) d1_uri = d1.to_string() d2 = uri.from_string(d1_uri) self.failUnlessIsInstance(d2, uri.MDMFDirectoryURI) self.failIf(d2.is_readonly()) self.failUnless(d2.is_mutable()) self.failUnless(IURI.providedBy(d2)) self.failUnless(IDirnodeURI.providedBy(d2)) # It doesn't make sense to ask for a deep immutable URI for a # mutable directory, and we should get back a result to that # effect. 
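# The comment above states the rule the next assertion checks:
# from_string(..., deep_immutable=True) must never hand back anything
# mutable.  Immutable caps pass through unchanged; mutable ones come back as
# UnknownURI.  A compact sketch contrasting the two cases (fixture bytes are
# illustrative):
def _sketch_deep_immutable():
    mutable_dir = uri.MDMFDirectoryURI(
        uri.WriteableMDMFFileURI(b"\x01" * 16, b"\x02" * 32))
    refused = uri.from_string(mutable_dir.to_string(), deep_immutable=True)
    assert isinstance(refused, uri.UnknownURI)
    literal = uri.LiteralFileURI(b"just bytes")
    accepted = uri.from_string(literal.to_string(), deep_immutable=True)
    assert accepted.to_string() == literal.to_string()
    return refused, accepted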
d3 = uri.from_string(d2.to_string(), deep_immutable=True) self.failUnlessIsInstance(d3, uri.UnknownURI) def test_mdmf_attenuation(self): writekey = b"\x01" * 16 fingerprint = b"\x02" * 32 uri1 = uri.WriteableMDMFFileURI(writekey, fingerprint) d1 = uri.MDMFDirectoryURI(uri1) self.failUnless(d1.is_mutable()) self.failIf(d1.is_readonly()) self.failUnless(IURI.providedBy(d1)) self.failUnless(IDirnodeURI.providedBy(d1)) d1_uri = d1.to_string() d1_uri_from_fn = uri.MDMFDirectoryURI(d1.get_filenode_cap()).to_string() self.failUnlessEqual(d1_uri_from_fn, d1_uri) uri2 = uri.from_string(d1_uri) self.failUnlessIsInstance(uri2, uri.MDMFDirectoryURI) self.failUnless(IURI.providedBy(uri2)) self.failUnless(IDirnodeURI.providedBy(uri2)) self.failUnless(uri2.is_mutable()) self.failIf(uri2.is_readonly()) ro = uri2.get_readonly() self.failUnlessIsInstance(ro, uri.ReadonlyMDMFDirectoryURI) self.failUnless(ro.is_mutable()) self.failUnless(ro.is_readonly()) self.failUnless(IURI.providedBy(ro)) self.failUnless(IDirnodeURI.providedBy(ro)) ro_uri = ro.to_string() n = uri.from_string(ro_uri, deep_immutable=True) self.failUnlessIsInstance(n, uri.UnknownURI) fn_cap = ro.get_filenode_cap() fn_ro_cap = fn_cap.get_readonly() d3 = uri.ReadonlyMDMFDirectoryURI(fn_ro_cap) self.failUnlessEqual(ro.to_string(), d3.to_string()) self.failUnless(ro.is_mutable()) self.failUnless(ro.is_readonly()) def test_mdmf_verifier(self): # I'm not sure what I want to write here yet. writekey = b"\x01" * 16 fingerprint = b"\x02" * 32 uri1 = uri.WriteableMDMFFileURI(writekey, fingerprint) d1 = uri.MDMFDirectoryURI(uri1) v1 = d1.get_verify_cap() self.failUnlessIsInstance(v1, uri.MDMFDirectoryURIVerifier) self.failIf(v1.is_mutable()) d2 = uri.from_string(d1.to_string()) v2 = d2.get_verify_cap() self.failUnlessIsInstance(v2, uri.MDMFDirectoryURIVerifier) self.failIf(v2.is_mutable()) self.failUnlessEqual(v2.to_string(), v1.to_string()) # Now attenuate and make sure that works correctly. r3 = d2.get_readonly() v3 = r3.get_verify_cap() self.failUnlessIsInstance(v3, uri.MDMFDirectoryURIVerifier) self.failIf(v3.is_mutable()) self.failUnlessEqual(v3.to_string(), v1.to_string()) r4 = uri.from_string(r3.to_string()) v4 = r4.get_verify_cap() self.failUnlessIsInstance(v4, uri.MDMFDirectoryURIVerifier) self.failIf(v4.is_mutable()) self.failUnlessEqual(v4.to_string(), v3.to_string()) tahoe_lafs-1.20.0/src/allmydata/test/test_util.py0000644000000000000000000006065213615410400016734 0ustar00""" Ported to Python3. 
""" import os, time, sys import yaml import json from threading import current_thread from twisted.trial import unittest from foolscap.api import Violation, RemoteException from allmydata.util import idlib, mathutil from allmydata.util import fileutil from allmydata.util import jsonbytes from allmydata.util import pollmixin from allmydata.util import yamlutil from allmydata.util import rrefutil from allmydata.util.fileutil import EncryptedTemporaryFile from allmydata.util.cputhreadpool import defer_to_thread, disable_thread_pool_for_test from allmydata.test.common_util import ReallyEqualMixin from .no_network import fireNow, LocalWrapper long = int class IDLib(unittest.TestCase): def test_nodeid_b2a(self): result = idlib.nodeid_b2a(b"\x00"*20) self.assertEqual(result, "a"*32) self.assertIsInstance(result, str) class MyList(list): pass class Math(unittest.TestCase): def test_round_sigfigs(self): f = mathutil.round_sigfigs self.failUnlessEqual(f(22.0/3, 4), 7.3330000000000002) class FileUtil(ReallyEqualMixin, unittest.TestCase): def mkdir(self, basedir, path, mode=0o777): fn = os.path.join(basedir, path) fileutil.make_dirs(fn, mode) def touch(self, basedir, path, mode=None, data="touch\n"): fn = os.path.join(basedir, path) f = open(fn, "w") f.write(data) f.close() if mode is not None: os.chmod(fn, mode) def test_rm_dir(self): basedir = "util/FileUtil/test_rm_dir" fileutil.make_dirs(basedir) # create it again to test idempotency fileutil.make_dirs(basedir) d = os.path.join(basedir, "doomed") self.mkdir(d, "a/b") self.touch(d, "a/b/1.txt") self.touch(d, "a/b/2.txt", 0o444) self.touch(d, "a/b/3.txt", 0) self.mkdir(d, "a/c") self.touch(d, "a/c/1.txt") self.touch(d, "a/c/2.txt", 0o444) self.touch(d, "a/c/3.txt", 0) os.chmod(os.path.join(d, "a/c"), 0o444) self.mkdir(d, "a/d") self.touch(d, "a/d/1.txt") self.touch(d, "a/d/2.txt", 0o444) self.touch(d, "a/d/3.txt", 0) os.chmod(os.path.join(d, "a/d"), 0) fileutil.rm_dir(d) self.failIf(os.path.exists(d)) # remove it again to test idempotency fileutil.rm_dir(d) def test_remove_if_possible(self): basedir = "util/FileUtil/test_remove_if_possible" fileutil.make_dirs(basedir) self.touch(basedir, "here") fn = os.path.join(basedir, "here") fileutil.remove_if_possible(fn) self.failIf(os.path.exists(fn)) fileutil.remove_if_possible(fn) # should be idempotent fileutil.rm_dir(basedir) fileutil.remove_if_possible(fn) # should survive errors def test_write_atomically(self): basedir = "util/FileUtil/test_write_atomically" fileutil.make_dirs(basedir) fn = os.path.join(basedir, "here") fileutil.write_atomically(fn, b"one", "b") self.failUnlessEqual(fileutil.read(fn), b"one") fileutil.write_atomically(fn, u"two", mode="") # non-binary self.failUnlessEqual(fileutil.read(fn), b"two") def test_rename(self): basedir = "util/FileUtil/test_rename" fileutil.make_dirs(basedir) self.touch(basedir, "here") fn = os.path.join(basedir, "here") fn2 = os.path.join(basedir, "there") fileutil.rename(fn, fn2) self.failIf(os.path.exists(fn)) self.failUnless(os.path.exists(fn2)) def test_rename_no_overwrite(self): workdir = fileutil.abspath_expanduser_unicode(u"test_rename_no_overwrite") fileutil.make_dirs(workdir) source_path = os.path.join(workdir, "source") dest_path = os.path.join(workdir, "dest") # when neither file exists self.failUnlessRaises(OSError, fileutil.rename_no_overwrite, source_path, dest_path) # when only dest exists fileutil.write(dest_path, b"dest") self.failUnlessRaises(OSError, fileutil.rename_no_overwrite, source_path, dest_path) 
self.failUnlessEqual(fileutil.read(dest_path), b"dest") # when both exist fileutil.write(source_path, b"source") self.failUnlessRaises(OSError, fileutil.rename_no_overwrite, source_path, dest_path) self.failUnlessEqual(fileutil.read(source_path), b"source") self.failUnlessEqual(fileutil.read(dest_path), b"dest") # when only source exists os.remove(dest_path) fileutil.rename_no_overwrite(source_path, dest_path) self.failUnlessEqual(fileutil.read(dest_path), b"source") self.failIf(os.path.exists(source_path)) def test_replace_file(self): workdir = fileutil.abspath_expanduser_unicode(u"test_replace_file") fileutil.make_dirs(workdir) replaced_path = os.path.join(workdir, "replaced") replacement_path = os.path.join(workdir, "replacement") # when none of the files exist self.failUnlessRaises(fileutil.ConflictError, fileutil.replace_file, replaced_path, replacement_path) # when only replaced exists fileutil.write(replaced_path, b"foo") self.failUnlessRaises(fileutil.ConflictError, fileutil.replace_file, replaced_path, replacement_path) self.failUnlessEqual(fileutil.read(replaced_path), b"foo") # when both replaced and replacement exist fileutil.write(replacement_path, b"bar") fileutil.replace_file(replaced_path, replacement_path) self.failUnlessEqual(fileutil.read(replaced_path), b"bar") self.failIf(os.path.exists(replacement_path)) # when only replacement exists os.remove(replaced_path) fileutil.write(replacement_path, b"bar") fileutil.replace_file(replaced_path, replacement_path) self.failUnlessEqual(fileutil.read(replaced_path), b"bar") self.failIf(os.path.exists(replacement_path)) def test_du(self): basedir = "util/FileUtil/test_du" fileutil.make_dirs(basedir) d = os.path.join(basedir, "space-consuming") self.mkdir(d, "a/b") self.touch(d, "a/b/1.txt", data="a"*10) self.touch(d, "a/b/2.txt", data="b"*11) self.mkdir(d, "a/c") self.touch(d, "a/c/1.txt", data="c"*12) self.touch(d, "a/c/2.txt", data="d"*13) used = fileutil.du(basedir) self.failUnlessEqual(10+11+12+13, used) def test_abspath_expanduser_unicode(self): self.failUnlessRaises(AssertionError, fileutil.abspath_expanduser_unicode, b"bytestring") saved_cwd = os.path.normpath(os.getcwd()) abspath_cwd = fileutil.abspath_expanduser_unicode(u".") abspath_cwd_notlong = fileutil.abspath_expanduser_unicode(u".", long_path=False) self.failUnless(isinstance(saved_cwd, str), saved_cwd) self.failUnless(isinstance(abspath_cwd, str), abspath_cwd) if sys.platform == "win32": self.failUnlessReallyEqual(abspath_cwd, fileutil.to_windows_long_path(saved_cwd)) else: self.failUnlessReallyEqual(abspath_cwd, saved_cwd) self.failUnlessReallyEqual(abspath_cwd_notlong, saved_cwd) self.failUnlessReallyEqual(fileutil.to_windows_long_path(u"\\\\?\\foo"), u"\\\\?\\foo") self.failUnlessReallyEqual(fileutil.to_windows_long_path(u"\\\\.\\foo"), u"\\\\.\\foo") self.failUnlessReallyEqual(fileutil.to_windows_long_path(u"\\\\server\\foo"), u"\\\\?\\UNC\\server\\foo") self.failUnlessReallyEqual(fileutil.to_windows_long_path(u"C:\\foo"), u"\\\\?\\C:\\foo") self.failUnlessReallyEqual(fileutil.to_windows_long_path(u"C:\\foo/bar"), u"\\\\?\\C:\\foo\\bar") # adapted from foo = fileutil.abspath_expanduser_unicode(u"foo") self.failUnless(foo.endswith(u"%sfoo" % (os.path.sep,)), foo) foobar = fileutil.abspath_expanduser_unicode(u"bar", base=foo) self.failUnless(foobar.endswith(u"%sfoo%sbar" % (os.path.sep, os.path.sep)), foobar) if sys.platform == "win32": # This is checking that a drive letter is added for a path without one. 
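# For reference, the to_windows_long_path() assertions earlier in this test
# amount to the following mapping: the helper adds the "\\?\" long-path
# prefix so Windows APIs accept paths beyond the legacy MAX_PATH limit,
# leaves already-prefixed and device paths alone, rewrites UNC shares to the
# \\?\UNC\ form, and normalises forward slashes.  A restated sketch built
# only from the expectations asserted above:
def _sketch_windows_long_path_mapping():
    expected = {
        u"\\\\?\\foo": u"\\\\?\\foo",
        u"\\\\.\\foo": u"\\\\.\\foo",
        u"\\\\server\\foo": u"\\\\?\\UNC\\server\\foo",
        u"C:\\foo": u"\\\\?\\C:\\foo",
        u"C:\\foo/bar": u"\\\\?\\C:\\foo\\bar",
    }
    for original, long_form in expected.items():
        assert fileutil.to_windows_long_path(original) == long_form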
baz = fileutil.abspath_expanduser_unicode(u"\\baz") self.failUnless(baz.startswith(u"\\\\?\\"), baz) self.failUnlessReallyEqual(baz[5 :], u":\\baz") bar = fileutil.abspath_expanduser_unicode(u"\\bar", base=baz) self.failUnless(bar.startswith(u"\\\\?\\"), bar) self.failUnlessReallyEqual(bar[5 :], u":\\bar") # not u":\\baz\\bar", because \bar is absolute on the current drive. self.failUnlessReallyEqual(baz[4], bar[4]) # same drive baz_notlong = fileutil.abspath_expanduser_unicode(u"\\baz", long_path=False) self.failIf(baz_notlong.startswith(u"\\\\?\\"), baz_notlong) self.failUnlessReallyEqual(baz_notlong[1 :], u":\\baz") bar_notlong = fileutil.abspath_expanduser_unicode(u"\\bar", base=baz_notlong, long_path=False) self.failIf(bar_notlong.startswith(u"\\\\?\\"), bar_notlong) self.failUnlessReallyEqual(bar_notlong[1 :], u":\\bar") # not u":\\baz\\bar", because \bar is absolute on the current drive. self.failUnlessReallyEqual(baz_notlong[0], bar_notlong[0]) # same drive self.failIfIn(u"~", fileutil.abspath_expanduser_unicode(u"~")) self.failIfIn(u"~", fileutil.abspath_expanduser_unicode(u"~", long_path=False)) cwds = ['cwd'] try: cwds.append(u'\xe7w\xf0'.encode(sys.getfilesystemencoding() or 'ascii')) except UnicodeEncodeError: pass # the cwd can't be encoded -- test with ascii cwd only for cwd in cwds: try: os.mkdir(cwd) os.chdir(cwd) for upath in (u'', u'fuu', u'f\xf9\xf9', u'/fuu', u'U:\\', u'~'): uabspath = fileutil.abspath_expanduser_unicode(upath) self.failUnless(isinstance(uabspath, str), uabspath) uabspath_notlong = fileutil.abspath_expanduser_unicode(upath, long_path=False) self.failUnless(isinstance(uabspath_notlong, str), uabspath_notlong) finally: os.chdir(saved_cwd) def test_make_dirs_with_absolute_mode(self): if sys.platform == 'win32': raise unittest.SkipTest("Permissions don't work the same on windows.") workdir = fileutil.abspath_expanduser_unicode(u"test_make_dirs_with_absolute_mode") fileutil.make_dirs(workdir) abspath = fileutil.abspath_expanduser_unicode(u"a/b/c/d", base=workdir) fileutil.make_dirs_with_absolute_mode(workdir, abspath, 0o766) new_mode = os.stat(os.path.join(workdir, "a", "b", "c", "d")).st_mode & 0o777 self.failUnlessEqual(new_mode, 0o766) new_mode = os.stat(os.path.join(workdir, "a", "b", "c")).st_mode & 0o777 self.failUnlessEqual(new_mode, 0o766) new_mode = os.stat(os.path.join(workdir, "a", "b")).st_mode & 0o777 self.failUnlessEqual(new_mode, 0o766) new_mode = os.stat(os.path.join(workdir, "a")).st_mode & 0o777 self.failUnlessEqual(new_mode, 0o766) new_mode = os.stat(workdir).st_mode & 0o777 self.failIfEqual(new_mode, 0o766) def test_create_long_path(self): """ Even for paths with total length greater than 260 bytes, ``fileutil.abspath_expanduser_unicode`` produces a path on which other path-related APIs can operate. https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx documents certain Windows-specific path length limitations this test is specifically intended to demonstrate can be overcome. """ workdir = u"test_create_long_path" fileutil.make_dirs(workdir) base_path = fileutil.abspath_expanduser_unicode(workdir) base_length = len(base_path) # Construct a path /just/ long enough to exercise the important case. # It would be nice if we could just use a seemingly globally valid # long file name (the `x...` portion) here - for example, a name 255 # bytes long- and a previous version of this test did just that. # However, aufs imposes a 242 byte length limit on file names. 
Most # other POSIX filesystems do allow names up to 255 bytes. It's not # clear there's anything we can *do* about lower limits, though, and # POSIX.1-2017 (and earlier) only requires that the maximum be at # least 14 (!!!) bytes. long_path = os.path.join(base_path, u'x' * (261 - base_length)) def _cleanup(): fileutil.remove(long_path) self.addCleanup(_cleanup) fileutil.write(long_path, b"test") self.failUnless(os.path.exists(long_path)) self.failUnlessEqual(fileutil.read(long_path), b"test") _cleanup() self.failIf(os.path.exists(long_path)) def _test_windows_expanduser(self, userprofile=None, homedrive=None, homepath=None): def call_windows_getenv(name): if name == u"USERPROFILE": return userprofile if name == u"HOMEDRIVE": return homedrive if name == u"HOMEPATH": return homepath self.fail("unexpected argument to call_windows_getenv") self.patch(fileutil, 'windows_getenv', call_windows_getenv) self.failUnlessReallyEqual(fileutil.windows_expanduser(u"~"), os.path.join(u"C:", u"\\Documents and Settings\\\u0100")) self.failUnlessReallyEqual(fileutil.windows_expanduser(u"~\\foo"), os.path.join(u"C:", u"\\Documents and Settings\\\u0100", u"foo")) self.failUnlessReallyEqual(fileutil.windows_expanduser(u"~/foo"), os.path.join(u"C:", u"\\Documents and Settings\\\u0100", u"foo")) self.failUnlessReallyEqual(fileutil.windows_expanduser(u"a"), u"a") self.failUnlessReallyEqual(fileutil.windows_expanduser(u"a~"), u"a~") self.failUnlessReallyEqual(fileutil.windows_expanduser(u"a\\~\\foo"), u"a\\~\\foo") def test_windows_expanduser_xp(self): return self._test_windows_expanduser(homedrive=u"C:", homepath=u"\\Documents and Settings\\\u0100") def test_windows_expanduser_win7(self): return self._test_windows_expanduser(userprofile=os.path.join(u"C:", u"\\Documents and Settings\\\u0100")) def test_disk_stats(self): avail = fileutil.get_available_space('.', 2**14) if avail == 0: raise unittest.SkipTest("This test will spuriously fail there is no disk space left.") disk = fileutil.get_disk_stats('.', 2**13) self.failUnless(disk['total'] > 0, disk['total']) # we tolerate used==0 for a Travis-CI bug, see #2290 self.failUnless(disk['used'] >= 0, disk['used']) self.failUnless(disk['free_for_root'] > 0, disk['free_for_root']) self.failUnless(disk['free_for_nonroot'] > 0, disk['free_for_nonroot']) self.failUnless(disk['avail'] > 0, disk['avail']) def test_disk_stats_avail_nonnegative(self): # This test will spuriously fail if you have more than 2^128 # bytes of available space on your filesystem. 
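# A brief sketch of the API exercised by this test and the previous one:
# get_disk_stats() takes a path and a reserved-space figure (in bytes) and
# returns a dict of byte counts -- "total", "used", "free_for_root",
# "free_for_nonroot" and "avail" -- where "avail" is the space left for a
# non-root caller after honouring the reservation, clamped at zero (which is
# what the assertion below checks with an absurdly large reservation).
# Illustrative usage only:
def _sketch_disk_stats(reserved=2**13):
    stats = fileutil.get_disk_stats('.', reserved)
    # get_available_space() is used above as the shorthand for obtaining just
    # the availability figure for a given reservation.
    return stats['avail'], fileutil.get_available_space('.', reserved)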
disk = fileutil.get_disk_stats('.', 2**128) self.failUnlessEqual(disk['avail'], 0) def test_get_pathinfo(self): basedir = "util/FileUtil/test_get_pathinfo" fileutil.make_dirs(basedir) # create a directory self.mkdir(basedir, "a") dirinfo = fileutil.get_pathinfo(basedir) self.failUnlessTrue(dirinfo.isdir) self.failUnlessTrue(dirinfo.exists) self.failUnlessFalse(dirinfo.isfile) self.failUnlessFalse(dirinfo.islink) # create a file f = os.path.join(basedir, "1.txt") fileutil.write(f, b"a"*10) fileinfo = fileutil.get_pathinfo(f) self.failUnlessTrue(fileinfo.isfile) self.failUnlessTrue(fileinfo.exists) self.failUnlessFalse(fileinfo.isdir) self.failUnlessFalse(fileinfo.islink) self.failUnlessEqual(fileinfo.size, 10) # path at which nothing exists dnename = os.path.join(basedir, "doesnotexist") now_ns = fileutil.seconds_to_ns(time.time()) dneinfo = fileutil.get_pathinfo(dnename, now_ns=now_ns) self.failUnlessFalse(dneinfo.exists) self.failUnlessFalse(dneinfo.isfile) self.failUnlessFalse(dneinfo.isdir) self.failUnlessFalse(dneinfo.islink) self.failUnlessEqual(dneinfo.size, None) self.failUnlessEqual(dneinfo.mtime_ns, now_ns) self.failUnlessEqual(dneinfo.ctime_ns, now_ns) def test_get_pathinfo_symlink(self): if not hasattr(os, 'symlink'): raise unittest.SkipTest("can't create symlinks on this platform") basedir = "util/FileUtil/test_get_pathinfo" fileutil.make_dirs(basedir) f = os.path.join(basedir, "1.txt") fileutil.write(f, b"a"*10) # create a symlink pointing to 1.txt slname = os.path.join(basedir, "linkto1.txt") os.symlink(f, slname) symlinkinfo = fileutil.get_pathinfo(slname) self.failUnlessTrue(symlinkinfo.islink) self.failUnlessTrue(symlinkinfo.exists) self.failUnlessFalse(symlinkinfo.isfile) self.failUnlessFalse(symlinkinfo.isdir) def test_encrypted_tempfile(self): f = EncryptedTemporaryFile() f.write(b"foobar") f.close() def test_write(self): """fileutil.write() can write both unicode and bytes.""" path = self.mktemp() fileutil.write(path, b"abc") with open(path, "rb") as f: self.assertEqual(f.read(), b"abc") fileutil.write(path, u"def \u1234") with open(path, "rb") as f: self.assertEqual(f.read(), u"def \u1234".encode("utf-8")) class PollMixinTests(unittest.TestCase): def setUp(self): self.pm = pollmixin.PollMixin() def test_PollMixin_True(self): d = self.pm.poll(check_f=lambda : True, pollinterval=0.1) return d def test_PollMixin_False_then_True(self): i = iter([False, True]) d = self.pm.poll(check_f=lambda: next(i), pollinterval=0.1) return d def test_timeout(self): d = self.pm.poll(check_f=lambda: False, pollinterval=0.01, timeout=1) def _suc(res): self.fail("poll should have failed, not returned %s" % (res,)) def _err(f): f.trap(pollmixin.TimeoutError) return None # success d.addCallbacks(_suc, _err) return d ctr = [0] class EqButNotIs(object): def __init__(self, x): self.x = x self.hash = ctr[0] ctr[0] += 1 def __repr__(self): return "<%s %s>" % (self.__class__.__name__, self.x,) def __hash__(self): return self.hash def __le__(self, other): return self.x <= other def __lt__(self, other): return self.x < other def __ge__(self, other): return self.x >= other def __gt__(self, other): return self.x > other def __ne__(self, other): return self.x != other def __eq__(self, other): return self.x == other class YAML(unittest.TestCase): def test_convert(self): """ Unicode and (ASCII) native strings get roundtripped to Unicode strings. 
""" data = yaml.safe_dump( ["str", "unicode", "\u1234nicode"] ) back = yamlutil.safe_load(data) self.assertIsInstance(back[0], str) self.assertIsInstance(back[1], str) self.assertIsInstance(back[2], str) class JSONBytes(unittest.TestCase): """Tests for jsonbytes module.""" def test_encode_bytes(self): """jsonbytes.dumps() encodes bytes. Bytes are presumed to be UTF-8 encoded. """ snowman = u"def\N{SNOWMAN}\uFF00" data = { b"hello": [1, b"cd", {b"abc": [123, snowman.encode("utf-8")]}], } expected = { u"hello": [1, u"cd", {u"abc": [123, snowman]}], } # Bytes get passed through as if they were UTF-8 Unicode: encoded = jsonbytes.dumps(data) self.assertEqual(json.loads(encoded), expected) self.assertEqual(jsonbytes.loads(encoded), expected) def test_encode_unicode(self): """jsonbytes.dumps() encodes Unicode string as usual.""" expected = { u"hello": [1, u"cd"], } encoded = jsonbytes.dumps(expected) self.assertEqual(json.loads(encoded), expected) def test_dumps_bytes(self): """jsonbytes.dumps_bytes always returns bytes.""" x = {u"def\N{SNOWMAN}\uFF00": 123} encoded = jsonbytes.dumps_bytes(x) self.assertIsInstance(encoded, bytes) self.assertEqual(json.loads(encoded), x) def test_any_bytes_unsupported_by_default(self): """By default non-UTF-8 bytes raise error.""" bytestring = b"abc\xff\x00" with self.assertRaises(UnicodeDecodeError): jsonbytes.dumps(bytestring) with self.assertRaises(UnicodeDecodeError): jsonbytes.dumps_bytes(bytestring) with self.assertRaises(UnicodeDecodeError): json.dumps(bytestring, cls=jsonbytes.UTF8BytesJSONEncoder) def test_any_bytes(self): """If any_bytes is True, non-UTF-8 bytes don't break encoding.""" bytestring = b"abc\xff\xff123" o = {bytestring: bytestring} expected = {"abc\\xff\\xff123": "abc\\xff\\xff123"} self.assertEqual( json.loads(jsonbytes.dumps(o, any_bytes=True)), expected, ) self.assertEqual( json.loads(json.dumps( o, cls=jsonbytes.AnyBytesJSONEncoder)), expected, ) self.assertEqual( json.loads(jsonbytes.dumps(o, any_bytes=True)), expected ) def test_dumps_bytes_unicode_separators(self): """Unicode separators don't prevent the result from being bytes.""" result = jsonbytes.dumps_bytes([1, 2], separators=(u',', u':')) self.assertIsInstance(result, bytes) self.assertEqual(result, b"[1,2]") class FakeGetVersion(object): """Emulate an object with a get_version.""" def __init__(self, result): self.result = result def remote_get_version(self): if isinstance(self.result, Exception): raise self.result return self.result class RrefUtilTests(unittest.TestCase): """Tests for rrefutil.""" def test_version_returned(self): """If get_version() succeeded, it is set on the rref.""" rref = LocalWrapper(FakeGetVersion(12345), fireNow) result = self.successResultOf( rrefutil.add_version_to_remote_reference(rref, "default") ) self.assertEqual(result.version, 12345) self.assertIdentical(result, rref) def test_exceptions(self): """If get_version() failed, default version is set on the rref.""" for exception in (Violation(), RemoteException(ValueError())): rref = LocalWrapper(FakeGetVersion(exception), fireNow) result = self.successResultOf( rrefutil.add_version_to_remote_reference(rref, "Default") ) self.assertEqual(result.version, "Default") self.assertIdentical(result, rref) class CPUThreadPool(unittest.TestCase): """Tests for cputhreadpool.""" async def test_runs_in_thread(self): """The given function runs in a thread.""" def f(*args, **kwargs): return current_thread(), args, kwargs this_thread = current_thread().ident thread, args, kwargs = await defer_to_thread(f, 1, 3, 
key=4, value=5) # The task ran in a different thread: self.assertNotEqual(thread.ident, this_thread) self.assertEqual(args, (1, 3)) self.assertEqual(kwargs, {"key": 4, "value": 5}) async def test_when_disabled_runs_in_same_thread(self): """ If the CPU thread pool is disabled, the given function runs in the current thread. """ disable_thread_pool_for_test(self) def f(*args, **kwargs): return current_thread().ident, args, kwargs this_thread = current_thread().ident thread, args, kwargs = await defer_to_thread(f, 1, 3, key=4, value=5) self.assertEqual(thread, this_thread) self.assertEqual(args, (1, 3)) self.assertEqual(kwargs, {"key": 4, "value": 5}) tahoe_lafs-1.20.0/src/allmydata/test/cli/__init__.py0000644000000000000000000000000013615410400017203 0ustar00tahoe_lafs-1.20.0/src/allmydata/test/cli/common.py0000644000000000000000000000445313615410400016754 0ustar00""" Ported to Python 3. """ from six import ensure_str, ensure_text from ...scripts import runner from ..common_util import ReallyEqualMixin, run_cli, run_cli_unicode def parse_options(basedir, command, args): args = [ensure_text(s) for s in args] o = runner.Options() o.parseOptions(["--node-directory", basedir, command] + args) while hasattr(o, "subOptions"): o = o.subOptions return o class CLITestMixin(ReallyEqualMixin): """ A mixin for use with ``GridTestMixin`` to execute CLI commands against nodes created by methods of that mixin. """ def do_cli_unicode(self, verb, argv, client_num=0, **kwargs): """ Run a Tahoe-LAFS CLI command. :param verb: See ``run_cli_unicode``. :param argv: See ``run_cli_unicode``. :param int client_num: The number of the ``GridTestMixin``-created node against which to execute the command. :param kwargs: Additional keyword arguments to pass to ``run_cli_unicode``. """ # client_num is used to execute client CLI commands on a specific # client. client_dir = self.get_clientdir(i=client_num) nodeargs = [ u"--node-directory", client_dir ] return run_cli_unicode(verb, argv, nodeargs=nodeargs, **kwargs) def do_cli(self, verb, *args, **kwargs): """ Like ``do_cli_unicode`` but work with ``bytes`` everywhere instead of ``unicode``. Where possible, prefer ``do_cli_unicode``. """ # client_num is used to execute client CLI commands on a specific # client. client_num = kwargs.pop("client_num", 0) # If we were really going to launch a child process then # `unicode_to_argv` would be the right thing to do here. However, # we're just going to call some Python functions directly and those # Python functions want native strings. So ignore the requirements # for passing arguments to another process and make sure this argument # is a native string. verb = ensure_str(verb) args = [ensure_str(arg) for arg in args] client_dir = ensure_str(self.get_clientdir(i=client_num)) nodeargs = [ "--node-directory", client_dir ] return run_cli(verb, *args, nodeargs=nodeargs, **kwargs) tahoe_lafs-1.20.0/src/allmydata/test/cli/test_admin.py0000644000000000000000000001601013615410400017603 0ustar00""" Ported to Python 3. """ # We're going to override stdin/stderr, so want to match their behavior on respective Python versions. 
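# The CLITestMixin helpers defined in cli/common.py above are normally
# combined with GridTestMixin: set_up_grid() builds a throwaway in-process
# grid and do_cli() runs a CLI verb against one of its client nodes, firing
# with an (rc, stdout, stderr) triple.  The class below is an illustrative
# sketch only -- its name and test body are invented for this example; the
# real usages appear in test_alias.py and test_backup.py further on.
from twisted.trial import unittest as _trial_unittest
from twisted.internet.defer import inlineCallbacks as _inlineCallbacks
from ..no_network import GridTestMixin as _GridTestMixin
from .common import CLITestMixin as _CLITestMixin

class _ExampleCLIMixinUsage(_GridTestMixin, _CLITestMixin, _trial_unittest.TestCase):
    @_inlineCallbacks
    def test_create_alias_sketch(self):
        self.basedir = self.mktemp()
        self.set_up_grid(oneshare=True)
        rc, out, err = yield self.do_cli("create-alias", "tahoe")
        self.assertEqual(rc, 0)
        self.assertEqual(len(err), 0, err)
        self.assertIn("created", out)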
from io import StringIO from twisted.python.usage import ( UsageError, ) from twisted.python.filepath import ( FilePath, ) from testtools.matchers import ( Contains, ) from allmydata.scripts.admin import ( migrate_crawler, add_grid_manager_cert, ) from allmydata.scripts.runner import ( Options, ) from allmydata.util import jsonbytes as json from ..common import ( SyncTestCase, ) class AdminMigrateCrawler(SyncTestCase): """ Tests related to 'tahoe admin migrate-crawler' """ def test_already(self): """ We've already migrated; don't do it again. """ root = FilePath(self.mktemp()) storage = root.child("storage") storage.makedirs() with storage.child("lease_checker.state.json").open("w") as f: f.write(b"{}\n") top = Options() top.parseOptions([ "admin", "migrate-crawler", "--basedir", storage.parent().path, ]) options = top.subOptions while hasattr(options, "subOptions"): options = options.subOptions options.stdout = StringIO() migrate_crawler(options) self.assertThat( options.stdout.getvalue(), Contains("Already converted:"), ) def test_usage(self): """ We've already migrated; don't do it again. """ root = FilePath(self.mktemp()) storage = root.child("storage") storage.makedirs() with storage.child("lease_checker.state.json").open("w") as f: f.write(b"{}\n") top = Options() top.parseOptions([ "admin", "migrate-crawler", "--basedir", storage.parent().path, ]) options = top.subOptions while hasattr(options, "subOptions"): options = options.subOptions self.assertThat( str(options), Contains("security issues with pickle") ) fake_cert = { "certificate": "{\"expires\":1601687822,\"public_key\":\"pub-v0-cbq6hcf3pxcz6ouoafrbktmkixkeuywpcpbcomzd3lqbkq4nmfga\",\"version\":1}", "signature": "fvjd3uvvupf2v6tnvkwjd473u3m3inyqkwiclhp7balmchkmn3px5pei3qyfjnhymq4cjcwvbpqmcwwnwswdtrfkpnlaxuih2zbdmda" } class AddCertificateOptions(SyncTestCase): """ Tests for 'tahoe admin add-grid-manager-cert' option validation """ def setUp(self): self.tahoe = Options() return super(AddCertificateOptions, self).setUp() def test_parse_no_data(self): """ When no data is passed to stdin an error is produced """ self.tahoe.stdin = StringIO("") self.tahoe.stderr = StringIO() # suppress message with self.assertRaises(UsageError) as ctx: self.tahoe.parseOptions( [ "admin", "add-grid-manager-cert", "--name", "random-name", "--filename", "-", ] ) self.assertIn( "Reading certificate from stdin failed", str(ctx.exception) ) def test_read_cert_file(self): """ A certificate can be read from a file """ tmp = self.mktemp() with open(tmp, "wb") as f: f.write(json.dumps_bytes(fake_cert)) # certificate should be loaded self.tahoe.parseOptions( [ "admin", "add-grid-manager-cert", "--name", "random-name", "--filename", tmp, ] ) opts = self.tahoe.subOptions.subOptions self.assertEqual( fake_cert, opts.certificate_data ) def test_bad_certificate(self): """ Unparseable data produces an error """ self.tahoe.stdin = StringIO("{}") self.tahoe.stderr = StringIO() # suppress message with self.assertRaises(UsageError) as ctx: self.tahoe.parseOptions( [ "admin", "add-grid-manager-cert", "--name", "random-name", "--filename", "-", ] ) self.assertIn( "Grid Manager certificate must contain", str(ctx.exception) ) class AddCertificateCommand(SyncTestCase): """ Tests for 'tahoe admin add-grid-manager-cert' operation """ def setUp(self): self.tahoe = Options() self.node_path = FilePath(self.mktemp()) self.node_path.makedirs() with self.node_path.child("tahoe.cfg").open("w") as f: f.write(b"# minimal test config\n") return super(AddCertificateCommand, 
self).setUp() def test_add_one(self): """ Adding a certificate succeeds """ self.tahoe.stdin = StringIO(json.dumps(fake_cert)) self.tahoe.stderr = StringIO() self.tahoe.parseOptions( [ "--node-directory", self.node_path.path, "admin", "add-grid-manager-cert", "--name", "zero", "--filename", "-", ] ) self.tahoe.subOptions.subOptions.stdin = self.tahoe.stdin self.tahoe.subOptions.subOptions.stderr = self.tahoe.stderr rc = add_grid_manager_cert(self.tahoe.subOptions.subOptions) self.assertEqual(rc, 0) self.assertEqual( {"zero.cert", "tahoe.cfg"}, set(self.node_path.listdir()) ) self.assertIn( "There are now 1 certificates", self.tahoe.stderr.getvalue() ) def test_add_two(self): """ An error message is produced when adding a certificate with a duplicate name. """ self.tahoe.stdin = StringIO(json.dumps(fake_cert)) self.tahoe.stderr = StringIO() self.tahoe.parseOptions( [ "--node-directory", self.node_path.path, "admin", "add-grid-manager-cert", "--name", "zero", "--filename", "-", ] ) self.tahoe.subOptions.subOptions.stdin = self.tahoe.stdin self.tahoe.subOptions.subOptions.stderr = self.tahoe.stderr rc = add_grid_manager_cert(self.tahoe.subOptions.subOptions) self.assertEqual(rc, 0) self.tahoe.stdin = StringIO(json.dumps(fake_cert)) self.tahoe.parseOptions( [ "--node-directory", self.node_path.path, "admin", "add-grid-manager-cert", "--name", "zero", "--filename", "-", ] ) self.tahoe.subOptions.subOptions.stdin = self.tahoe.stdin self.tahoe.subOptions.subOptions.stderr = self.tahoe.stderr rc = add_grid_manager_cert(self.tahoe.subOptions.subOptions) self.assertEqual(rc, 1) self.assertIn( "Already have certificate for 'zero'", self.tahoe.stderr.getvalue() ) tahoe_lafs-1.20.0/src/allmydata/test/cli/test_alias.py0000644000000000000000000000756713615410400017625 0ustar00""" Ported to Python 3. """ import json from twisted.trial import unittest from twisted.internet.defer import inlineCallbacks from allmydata.scripts.common import get_aliases from allmydata.test.no_network import GridTestMixin from .common import CLITestMixin from allmydata.util import encodingutil # see also test_create_alias class ListAlias(GridTestMixin, CLITestMixin, unittest.TestCase): @inlineCallbacks def _check_create_alias(self, alias, encoding): """ Verify that ``tahoe create-alias`` can be used to create an alias named ``alias`` when argv is encoded using ``encoding``. :param unicode alias: The alias to try to create. :param NoneType|str encoding: The name of an encoding to force the ``create-alias`` implementation to use. This simulates the effects of setting LANG and doing other locale-foolishness without actually having to mess with this process's global locale state. If this is ``None`` then the encoding used will be ascii but the stdio objects given to the code under test will not declare any encoding (this is like Python 2 when stdio is not a tty). :return Deferred: A Deferred that fires with success if the alias can be created and that creation is reported on stdout appropriately encoded or with failure if something goes wrong. """ self.basedir = self.mktemp() self.set_up_grid(oneshare=True) # We can pass an encoding into the test utilities to invoke the code # under test but we can't pass such a parameter directly to the code # under test. Instead, that code looks at io_encoding. So, # monkey-patch that value to our desired value here. This is the code # that most directly takes the place of messing with LANG or the # locale module. 
self.patch(encodingutil, "io_encoding", encoding or "ascii") rc, stdout, stderr = yield self.do_cli_unicode( u"create-alias", [alias], encoding=encoding, ) # Make sure the result of the create-alias command is as we want it to # be. self.assertEqual(u"Alias '{}' created\n".format(alias), stdout) self.assertEqual("", stderr) self.assertEqual(0, rc) # Make sure it had the intended side-effect, too - an alias created in # the node filesystem state. aliases = get_aliases(self.get_clientdir()) self.assertIn(alias, aliases) self.assertTrue(aliases[alias].startswith(b"URI:DIR2:")) # And inspect the state via the user interface list-aliases command # too. rc, stdout, stderr = yield self.do_cli_unicode( u"list-aliases", [u"--json"], encoding=encoding, ) self.assertEqual(0, rc) data = json.loads(stdout) self.assertIn(alias, data) data = data[alias] self.assertIn(u"readwrite", data) self.assertIn(u"readonly", data) def test_list_none(self): """ An alias composed of all ASCII-encodeable code points can be created when stdio aren't clearly marked with an encoding. """ return self._check_create_alias( u"tahoe", encoding=None, ) def test_list_ascii(self): """ An alias composed of all ASCII-encodeable code points can be created when the active encoding is ASCII. """ return self._check_create_alias( u"tahoe", encoding="ascii", ) def test_list_utf_8(self): """ An alias composed of all UTF-8-encodeable code points can be created when the active encoding is UTF-8. """ return self._check_create_alias( u"tahoe\N{SNOWMAN}", encoding="utf-8", ) tahoe_lafs-1.20.0/src/allmydata/test/cli/test_backup.py0000644000000000000000000006240113615410400017765 0ustar00""" Ported to Python 3. """ import os.path from io import StringIO from datetime import timedelta import re from twisted.trial import unittest from twisted.python.monkey import MonkeyPatcher from allmydata.util import fileutil from allmydata.util.fileutil import abspath_expanduser_unicode from allmydata.util.encodingutil import unicode_to_argv from allmydata.util.namespace import Namespace from allmydata.scripts import cli, backupdb from ..common_util import StallMixin from ..no_network import GridTestMixin from .common import ( CLITestMixin, parse_options, ) def _unsupported(what): return "{} are not supported by Python on this platform.".format(what) class Backup(GridTestMixin, CLITestMixin, StallMixin, unittest.TestCase): def writeto(self, path, data): full_path = os.path.join(self.basedir, "home", path) fileutil.make_dirs(os.path.dirname(full_path)) fileutil.write(full_path, data) def count_output(self, out): mo = re.search(r"(\d)+ files uploaded \((\d+) reused\), " "(\d)+ files skipped, " "(\d+) directories created \((\d+) reused\), " "(\d+) directories skipped", out) return [int(s) for s in mo.groups()] def count_output2(self, out): mo = re.search(r"(\d)+ files checked, (\d+) directories checked", out) return [int(s) for s in mo.groups()] def progress_output(self, out): def parse_timedelta(h, m, s): return timedelta(int(h), int(m), int(s)) mos = re.findall( r"Backing up (\d)+/(\d)+\.\.\. (\d+)h (\d+)m (\d+)s elapsed\.\.\.", out, ) return list( (int(progress), int(total), parse_timedelta(h, m, s)) for (progress, total, h, m, s) in mos ) def test_backup(self): self.basedir = "cli/Backup/backup" self.set_up_grid(oneshare=True) # is the backupdb available? If so, we test that a second backup does # not create new directories. 
hush = StringIO() bdb = backupdb.get_backupdb(os.path.join(self.basedir, "dbtest"), hush) self.failUnless(bdb) # create a small local directory with a couple of files source = os.path.join(self.basedir, "home") fileutil.make_dirs(os.path.join(source, "empty")) self.writeto("parent/subdir/foo.txt", "foo") self.writeto("parent/subdir/bar.txt", "bar\n" * 1000) self.writeto("parent/blah.txt", "blah") def do_backup(verbose=False): cmd = ["backup"] if verbose: cmd.append("--verbose") cmd.append(source) cmd.append("tahoe:backups") return self.do_cli(*cmd) d = self.do_cli("create-alias", "tahoe") d.addCallback(lambda res: do_backup(True)) def _check0(args): (rc, out, err) = args self.assertEqual(len(err), 0, err) self.failUnlessReallyEqual(rc, 0) ( files_uploaded, files_reused, files_skipped, directories_created, directories_reused, directories_skipped, ) = self.count_output(out) # foo.txt, bar.txt, blah.txt self.failUnlessReallyEqual(files_uploaded, 3) self.failUnlessReallyEqual(files_reused, 0) self.failUnlessReallyEqual(files_skipped, 0) # empty, home, home/parent, home/parent/subdir self.failUnlessReallyEqual(directories_created, 4) self.failUnlessReallyEqual(directories_reused, 0) self.failUnlessReallyEqual(directories_skipped, 0) # This is the first-upload scenario so there should have been # nothing to check. (files_checked, directories_checked) = self.count_output2(out) self.failUnlessReallyEqual(files_checked, 0) self.failUnlessReallyEqual(directories_checked, 0) progress = self.progress_output(out) for left, right in zip(progress[:-1], progress[1:]): # Progress as measured by file count should progress # monotonically. self.assertTrue( left[0] < right[0], "Failed: {} < {}".format(left[0], right[0]), ) # Total work to do should remain the same. self.assertEqual(left[1], right[1]) # Amount of elapsed time should only go up. Allow it to # remain the same to account for resolution of the report. self.assertTrue( left[2] <= right[2], "Failed: {} <= {}".format(left[2], right[2]), ) for element in progress: # Can't have more progress than the total. 
self.assertTrue( element[0] <= element[1], "Failed: {} <= {}".format(element[0], element[1]), ) d.addCallback(_check0) d.addCallback(lambda res: self.do_cli("ls", "--uri", "tahoe:backups")) def _check1(args): (rc, out, err) = args self.assertEqual(len(err), 0, err) self.failUnlessReallyEqual(rc, 0) lines = out.split("\n") children = dict([line.split() for line in lines if line]) latest_uri = children["Latest"] self.failUnless(latest_uri.startswith("URI:DIR2-CHK:"), latest_uri) childnames = list(children.keys()) self.failUnlessReallyEqual(sorted(childnames), ["Archives", "Latest"]) d.addCallback(_check1) d.addCallback(lambda res: self.do_cli("ls", "tahoe:backups/Latest")) def _check2(args): (rc, out, err) = args self.assertEqual(len(err), 0, err) self.failUnlessReallyEqual(rc, 0) self.failUnlessReallyEqual(sorted(out.split()), ["empty", "parent"]) d.addCallback(_check2) d.addCallback(lambda res: self.do_cli("ls", "tahoe:backups/Latest/empty")) def _check2a(args): (rc, out, err) = args self.assertEqual(len(err), 0, err) self.failUnlessReallyEqual(rc, 0) self.assertFalse(out.strip()) d.addCallback(_check2a) d.addCallback(lambda res: self.do_cli("get", "tahoe:backups/Latest/parent/subdir/foo.txt")) def _check3(args): (rc, out, err) = args self.assertFalse(err) self.failUnlessReallyEqual(rc, 0) self.assertEqual(out, "foo") d.addCallback(_check3) d.addCallback(lambda res: self.do_cli("ls", "tahoe:backups/Archives")) def _check4(args): (rc, out, err) = args self.assertFalse(err) self.failUnlessReallyEqual(rc, 0) self.old_archives = out.split() self.failUnlessReallyEqual(len(self.old_archives), 1) d.addCallback(_check4) d.addCallback(self.stall, 1.1) d.addCallback(lambda res: do_backup()) def _check4a(args): # second backup should reuse everything, if the backupdb is # available (rc, out, err) = args self.assertFalse(err) self.failUnlessReallyEqual(rc, 0) fu, fr, fs, dc, dr, ds = self.count_output(out) # foo.txt, bar.txt, blah.txt self.failUnlessReallyEqual(fu, 0) self.failUnlessReallyEqual(fr, 3) self.failUnlessReallyEqual(fs, 0) # empty, home, home/parent, home/parent/subdir self.failUnlessReallyEqual(dc, 0) self.failUnlessReallyEqual(dr, 4) self.failUnlessReallyEqual(ds, 0) d.addCallback(_check4a) # sneak into the backupdb, crank back the "last checked" # timestamp to force a check on all files def _reset_last_checked(res): dbfile = self.get_client_config().get_private_path("backupdb.sqlite") self.failUnless(os.path.exists(dbfile), dbfile) bdb = backupdb.get_backupdb(dbfile) bdb.cursor.execute("UPDATE last_upload SET last_checked=0") bdb.cursor.execute("UPDATE directories SET last_checked=0") bdb.connection.commit() d.addCallback(_reset_last_checked) d.addCallback(self.stall, 1.1) d.addCallback(lambda res: do_backup(verbose=True)) def _check4b(args): # we should check all files, and re-use all of them. None of # the directories should have been changed, so we should # re-use all of them too. 
(rc, out, err) = args self.assertFalse(err) self.failUnlessReallyEqual(rc, 0) fu, fr, fs, dc, dr, ds = self.count_output(out) fchecked, dchecked = self.count_output2(out) self.failUnlessReallyEqual(fchecked, 3) self.failUnlessReallyEqual(fu, 0) self.failUnlessReallyEqual(fr, 3) self.failUnlessReallyEqual(fs, 0) self.failUnlessReallyEqual(dchecked, 4) self.failUnlessReallyEqual(dc, 0) self.failUnlessReallyEqual(dr, 4) self.failUnlessReallyEqual(ds, 0) d.addCallback(_check4b) d.addCallback(lambda res: self.do_cli("ls", "tahoe:backups/Archives")) def _check5(args): (rc, out, err) = args self.assertFalse(err) self.failUnlessReallyEqual(rc, 0) self.new_archives = out.split() self.failUnlessReallyEqual(len(self.new_archives), 3, out) # the original backup should still be the oldest (i.e. sorts # alphabetically towards the beginning) self.failUnlessReallyEqual(sorted(self.new_archives)[0], self.old_archives[0]) d.addCallback(_check5) d.addCallback(self.stall, 1.1) def _modify(res): self.writeto("parent/subdir/foo.txt", "FOOF!") # and turn a file into a directory os.unlink(os.path.join(source, "parent/blah.txt")) os.mkdir(os.path.join(source, "parent/blah.txt")) self.writeto("parent/blah.txt/surprise file", "surprise") self.writeto("parent/blah.txt/surprisedir/subfile", "surprise") # turn a directory into a file os.rmdir(os.path.join(source, "empty")) self.writeto("empty", "imagine nothing being here") return do_backup() d.addCallback(_modify) def _check5a(args): # second backup should reuse bar.txt (if backupdb is available), # and upload the rest. None of the directories can be reused. (rc, out, err) = args self.assertFalse(err) self.failUnlessReallyEqual(rc, 0) fu, fr, fs, dc, dr, ds = self.count_output(out) # new foo.txt, surprise file, subfile, empty self.failUnlessReallyEqual(fu, 4) # old bar.txt self.failUnlessReallyEqual(fr, 1) self.failUnlessReallyEqual(fs, 0) # home, parent, subdir, blah.txt, surprisedir self.failUnlessReallyEqual(dc, 5) self.failUnlessReallyEqual(dr, 0) self.failUnlessReallyEqual(ds, 0) d.addCallback(_check5a) d.addCallback(lambda res: self.do_cli("ls", "tahoe:backups/Archives")) def _check6(args): (rc, out, err) = args self.assertFalse(err) self.failUnlessReallyEqual(rc, 0) self.new_archives = out.split() self.failUnlessReallyEqual(len(self.new_archives), 4) self.failUnlessReallyEqual(sorted(self.new_archives)[0], self.old_archives[0]) d.addCallback(_check6) d.addCallback(lambda res: self.do_cli("get", "tahoe:backups/Latest/parent/subdir/foo.txt")) def _check7(args): (rc, out, err) = args self.assertFalse(err) self.failUnlessReallyEqual(rc, 0) self.assertEqual(out, "FOOF!") # the old snapshot should not be modified return self.do_cli("get", "tahoe:backups/Archives/%s/parent/subdir/foo.txt" % self.old_archives[0]) d.addCallback(_check7) def _check8(args): (rc, out, err) = args self.assertFalse(err) self.failUnlessReallyEqual(rc, 0) self.assertEqual(out, "foo") d.addCallback(_check8) return d def _check_filtering(self, filtered, all, included, excluded): filtered = set(filtered) all = set(all) included = set(included) excluded = set(excluded) self.failUnlessReallyEqual(filtered, included) self.failUnlessReallyEqual(all.difference(filtered), excluded) def test_exclude_options(self): root_listdir = (u'lib.a', u'_darcs', u'subdir', u'nice_doc.lyx') subdir_listdir = (u'another_doc.lyx', u'run_snake_run.py', u'CVS', u'.svn', u'_darcs') basedir = "cli/Backup/exclude_options" fileutil.make_dirs(basedir) nodeurl_path = os.path.join(basedir, 'node.url') 
fileutil.write(nodeurl_path, 'http://example.net:2357/') def parse(args): return parse_options(basedir, "backup", args) # test simple exclude backup_options = parse(['--exclude', '*lyx', 'from', 'to']) filtered = list(backup_options.filter_listdir(root_listdir)) self._check_filtering(filtered, root_listdir, (u'lib.a', u'_darcs', u'subdir'), (u'nice_doc.lyx',)) # multiple exclude backup_options = parse(['--exclude', '*lyx', '--exclude', 'lib.?', 'from', 'to']) filtered = list(backup_options.filter_listdir(root_listdir)) self._check_filtering(filtered, root_listdir, (u'_darcs', u'subdir'), (u'nice_doc.lyx', u'lib.a')) # vcs metadata exclusion backup_options = parse(['--exclude-vcs', 'from', 'to']) filtered = list(backup_options.filter_listdir(subdir_listdir)) self._check_filtering(filtered, subdir_listdir, (u'another_doc.lyx', u'run_snake_run.py',), (u'CVS', u'.svn', u'_darcs')) # read exclude patterns from file exclusion_string = "_darcs\n*py\n.svn" excl_filepath = os.path.join(basedir, 'exclusion') fileutil.write(excl_filepath, exclusion_string) backup_options = parse(['--exclude-from-utf-8', excl_filepath, 'from', 'to']) filtered = list(backup_options.filter_listdir(subdir_listdir)) self._check_filtering(filtered, subdir_listdir, (u'another_doc.lyx', u'CVS'), (u'.svn', u'_darcs', u'run_snake_run.py')) # test BackupConfigurationError self.failUnlessRaises(cli.BackupConfigurationError, parse, ['--exclude-from-utf-8', excl_filepath + '.no', 'from', 'to']) # test that an iterator works too backup_options = parse(['--exclude', '*lyx', 'from', 'to']) filtered = list(backup_options.filter_listdir(iter(root_listdir))) self._check_filtering(filtered, root_listdir, (u'lib.a', u'_darcs', u'subdir'), (u'nice_doc.lyx',)) def test_exclude_options_unicode(self): nice_doc = u"nice_d\u00F8c.lyx" try: doc_pattern_arg_unicode = doc_pattern_arg = u"*d\u00F8c*" except UnicodeEncodeError: raise unittest.SkipTest("A non-ASCII command argument could not be encoded on this platform.") root_listdir = (u'lib.a', u'_darcs', u'subdir', nice_doc) basedir = "cli/Backup/exclude_options_unicode" fileutil.make_dirs(basedir) nodeurl_path = os.path.join(basedir, 'node.url') fileutil.write(nodeurl_path, 'http://example.net:2357/') def parse(args): return parse_options(basedir, "backup", args) # test simple exclude backup_options = parse(['--exclude', doc_pattern_arg, 'from', 'to']) filtered = list(backup_options.filter_listdir(root_listdir)) self._check_filtering(filtered, root_listdir, (u'lib.a', u'_darcs', u'subdir'), (nice_doc,)) # multiple exclude backup_options = parse(['--exclude', doc_pattern_arg, '--exclude', 'lib.?', 'from', 'to']) filtered = list(backup_options.filter_listdir(root_listdir)) self._check_filtering(filtered, root_listdir, (u'_darcs', u'subdir'), (nice_doc, u'lib.a')) # read exclude patterns from file exclusion_string = (doc_pattern_arg_unicode + "\nlib.?").encode("utf-8") excl_filepath = os.path.join(basedir, 'exclusion') fileutil.write(excl_filepath, exclusion_string) backup_options = parse(['--exclude-from-utf-8', excl_filepath, 'from', 'to']) filtered = list(backup_options.filter_listdir(root_listdir)) self._check_filtering(filtered, root_listdir, (u'_darcs', u'subdir'), (nice_doc, u'lib.a')) # test that an iterator works too backup_options = parse(['--exclude', doc_pattern_arg, 'from', 'to']) filtered = list(backup_options.filter_listdir(iter(root_listdir))) self._check_filtering(filtered, root_listdir, (u'lib.a', u'_darcs', u'subdir'), (nice_doc,)) def test_exclude_from_tilde_expansion(self): 
basedir = "cli/Backup/exclude_from_tilde_expansion" fileutil.make_dirs(basedir) nodeurl_path = os.path.join(basedir, 'node.url') fileutil.write(nodeurl_path, 'http://example.net:2357/') # ensure that tilde expansion is performed on exclude-from argument exclude_file = u'~/.tahoe/excludes.dummy' ns = Namespace() ns.called = False original_open = open def call_file(name, *args, **kwargs): if name.endswith("excludes.dummy"): ns.called = True self.failUnlessEqual(name, abspath_expanduser_unicode(exclude_file)) return StringIO() else: return original_open(name, *args, **kwargs) import builtins as module_to_patch patcher = MonkeyPatcher((module_to_patch, 'open', call_file)) patcher.runWithPatches(parse_options, basedir, "backup", ['--exclude-from-utf-8', unicode_to_argv(exclude_file), 'from', 'to']) self.failUnless(ns.called) def test_ignore_symlinks(self): """ A symlink encountered in the backed-up directory is skipped with a warning. """ if not hasattr(os, 'symlink'): raise unittest.SkipTest(_unsupported("Symlinks")) def make_symlink(path): self.writeto("foo.txt", "foo") os.symlink( os.path.join( os.path.dirname(path), "foo.txt", ), path, ) return self._ignore_something_test(u"Symlink", make_symlink) def test_ignore_fifo(self): """ A FIFO encountered in the backed-up directory is skipped with a warning. """ if getattr(os, "mkfifo", None) is None: raise unittest.SkipTest(_unsupported("FIFOs")) def make_fifo(path): # Create the thing to ignore os.makedirs(os.path.dirname(path)) os.mkfifo(path) # Also create anothing thing so the counts end up the same as # those in the symlink test and it's easier to re-use the testing # helper. self.writeto("count-dummy.txt", "foo") return self._ignore_something_test(u"special", make_fifo) def _ignore_something_test(self, kind_of_thing, make_something_to_ignore): """ Assert that when a a certain kind of file is encountered in the backed-up directory a warning that it is not supported is emitted and the backup proceeds to other files with no other error. :param unicode kind_of_thing: The name of the kind of file that will be ignored. This is expected to appear in the warning. :param make_something_to_ignore: A one-argument callable which creates the file that is expected to be ignored. It is called with the path at which the file must be created. :return Deferred: A ``Deferred`` that fires when the assertion has been made. 
""" self.basedir = os.path.dirname(self.mktemp()) self.set_up_grid(oneshare=True) source = os.path.join(self.basedir, "home") ignored_path = os.path.join(source, "foo2.txt") make_something_to_ignore(ignored_path) d = self.do_cli("create-alias", "tahoe") d.addCallback(lambda res: self.do_cli("backup", "--verbose", source, "tahoe:test")) def _check(args): (rc, out, err) = args self.failUnlessReallyEqual(rc, 2) self.assertIn( "WARNING: cannot backup {} ".format(kind_of_thing.lower()), err, ) self.assertIn(ignored_path, err) fu, fr, fs, dc, dr, ds = self.count_output(out) # foo.txt self.failUnlessReallyEqual(fu, 1) self.failUnlessReallyEqual(fr, 0) # foo2.txt self.failUnlessReallyEqual(fs, 1) # home self.failUnlessReallyEqual(dc, 1) self.failUnlessReallyEqual(dr, 0) self.failUnlessReallyEqual(ds, 0) d.addCallback(_check) return d def test_ignore_unreadable_file(self): self.basedir = os.path.dirname(self.mktemp()) self.set_up_grid(oneshare=True) source = os.path.join(self.basedir, "home") self.writeto("foo.txt", "foo") os.chmod(os.path.join(source, "foo.txt"), 0000) d = self.do_cli("create-alias", "tahoe") d.addCallback(lambda res: self.do_cli("backup", source, "tahoe:test")) def _check(args): (rc, out, err) = args self.failUnlessReallyEqual(rc, 2) self.failUnlessReallyEqual(err, "WARNING: permission denied on file %s\n" % os.path.join(source, "foo.txt")) fu, fr, fs, dc, dr, ds = self.count_output(out) self.failUnlessReallyEqual(fu, 0) self.failUnlessReallyEqual(fr, 0) # foo.txt self.failUnlessReallyEqual(fs, 1) # home self.failUnlessReallyEqual(dc, 1) self.failUnlessReallyEqual(dr, 0) self.failUnlessReallyEqual(ds, 0) d.addCallback(_check) # This is necessary for the temp files to be correctly removed def _cleanup(self): os.chmod(os.path.join(source, "foo.txt"), 0o644) d.addCallback(_cleanup) d.addErrback(_cleanup) return d def test_ignore_unreadable_directory(self): self.basedir = os.path.dirname(self.mktemp()) self.set_up_grid(oneshare=True) source = os.path.join(self.basedir, "home") os.mkdir(source) os.mkdir(os.path.join(source, "test")) os.chmod(os.path.join(source, "test"), 0000) d = self.do_cli("create-alias", "tahoe") d.addCallback(lambda res: self.do_cli("backup", source, "tahoe:test")) def _check(args): (rc, out, err) = args self.failUnlessReallyEqual(rc, 2) self.failUnlessReallyEqual(err, "WARNING: permission denied on directory %s\n" % os.path.join(source, "test")) fu, fr, fs, dc, dr, ds = self.count_output(out) self.failUnlessReallyEqual(fu, 0) self.failUnlessReallyEqual(fr, 0) self.failUnlessReallyEqual(fs, 0) # home, test self.failUnlessReallyEqual(dc, 2) self.failUnlessReallyEqual(dr, 0) # test self.failUnlessReallyEqual(ds, 1) d.addCallback(_check) # This is necessary for the temp files to be correctly removed def _cleanup(self): os.chmod(os.path.join(source, "test"), 0o655) d.addCallback(_cleanup) d.addErrback(_cleanup) return d def test_backup_without_alias(self): # 'tahoe backup' should output a sensible error message when invoked # without an alias instead of a stack trace. self.basedir = os.path.dirname(self.mktemp()) self.set_up_grid(oneshare=True) source = os.path.join(self.basedir, "file1") d = self.do_cli('backup', source, source) def _check(args): (rc, out, err) = args self.failUnlessReallyEqual(rc, 1) self.failUnlessIn("error:", err) self.assertEqual(len(out), 0) d.addCallback(_check) return d def test_backup_with_nonexistent_alias(self): # 'tahoe backup' should output a sensible error message when invoked # with a nonexistent alias. 
self.basedir = os.path.dirname(self.mktemp()) self.set_up_grid(oneshare=True) source = os.path.join(self.basedir, "file1") d = self.do_cli("backup", source, "nonexistent:" + source) def _check(args): (rc, out, err) = args self.failUnlessReallyEqual(rc, 1) self.failUnlessIn("error:", err) self.failUnlessIn("nonexistent", err) self.assertEqual(len(out), 0) d.addCallback(_check) return d tahoe_lafs-1.20.0/src/allmydata/test/cli/test_backupdb.py0000644000000000000000000002244613615410400020300 0ustar00""" Ported to Python 3. """ import sys import os.path, time from io import StringIO from twisted.trial import unittest from allmydata.util import fileutil from allmydata.util.encodingutil import listdir_unicode from allmydata.scripts import backupdb from ..common_util import skip_if_cannot_represent_filename class BackupDB(unittest.TestCase): def create(self, dbfile): stderr = StringIO() bdb = backupdb.get_backupdb(dbfile, stderr=stderr) self.failUnless(bdb, "unable to create backupdb from %r" % (dbfile,)) return bdb def test_basic(self): self.basedir = basedir = os.path.join("backupdb", "create") fileutil.make_dirs(basedir) dbfile = os.path.join(basedir, "dbfile") bdb = self.create(dbfile) self.failUnlessEqual(bdb.VERSION, 2) def test_upgrade_v1_v2(self): self.basedir = basedir = os.path.join("backupdb", "upgrade_v1_v2") fileutil.make_dirs(basedir) dbfile = os.path.join(basedir, "dbfile") stderr = StringIO() created = backupdb.get_backupdb(dbfile, stderr=stderr, create_version=(backupdb.SCHEMA_v1, 1), just_create=True) self.failUnless(created, "unable to create v1 backupdb") # now we should have a v1 database on disk bdb = self.create(dbfile) self.failUnlessEqual(bdb.VERSION, 2) def test_fail(self): self.basedir = basedir = os.path.join("backupdb", "fail") fileutil.make_dirs(basedir) # put a non-DB file in the way not_a_db = ("I do not look like a sqlite database\n" + "I'M NOT" * 1000) # OS-X sqlite-2.3.2 takes some convincing self.writeto("not-a-database", not_a_db) stderr_f = StringIO() bdb = backupdb.get_backupdb(os.path.join(basedir, "not-a-database"), stderr_f) self.failUnlessEqual(bdb, None) stderr = stderr_f.getvalue() self.failUnlessIn("backupdb file is unusable", stderr) # sqlite-3.19.3 says "file is encrypted or is not a database" # sqlite-3.20.0 says "file is not a database" self.failUnlessIn("is not a database", stderr) # put a directory in the way, to exercise a different error path where = os.path.join(basedir, "roadblock-dir") fileutil.make_dirs(where) stderr_f = StringIO() bdb = backupdb.get_backupdb(where, stderr_f) self.failUnlessEqual(bdb, None) stderr = stderr_f.getvalue() # the error-message is different under PyPy ... not sure why? 
if 'pypy' in sys.version.lower(): self.failUnlessIn("Could not open database", stderr) else: self.failUnlessIn("unable to open database file", stderr) def writeto(self, filename, data): fn = os.path.join(self.basedir, filename) parentdir = os.path.dirname(fn) fileutil.make_dirs(parentdir) fileutil.write(fn, data) return fn def test_check(self): self.basedir = basedir = os.path.join("backupdb", "check") fileutil.make_dirs(basedir) dbfile = os.path.join(basedir, "dbfile") bdb = self.create(dbfile) foo_fn = self.writeto("foo.txt", "foo.txt") blah_fn = self.writeto("bar/blah.txt", "blah.txt") r = bdb.check_file(foo_fn) self.failUnlessEqual(r.was_uploaded(), False) r.did_upload(b"foo-cap") r = bdb.check_file(blah_fn) self.failUnlessEqual(r.was_uploaded(), False) r.did_upload("blah-cap") r = bdb.check_file(foo_fn) self.failUnlessEqual(r.was_uploaded(), b"foo-cap") self.failUnlessEqual(type(r.was_uploaded()), bytes) self.failUnlessEqual(r.should_check(), False) time.sleep(1.0) # make sure the timestamp changes self.writeto("foo.txt", "NEW") r = bdb.check_file(foo_fn) self.failUnlessEqual(r.was_uploaded(), False) r.did_upload(b"new-cap") r = bdb.check_file(foo_fn) self.failUnlessEqual(r.was_uploaded(), b"new-cap") self.failUnlessEqual(r.should_check(), False) # if we spontaneously decide to upload it anyways, nothing should # break r.did_upload(b"new-cap") r = bdb.check_file(foo_fn, use_timestamps=False) self.failUnlessEqual(r.was_uploaded(), False) r.did_upload(b"new-cap") r = bdb.check_file(foo_fn) self.failUnlessEqual(r.was_uploaded(), b"new-cap") self.failUnlessEqual(r.should_check(), False) bdb.NO_CHECK_BEFORE = 0 bdb.ALWAYS_CHECK_AFTER = 0.1 r = bdb.check_file(blah_fn) self.failUnlessEqual(r.was_uploaded(), b"blah-cap") self.failUnlessEqual(r.should_check(), True) r.did_check_healthy("results") # we know they're ignored for now bdb.NO_CHECK_BEFORE = 200 bdb.ALWAYS_CHECK_AFTER = 400 r = bdb.check_file(blah_fn) self.failUnlessEqual(r.was_uploaded(), b"blah-cap") self.failUnlessEqual(r.should_check(), False) os.unlink(os.path.join(basedir, "foo.txt")) fileutil.make_dirs(os.path.join(basedir, "foo.txt")) # file becomes dir r = bdb.check_file(foo_fn) self.failUnlessEqual(r.was_uploaded(), False) def test_wrong_version(self): self.basedir = basedir = os.path.join("backupdb", "wrong_version") fileutil.make_dirs(basedir) where = os.path.join(basedir, "tooold.db") bdb = self.create(where) # reach into the DB and make it old bdb.cursor.execute("UPDATE version SET version=0") bdb.connection.commit() # now the next time we open the database, it should be an unusable # version stderr_f = StringIO() bdb = backupdb.get_backupdb(where, stderr_f) self.failUnlessEqual(bdb, None) stderr = stderr_f.getvalue() self.failUnlessEqual(stderr.strip(), "Unable to handle backupdb version 0") def test_directory(self): self.basedir = basedir = os.path.join("backupdb", "directory") fileutil.make_dirs(basedir) dbfile = os.path.join(basedir, "dbfile") bdb = self.create(dbfile) contents = {u"file1": b"URI:CHK:blah1", u"file2": b"URI:CHK:blah2", u"dir1": b"URI:DIR2-CHK:baz2"} r = bdb.check_directory(contents) self.failUnless(isinstance(r, backupdb.DirectoryResult)) self.failIf(r.was_created()) dircap = b"URI:DIR2-CHK:foo1" r.did_create(dircap) r = bdb.check_directory(contents) self.failUnless(r.was_created()) self.failUnlessEqual(r.was_created(), dircap) self.failUnlessEqual(r.should_check(), False) # if we spontaneously decide to upload it anyways, nothing should # break r.did_create(dircap) r = bdb.check_directory(contents) 
self.failUnless(r.was_created()) self.failUnlessEqual(r.was_created(), dircap) self.failUnlessEqual(type(r.was_created()), bytes) self.failUnlessEqual(r.should_check(), False) bdb.NO_CHECK_BEFORE = 0 bdb.ALWAYS_CHECK_AFTER = 0.1 time.sleep(1.0) r = bdb.check_directory(contents) self.failUnless(r.was_created()) self.failUnlessEqual(r.was_created(), dircap) self.failUnlessEqual(r.should_check(), True) r.did_check_healthy("results") bdb.NO_CHECK_BEFORE = 200 bdb.ALWAYS_CHECK_AFTER = 400 r = bdb.check_directory(contents) self.failUnless(r.was_created()) self.failUnlessEqual(r.was_created(), dircap) self.failUnlessEqual(r.should_check(), False) contents2 = {u"file1": b"URI:CHK:blah1", u"dir1": b"URI:DIR2-CHK:baz2"} r = bdb.check_directory(contents2) self.failIf(r.was_created()) contents3 = {u"file1": b"URI:CHK:blah1", u"file2": b"URI:CHK:blah3", u"dir1": b"URI:DIR2-CHK:baz2"} r = bdb.check_directory(contents3) self.failIf(r.was_created()) def test_unicode(self): skip_if_cannot_represent_filename(u"f\u00f6\u00f6.txt") skip_if_cannot_represent_filename(u"b\u00e5r.txt") self.basedir = basedir = os.path.join("backupdb", "unicode") fileutil.make_dirs(basedir) dbfile = os.path.join(basedir, "dbfile") bdb = self.create(dbfile) self.writeto(u"f\u00f6\u00f6.txt", "foo.txt") files = [fn for fn in listdir_unicode(str(basedir)) if fn.endswith(".txt")] self.failUnlessEqual(len(files), 1) foo_fn = os.path.join(basedir, files[0]) #print(foo_fn, type(foo_fn)) r = bdb.check_file(foo_fn) self.failUnlessEqual(r.was_uploaded(), False) r.did_upload(b"foo-cap") r = bdb.check_file(foo_fn) self.failUnlessEqual(r.was_uploaded(), b"foo-cap") self.failUnlessEqual(r.should_check(), False) bar_fn = self.writeto(u"b\u00e5r.txt", "bar.txt") #print(bar_fn, type(bar_fn)) r = bdb.check_file(bar_fn) self.failUnlessEqual(r.was_uploaded(), False) r.did_upload(b"bar-cap") r = bdb.check_file(bar_fn) self.failUnlessEqual(r.was_uploaded(), b"bar-cap") self.failUnlessEqual(r.should_check(), False) tahoe_lafs-1.20.0/src/allmydata/test/cli/test_check.py0000644000000000000000000004517613615410400017607 0ustar00from six import ensure_text import os.path import json from twisted.trial import unittest from io import StringIO from allmydata import uri from allmydata.util import base32 from allmydata.util.encodingutil import to_bytes, quote_output_u from allmydata.mutable.publish import MutableData from allmydata.immutable import upload from allmydata.scripts import debug from ..no_network import GridTestMixin from .common import CLITestMixin class Check(GridTestMixin, CLITestMixin, unittest.TestCase): def test_check(self): self.basedir = "cli/Check/check" self.set_up_grid() c0 = self.g.clients[0] DATA = b"data" * 100 DATA_uploadable = MutableData(DATA) d = c0.create_mutable_file(DATA_uploadable) def _stash_uri(n): self.uri = n.get_uri() d.addCallback(_stash_uri) d.addCallback(lambda ign: self.do_cli("check", self.uri)) def _check1(args): (rc, out, err) = args self.assertEqual(len(err), 0, err) self.failUnlessReallyEqual(rc, 0) lines = out.splitlines() self.failUnless("Summary: Healthy" in lines, out) self.failUnless(" good-shares: 10 (encoding is 3-of-10)" in lines, out) d.addCallback(_check1) d.addCallback(lambda ign: self.do_cli("check", "--raw", self.uri)) def _check2(args): (rc, out, err) = args self.assertEqual(len(err), 0, err) self.failUnlessReallyEqual(rc, 0) data = json.loads(out) self.failUnlessReallyEqual(to_bytes(data["summary"]), b"Healthy") self.failUnlessReallyEqual(data["results"]["healthy"], True) d.addCallback(_check2) 
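# Illustrative sketch (not part of the original test suite): the --raw
# output checked by _check2 above is plain JSON on stdout, so a reusable
# callback can decode it once and return the fields these tests keep
# asserting on.  It assumes only what the surrounding tests already rely
# on -- do_cli() firing its Deferred with (rc, out, err) and json.loads()
# on out; the callback name is hypothetical.
def _parse_raw_check(args):
    (rc, out, err) = args
    assert rc == 0, err
    data = json.loads(out)
    # e.g. data["summary"] == "Healthy",
    #      data["results"]["healthy"] is True,
    #      data["results"]["count-shares-good"] == 10 for a healthy 3-of-10 file
    return data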
d.addCallback(lambda ign: c0.upload(upload.Data(b"literal", convergence=b""))) def _stash_lit_uri(n): self.lit_uri = n.get_uri() d.addCallback(_stash_lit_uri) d.addCallback(lambda ign: self.do_cli("check", self.lit_uri)) def _check_lit(args): (rc, out, err) = args self.assertEqual(len(err), 0, err) self.failUnlessReallyEqual(rc, 0) lines = out.splitlines() self.failUnless("Summary: Healthy (LIT)" in lines, out) d.addCallback(_check_lit) d.addCallback(lambda ign: self.do_cli("check", "--raw", self.lit_uri)) def _check_lit_raw(args): (rc, out, err) = args self.assertEqual(len(err), 0, err) self.failUnlessReallyEqual(rc, 0) data = json.loads(out) self.failUnlessReallyEqual(data["results"]["healthy"], True) d.addCallback(_check_lit_raw) d.addCallback(lambda ign: c0.create_immutable_dirnode({}, convergence=b"")) def _stash_lit_dir_uri(n): self.lit_dir_uri = n.get_uri() d.addCallback(_stash_lit_dir_uri) d.addCallback(lambda ign: self.do_cli("check", self.lit_dir_uri)) d.addCallback(_check_lit) d.addCallback(lambda ign: self.do_cli("check", "--raw", self.lit_uri)) d.addCallback(_check_lit_raw) def _clobber_shares(ignored): # delete one, corrupt a second shares = self.find_uri_shares(self.uri) self.failUnlessReallyEqual(len(shares), 10) os.unlink(shares[0][2]) cso = debug.CorruptShareOptions() cso.stdout = StringIO() cso.parseOptions([shares[1][2]]) storage_index = uri.from_string(self.uri).get_storage_index() self._corrupt_share_line = " server %s, SI %s, shnum %d" % \ (str(base32.b2a(shares[1][1]), "ascii"), str(base32.b2a(storage_index), "ascii"), shares[1][0]) debug.corrupt_share(cso) d.addCallback(_clobber_shares) d.addCallback(lambda ign: self.do_cli("check", "--verify", self.uri)) def _check3(args): (rc, out, err) = args self.assertEqual(len(err), 0, err) self.failUnlessReallyEqual(rc, 0) lines = out.splitlines() summary = [l for l in lines if l.startswith("Summary")][0] self.failUnless("Summary: Unhealthy: 8 shares (enc 3-of-10)" in summary, summary) self.failUnless(" good-shares: 8 (encoding is 3-of-10)" in lines, out) self.failUnless(" corrupt shares:" in lines, out) self.failUnless(self._corrupt_share_line in lines, out) d.addCallback(_check3) d.addCallback(lambda ign: self.do_cli("check", "--verify", "--raw", self.uri)) def _check3_raw(args): (rc, out, err) = args self.assertEqual(len(err), 0, err) self.failUnlessReallyEqual(rc, 0) data = json.loads(out) self.failUnlessReallyEqual(data["results"]["healthy"], False) self.failUnlessIn("Unhealthy: 8 shares (enc 3-of-10)", data["summary"]) self.failUnlessReallyEqual(data["results"]["count-shares-good"], 8) self.failUnlessReallyEqual(data["results"]["count-corrupt-shares"], 1) self.failUnlessIn("list-corrupt-shares", data["results"]) d.addCallback(_check3_raw) d.addCallback(lambda ign: self.do_cli("check", "--verify", "--repair", self.uri)) def _check4(args): (rc, out, err) = args self.assertEqual(len(err), 0, err) self.failUnlessReallyEqual(rc, 0) lines = out.splitlines() self.failUnless("Summary: not healthy" in lines, out) self.failUnless(" good-shares: 8 (encoding is 3-of-10)" in lines, out) self.failUnless(" corrupt shares:" in lines, out) self.failUnless(self._corrupt_share_line in lines, out) self.failUnless(" repair successful" in lines, out) d.addCallback(_check4) d.addCallback(lambda ign: self.do_cli("check", "--verify", "--repair", self.uri)) def _check5(args): (rc, out, err) = args self.assertEqual(len(err), 0, err) self.failUnlessReallyEqual(rc, 0) lines = out.splitlines() self.failUnless("Summary: healthy" in lines, out) 
self.failUnless(" good-shares: 10 (encoding is 3-of-10)" in lines, out) self.failIf(" corrupt shares:" in lines, out) d.addCallback(_check5) return d def test_deep_check(self): self.basedir = "cli/Check/deep_check" self.set_up_grid() c0 = self.g.clients[0] self.uris = {} self.fileurls = {} DATA = b"data" * 100 quoted_good = quote_output_u("g\u00F6\u00F6d") d = c0.create_dirnode() def _stash_root_and_create_file(n): self.rootnode = n self.rooturi = n.get_uri() return n.add_file(u"g\u00F6\u00F6d", upload.Data(DATA, convergence=b"")) d.addCallback(_stash_root_and_create_file) def _stash_uri(fn, which): self.uris[which] = fn.get_uri() return fn d.addCallback(_stash_uri, u"g\u00F6\u00F6d") d.addCallback(lambda ign: self.rootnode.add_file(u"small", upload.Data(b"literal", convergence=b""))) d.addCallback(_stash_uri, "small") d.addCallback(lambda ign: c0.create_mutable_file(MutableData(DATA+b"1"))) d.addCallback(lambda fn: self.rootnode.set_node(u"mutable", fn)) d.addCallback(_stash_uri, "mutable") d.addCallback(lambda ign: self.do_cli("deep-check", self.rooturi)) def _check1(args): (rc, out, err) = args self.assertEqual(len(err), 0, err) self.failUnlessReallyEqual(rc, 0) lines = out.splitlines() self.failUnless("done: 4 objects checked, 4 healthy, 0 unhealthy" in lines, out) d.addCallback(_check1) # root # root/g\u00F6\u00F6d # root/small # root/mutable d.addCallback(lambda ign: self.do_cli("deep-check", "--verbose", self.rooturi)) def _check2(args): (rc, out, err) = args self.assertEqual(len(err), 0, err) self.failUnlessReallyEqual(rc, 0) out = ensure_text(out) lines = out.splitlines() self.failUnless("'': Healthy" in lines, out) self.failUnless("'small': Healthy (LIT)" in lines, out) self.failUnless((quoted_good + ": Healthy") in lines, out) self.failUnless("'mutable': Healthy" in lines, out) self.failUnless("done: 4 objects checked, 4 healthy, 0 unhealthy" in lines, out) d.addCallback(_check2) d.addCallback(lambda ign: self.do_cli("stats", self.rooturi)) def _check_stats(args): (rc, out, err) = args self.assertEqual(len(err), 0, err) self.failUnlessReallyEqual(rc, 0) lines = out.splitlines() self.failUnlessIn(" count-immutable-files: 1", lines) self.failUnlessIn(" count-mutable-files: 1", lines) self.failUnlessIn(" count-literal-files: 1", lines) self.failUnlessIn(" count-directories: 1", lines) self.failUnlessIn(" size-immutable-files: 400", lines) self.failUnlessIn("Size Histogram:", lines) self.failUnlessIn(" 4-10 : 1 (10 B, 10 B)", lines) self.failUnlessIn(" 317-1000 : 1 (1000 B, 1000 B)", lines) d.addCallback(_check_stats) def _clobber_shares(ignored): shares = self.find_uri_shares(self.uris[u"g\u00F6\u00F6d"]) self.failUnlessReallyEqual(len(shares), 10) os.unlink(shares[0][2]) shares = self.find_uri_shares(self.uris["mutable"]) cso = debug.CorruptShareOptions() cso.stdout = StringIO() cso.parseOptions([shares[1][2]]) storage_index = uri.from_string(self.uris["mutable"]).get_storage_index() self._corrupt_share_line = " corrupt: server %s, SI %s, shnum %d" % \ (str(base32.b2a(shares[1][1]), "ascii"), str(base32.b2a(storage_index), "ascii"), shares[1][0]) debug.corrupt_share(cso) d.addCallback(_clobber_shares) # root # root/g\u00F6\u00F6d [9 shares] # root/small # root/mutable [1 corrupt share] d.addCallback(lambda ign: self.do_cli("deep-check", "--verbose", self.rooturi)) def _check3(args): (rc, out, err) = args self.assertEqual(len(err), 0, err) self.failUnlessReallyEqual(rc, 0) out = ensure_text(out) lines = out.splitlines() self.failUnless("'': Healthy" in lines, out) 
self.failUnless("'small': Healthy (LIT)" in lines, out) self.failUnless("'mutable': Healthy" in lines, out) # needs verifier self.failUnless((quoted_good + ": Not Healthy: 9 shares (enc 3-of-10)") in lines, out) self.failIf(self._corrupt_share_line in lines, out) self.failUnless("done: 4 objects checked, 3 healthy, 1 unhealthy" in lines, out) d.addCallback(_check3) d.addCallback(lambda ign: self.do_cli("deep-check", "--verbose", "--verify", self.rooturi)) def _check4(args): (rc, out, err) = args self.assertEqual(len(err), 0, err) self.failUnlessReallyEqual(rc, 0) out = ensure_text(out) lines = out.splitlines() self.failUnless("'': Healthy" in lines, out) self.failUnless("'small': Healthy (LIT)" in lines, out) mutable = [l for l in lines if l.startswith("'mutable'")][0] self.failUnless(mutable.startswith("'mutable': Unhealthy: 9 shares (enc 3-of-10)"), mutable) self.failUnless(self._corrupt_share_line in lines, out) self.failUnless((quoted_good + ": Not Healthy: 9 shares (enc 3-of-10)") in lines, out) self.failUnless("done: 4 objects checked, 2 healthy, 2 unhealthy" in lines, out) d.addCallback(_check4) d.addCallback(lambda ign: self.do_cli("deep-check", "--raw", self.rooturi)) def _check5(args): (rc, out, err) = args self.assertEqual(len(err), 0, err) self.failUnlessReallyEqual(rc, 0) lines = out.splitlines() units = [json.loads(line) for line in lines] # root, small, g\u00F6\u00F6d, mutable, stats self.failUnlessReallyEqual(len(units), 4+1) d.addCallback(_check5) d.addCallback(lambda ign: self.do_cli("deep-check", "--verbose", "--verify", "--repair", self.rooturi)) def _check6(args): (rc, out, err) = args self.assertEqual(len(err), 0, err) self.failUnlessReallyEqual(rc, 0) out = ensure_text(out) lines = out.splitlines() self.failUnless("'': healthy" in lines, out) self.failUnless("'small': healthy" in lines, out) self.failUnless("'mutable': not healthy" in lines, out) self.failUnless(self._corrupt_share_line in lines, out) self.failUnless((quoted_good + ": not healthy") in lines, out) self.failUnless("done: 4 objects checked" in lines, out) self.failUnless(" pre-repair: 2 healthy, 2 unhealthy" in lines, out) self.failUnless(" 2 repairs attempted, 2 successful, 0 failed" in lines, out) self.failUnless(" post-repair: 4 healthy, 0 unhealthy" in lines,out) d.addCallback(_check6) # now add a subdir, and a file below that, then make the subdir # unrecoverable d.addCallback(lambda ign: self.rootnode.create_subdirectory(u"subdir")) d.addCallback(_stash_uri, "subdir") d.addCallback(lambda fn: fn.add_file(u"subfile", upload.Data(DATA+b"2", b""))) d.addCallback(lambda ign: self.delete_shares_numbered(self.uris["subdir"], list(range(10)))) # root # rootg\u00F6\u00F6d/ # root/small # root/mutable # root/subdir [unrecoverable: 0 shares] # root/subfile d.addCallback(lambda ign: self.do_cli("manifest", self.rooturi)) def _manifest_failed(args): (rc, out, err) = args self.failIfEqual(rc, 0) self.failUnlessIn("ERROR: UnrecoverableFileError", err) # the fatal directory should still show up, as the last line self.failUnlessIn(" subdir\n", ensure_text(out)) d.addCallback(_manifest_failed) d.addCallback(lambda ign: self.do_cli("deep-check", self.rooturi)) def _deep_check_failed(args): (rc, out, err) = args self.failIfEqual(rc, 0) self.failUnlessIn("ERROR: UnrecoverableFileError", err) # we want to make sure that the error indication is the last # thing that gets emitted self.failIf("done:" in out, out) d.addCallback(_deep_check_failed) # this test is disabled until the deep-repair response to an # unrepairable 
directory is fixed. The failure-to-repair should not # throw an exception, but the failure-to-traverse that follows # should throw UnrecoverableFileError. #d.addCallback(lambda ign: # self.do_cli("deep-check", "--repair", self.rooturi)) #def _deep_check_repair_failed((rc, out, err)): # self.failIfEqual(rc, 0) # print(err) # self.failUnlessIn("ERROR: UnrecoverableFileError", err) # self.failIf("done:" in out, out) #d.addCallback(_deep_check_repair_failed) return d def test_check_without_alias(self): # 'tahoe check' should output a sensible error message if it needs to # find the default alias and can't self.basedir = "cli/Check/check_without_alias" self.set_up_grid(oneshare=True) d = self.do_cli("check") def _check(args): (rc, out, err) = args self.failUnlessReallyEqual(rc, 1) self.failUnlessIn("error:", err) self.assertEqual(len(out), 0, out) d.addCallback(_check) d.addCallback(lambda ign: self.do_cli("deep-check")) d.addCallback(_check) return d def test_check_with_nonexistent_alias(self): # 'tahoe check' should output a sensible error message if it needs to # find an alias and can't. self.basedir = "cli/Check/check_with_nonexistent_alias" self.set_up_grid(oneshare=True) d = self.do_cli("check", "nonexistent:") def _check(args): (rc, out, err) = args self.failUnlessReallyEqual(rc, 1) self.failUnlessIn("error:", err) self.failUnlessIn("nonexistent", err) self.assertEqual(len(out), 0, out) d.addCallback(_check) return d def test_check_with_multiple_aliases(self): self.basedir = "cli/Check/check_with_multiple_aliases" self.set_up_grid(oneshare=True) self.uriList = [] c0 = self.g.clients[0] d = c0.create_dirnode() def _stash_uri(n): self.uriList.append(n.get_uri()) d.addCallback(_stash_uri) d.addCallback(lambda _: c0.create_dirnode()) d.addCallback(_stash_uri) d.addCallback(lambda ign: self.do_cli("check", self.uriList[0], self.uriList[1])) def _check(args): (rc, out, err) = args self.failUnlessReallyEqual(rc, 0) self.assertEqual(len(err), 0, err) #Ensure healthy appears for each uri self.failUnlessIn("Healthy", out[:len(out)//2]) self.failUnlessIn("Healthy", out[len(out)//2:]) d.addCallback(_check) d.addCallback(lambda ign: self.do_cli("check", self.uriList[0], "nonexistent:")) def _check2(args): (rc, out, err) = args self.failUnlessReallyEqual(rc, 1) self.failUnlessIn("Healthy", out) self.failUnlessIn("error:", err) self.failUnlessIn("nonexistent", err) d.addCallback(_check2) return d tahoe_lafs-1.20.0/src/allmydata/test/cli/test_cli.py0000644000000000000000000016767713615410400017314 0ustar00""" Ported to Python 3. """ from io import StringIO import re from six import ensure_text import os.path from urllib.parse import quote as url_quote from twisted.trial import unittest from twisted.internet.testing import ( MemoryReactor, ) from twisted.internet.test.modulehelpers import ( AlternateReactor, ) import allmydata from allmydata.crypto import ed25519 from allmydata.util import fileutil, hashutil, base32 from allmydata import uri from allmydata.immutable import upload from allmydata.dirnode import normalize from allmydata.scripts.common_http import socket_error import allmydata.scripts.common_http # Test that the scripts can be imported. 
from allmydata.scripts import create_node, debug, \ tahoe_add_alias, tahoe_backup, tahoe_check, tahoe_cp, tahoe_get, tahoe_ls, \ tahoe_manifest, tahoe_mkdir, tahoe_mv, tahoe_put, tahoe_unlink, tahoe_webopen, \ tahoe_run _hush_pyflakes = [create_node, debug, tahoe_add_alias, tahoe_backup, tahoe_check, tahoe_cp, tahoe_get, tahoe_ls, tahoe_manifest, tahoe_mkdir, tahoe_mv, tahoe_put, tahoe_unlink, tahoe_webopen, tahoe_run] from allmydata.scripts import common from allmydata.scripts.common import DEFAULT_ALIAS, get_aliases, get_alias, \ DefaultAliasMarker from allmydata.scripts import cli, debug, runner from allmydata.test.common_util import (ReallyEqualMixin, skip_if_cannot_represent_filename, run_cli) from allmydata.test.no_network import GridTestMixin from allmydata.test.cli.common import CLITestMixin, parse_options from twisted.python import usage from allmydata.util.encodingutil import listdir_unicode, get_io_encoding class CLI(CLITestMixin, unittest.TestCase): def _dump_cap(self, *args): args = [ensure_text(s) for s in args] config = debug.DumpCapOptions() config.stdout,config.stderr = StringIO(), StringIO() config.parseOptions(args) debug.dump_cap(config) self.failIf(config.stderr.getvalue()) output = config.stdout.getvalue() return output def test_dump_cap_chk(self): key = b"\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" uri_extension_hash = hashutil.uri_extension_hash(b"stuff") needed_shares = 25 total_shares = 100 size = 1234 u = uri.CHKFileURI(key=key, uri_extension_hash=uri_extension_hash, needed_shares=needed_shares, total_shares=total_shares, size=size) output = self._dump_cap(u.to_string()) self.failUnless("CHK File:" in output, output) self.failUnless("key: aaaqeayeaudaocajbifqydiob4" in output, output) self.failUnless("UEB hash: nf3nimquen7aeqm36ekgxomalstenpkvsdmf6fplj7swdatbv5oa" in output, output) self.failUnless("size: 1234" in output, output) self.failUnless("k/N: 25/100" in output, output) self.failUnless("storage index: hdis5iaveku6lnlaiccydyid7q" in output, output) output = self._dump_cap("--client-secret", "5s33nk3qpvnj2fw3z4mnm2y6fa", u.to_string()) self.failUnless("client renewal secret: znxmki5zdibb5qlt46xbdvk2t55j7hibejq3i5ijyurkr6m6jkhq" in output, output) output = self._dump_cap(str(u.get_verify_cap().to_string(), "ascii")) self.failIf("key: " in output, output) self.failUnless("UEB hash: nf3nimquen7aeqm36ekgxomalstenpkvsdmf6fplj7swdatbv5oa" in output, output) self.failUnless("size: 1234" in output, output) self.failUnless("k/N: 25/100" in output, output) self.failUnless("storage index: hdis5iaveku6lnlaiccydyid7q" in output, output) prefixed_u = "http://127.0.0.1/uri/%s" % url_quote(u.to_string()) output = self._dump_cap(prefixed_u) self.failUnless("CHK File:" in output, output) self.failUnless("key: aaaqeayeaudaocajbifqydiob4" in output, output) self.failUnless("UEB hash: nf3nimquen7aeqm36ekgxomalstenpkvsdmf6fplj7swdatbv5oa" in output, output) self.failUnless("size: 1234" in output, output) self.failUnless("k/N: 25/100" in output, output) self.failUnless("storage index: hdis5iaveku6lnlaiccydyid7q" in output, output) def test_dump_cap_lit(self): u = uri.LiteralFileURI(b"this is some data") output = self._dump_cap(u.to_string()) self.failUnless("Literal File URI:" in output, output) self.failUnless("data: 'this is some data'" in output, output) def test_dump_cap_sdmf(self): writekey = b"\x01" * 16 fingerprint = b"\xfe" * 32 u = uri.WriteableSSKFileURI(writekey, fingerprint) output = self._dump_cap(u.to_string()) self.failUnless("SDMF Writeable 
URI:" in output, output) self.failUnless("writekey: aeaqcaibaeaqcaibaeaqcaibae" in output, output) self.failUnless("readkey: nvgh5vj2ekzzkim5fgtb4gey5y" in output, output) self.failUnless("storage index: nt4fwemuw7flestsezvo2eveke" in output, output) self.failUnless("fingerprint: 737p57x6737p57x6737p57x6737p57x6737p57x6737p57x6737a" in output, output) output = self._dump_cap("--client-secret", "5s33nk3qpvnj2fw3z4mnm2y6fa", u.to_string()) self.failUnless("file renewal secret: arpszxzc2t6kb4okkg7sp765xgkni5z7caavj7lta73vmtymjlxq" in output, output) fileutil.make_dirs("cli/test_dump_cap/private") fileutil.write("cli/test_dump_cap/private/secret", "5s33nk3qpvnj2fw3z4mnm2y6fa\n") output = self._dump_cap("--client-dir", "cli/test_dump_cap", u.to_string()) self.failUnless("file renewal secret: arpszxzc2t6kb4okkg7sp765xgkni5z7caavj7lta73vmtymjlxq" in output, output) output = self._dump_cap("--client-dir", "cli/test_dump_cap_BOGUS", u.to_string()) self.failIf("file renewal secret:" in output, output) output = self._dump_cap("--nodeid", "tqc35esocrvejvg4mablt6aowg6tl43j", u.to_string()) self.failUnless("write_enabler: mgcavriox2wlb5eer26unwy5cw56elh3sjweffckkmivvsxtaknq" in output, output) self.failIf("file renewal secret:" in output, output) output = self._dump_cap("--nodeid", "tqc35esocrvejvg4mablt6aowg6tl43j", "--client-secret", "5s33nk3qpvnj2fw3z4mnm2y6fa", u.to_string()) self.failUnless("write_enabler: mgcavriox2wlb5eer26unwy5cw56elh3sjweffckkmivvsxtaknq" in output, output) self.failUnless("file renewal secret: arpszxzc2t6kb4okkg7sp765xgkni5z7caavj7lta73vmtymjlxq" in output, output) self.failUnless("lease renewal secret: 7pjtaumrb7znzkkbvekkmuwpqfjyfyamznfz4bwwvmh4nw33lorq" in output, output) u = u.get_readonly() output = self._dump_cap(u.to_string()) self.failUnless("SDMF Read-only URI:" in output, output) self.failUnless("readkey: nvgh5vj2ekzzkim5fgtb4gey5y" in output, output) self.failUnless("storage index: nt4fwemuw7flestsezvo2eveke" in output, output) self.failUnless("fingerprint: 737p57x6737p57x6737p57x6737p57x6737p57x6737p57x6737a" in output, output) u = u.get_verify_cap() output = self._dump_cap(u.to_string()) self.failUnless("SDMF Verifier URI:" in output, output) self.failUnless("storage index: nt4fwemuw7flestsezvo2eveke" in output, output) self.failUnless("fingerprint: 737p57x6737p57x6737p57x6737p57x6737p57x6737p57x6737a" in output, output) def test_dump_cap_mdmf(self): writekey = b"\x01" * 16 fingerprint = b"\xfe" * 32 u = uri.WriteableMDMFFileURI(writekey, fingerprint) output = self._dump_cap(u.to_string()) self.failUnless("MDMF Writeable URI:" in output, output) self.failUnless("writekey: aeaqcaibaeaqcaibaeaqcaibae" in output, output) self.failUnless("readkey: nvgh5vj2ekzzkim5fgtb4gey5y" in output, output) self.failUnless("storage index: nt4fwemuw7flestsezvo2eveke" in output, output) self.failUnless("fingerprint: 737p57x6737p57x6737p57x6737p57x6737p57x6737p57x6737a" in output, output) output = self._dump_cap("--client-secret", "5s33nk3qpvnj2fw3z4mnm2y6fa", u.to_string()) self.failUnless("file renewal secret: arpszxzc2t6kb4okkg7sp765xgkni5z7caavj7lta73vmtymjlxq" in output, output) fileutil.make_dirs("cli/test_dump_cap/private") fileutil.write("cli/test_dump_cap/private/secret", "5s33nk3qpvnj2fw3z4mnm2y6fa\n") output = self._dump_cap("--client-dir", "cli/test_dump_cap", u.to_string()) self.failUnless("file renewal secret: arpszxzc2t6kb4okkg7sp765xgkni5z7caavj7lta73vmtymjlxq" in output, output) output = self._dump_cap("--client-dir", "cli/test_dump_cap_BOGUS", u.to_string()) 
self.failIf("file renewal secret:" in output, output) output = self._dump_cap("--nodeid", "tqc35esocrvejvg4mablt6aowg6tl43j", u.to_string()) self.failUnless("write_enabler: mgcavriox2wlb5eer26unwy5cw56elh3sjweffckkmivvsxtaknq" in output, output) self.failIf("file renewal secret:" in output, output) output = self._dump_cap("--nodeid", "tqc35esocrvejvg4mablt6aowg6tl43j", "--client-secret", "5s33nk3qpvnj2fw3z4mnm2y6fa", u.to_string()) self.failUnless("write_enabler: mgcavriox2wlb5eer26unwy5cw56elh3sjweffckkmivvsxtaknq" in output, output) self.failUnless("file renewal secret: arpszxzc2t6kb4okkg7sp765xgkni5z7caavj7lta73vmtymjlxq" in output, output) self.failUnless("lease renewal secret: 7pjtaumrb7znzkkbvekkmuwpqfjyfyamznfz4bwwvmh4nw33lorq" in output, output) u = u.get_readonly() output = self._dump_cap(u.to_string()) self.failUnless("MDMF Read-only URI:" in output, output) self.failUnless("readkey: nvgh5vj2ekzzkim5fgtb4gey5y" in output, output) self.failUnless("storage index: nt4fwemuw7flestsezvo2eveke" in output, output) self.failUnless("fingerprint: 737p57x6737p57x6737p57x6737p57x6737p57x6737p57x6737a" in output, output) u = u.get_verify_cap() output = self._dump_cap(u.to_string()) self.failUnless("MDMF Verifier URI:" in output, output) self.failUnless("storage index: nt4fwemuw7flestsezvo2eveke" in output, output) self.failUnless("fingerprint: 737p57x6737p57x6737p57x6737p57x6737p57x6737p57x6737a" in output, output) def test_dump_cap_chk_directory(self): key = b"\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" uri_extension_hash = hashutil.uri_extension_hash(b"stuff") needed_shares = 25 total_shares = 100 size = 1234 u1 = uri.CHKFileURI(key=key, uri_extension_hash=uri_extension_hash, needed_shares=needed_shares, total_shares=total_shares, size=size) u = uri.ImmutableDirectoryURI(u1) output = self._dump_cap(u.to_string()) self.failUnless("CHK Directory URI:" in output, output) self.failUnless("key: aaaqeayeaudaocajbifqydiob4" in output, output) self.failUnless("UEB hash: nf3nimquen7aeqm36ekgxomalstenpkvsdmf6fplj7swdatbv5oa" in output, output) self.failUnless("size: 1234" in output, output) self.failUnless("k/N: 25/100" in output, output) self.failUnless("storage index: hdis5iaveku6lnlaiccydyid7q" in output, output) output = self._dump_cap("--client-secret", "5s33nk3qpvnj2fw3z4mnm2y6fa", u.to_string()) self.failUnless("file renewal secret: csrvkjgomkyyyil5yo4yk5np37p6oa2ve2hg6xmk2dy7kaxsu6xq" in output, output) u = u.get_verify_cap() output = self._dump_cap(u.to_string()) self.failUnless("CHK Directory Verifier URI:" in output, output) self.failIf("key: " in output, output) self.failUnless("UEB hash: nf3nimquen7aeqm36ekgxomalstenpkvsdmf6fplj7swdatbv5oa" in output, output) self.failUnless("size: 1234" in output, output) self.failUnless("k/N: 25/100" in output, output) self.failUnless("storage index: hdis5iaveku6lnlaiccydyid7q" in output, output) def test_dump_cap_sdmf_directory(self): writekey = b"\x01" * 16 fingerprint = b"\xfe" * 32 u1 = uri.WriteableSSKFileURI(writekey, fingerprint) u = uri.DirectoryURI(u1) output = self._dump_cap(u.to_string()) self.failUnless("Directory Writeable URI:" in output, output) self.failUnless("writekey: aeaqcaibaeaqcaibaeaqcaibae" in output, output) self.failUnless("readkey: nvgh5vj2ekzzkim5fgtb4gey5y" in output, output) self.failUnless("storage index: nt4fwemuw7flestsezvo2eveke" in output, output) self.failUnless("fingerprint: 737p57x6737p57x6737p57x6737p57x6737p57x6737p57x6737a" in output, output) output = self._dump_cap("--client-secret", 
"5s33nk3qpvnj2fw3z4mnm2y6fa", u.to_string()) self.failUnless("file renewal secret: arpszxzc2t6kb4okkg7sp765xgkni5z7caavj7lta73vmtymjlxq" in output, output) output = self._dump_cap("--nodeid", "tqc35esocrvejvg4mablt6aowg6tl43j", u.to_string()) self.failUnless("write_enabler: mgcavriox2wlb5eer26unwy5cw56elh3sjweffckkmivvsxtaknq" in output, output) self.failIf("file renewal secret:" in output, output) output = self._dump_cap("--nodeid", "tqc35esocrvejvg4mablt6aowg6tl43j", "--client-secret", "5s33nk3qpvnj2fw3z4mnm2y6fa", u.to_string()) self.failUnless("write_enabler: mgcavriox2wlb5eer26unwy5cw56elh3sjweffckkmivvsxtaknq" in output, output) self.failUnless("file renewal secret: arpszxzc2t6kb4okkg7sp765xgkni5z7caavj7lta73vmtymjlxq" in output, output) self.failUnless("lease renewal secret: 7pjtaumrb7znzkkbvekkmuwpqfjyfyamznfz4bwwvmh4nw33lorq" in output, output) u = u.get_readonly() output = self._dump_cap(u.to_string()) self.failUnless("Directory Read-only URI:" in output, output) self.failUnless("readkey: nvgh5vj2ekzzkim5fgtb4gey5y" in output, output) self.failUnless("storage index: nt4fwemuw7flestsezvo2eveke" in output, output) self.failUnless("fingerprint: 737p57x6737p57x6737p57x6737p57x6737p57x6737p57x6737a" in output, output) u = u.get_verify_cap() output = self._dump_cap(u.to_string()) self.failUnless("Directory Verifier URI:" in output, output) self.failUnless("storage index: nt4fwemuw7flestsezvo2eveke" in output, output) self.failUnless("fingerprint: 737p57x6737p57x6737p57x6737p57x6737p57x6737p57x6737a" in output, output) def test_dump_cap_mdmf_directory(self): writekey = b"\x01" * 16 fingerprint = b"\xfe" * 32 u1 = uri.WriteableMDMFFileURI(writekey, fingerprint) u = uri.MDMFDirectoryURI(u1) output = self._dump_cap(u.to_string()) self.failUnless("Directory Writeable URI:" in output, output) self.failUnless("writekey: aeaqcaibaeaqcaibaeaqcaibae" in output, output) self.failUnless("readkey: nvgh5vj2ekzzkim5fgtb4gey5y" in output, output) self.failUnless("storage index: nt4fwemuw7flestsezvo2eveke" in output, output) self.failUnless("fingerprint: 737p57x6737p57x6737p57x6737p57x6737p57x6737p57x6737a" in output, output) output = self._dump_cap("--client-secret", "5s33nk3qpvnj2fw3z4mnm2y6fa", u.to_string()) self.failUnless("file renewal secret: arpszxzc2t6kb4okkg7sp765xgkni5z7caavj7lta73vmtymjlxq" in output, output) output = self._dump_cap("--nodeid", "tqc35esocrvejvg4mablt6aowg6tl43j", u.to_string()) self.failUnless("write_enabler: mgcavriox2wlb5eer26unwy5cw56elh3sjweffckkmivvsxtaknq" in output, output) self.failIf("file renewal secret:" in output, output) output = self._dump_cap("--nodeid", "tqc35esocrvejvg4mablt6aowg6tl43j", "--client-secret", "5s33nk3qpvnj2fw3z4mnm2y6fa", u.to_string()) self.failUnless("write_enabler: mgcavriox2wlb5eer26unwy5cw56elh3sjweffckkmivvsxtaknq" in output, output) self.failUnless("file renewal secret: arpszxzc2t6kb4okkg7sp765xgkni5z7caavj7lta73vmtymjlxq" in output, output) self.failUnless("lease renewal secret: 7pjtaumrb7znzkkbvekkmuwpqfjyfyamznfz4bwwvmh4nw33lorq" in output, output) u = u.get_readonly() output = self._dump_cap(u.to_string()) self.failUnless("Directory Read-only URI:" in output, output) self.failUnless("readkey: nvgh5vj2ekzzkim5fgtb4gey5y" in output, output) self.failUnless("storage index: nt4fwemuw7flestsezvo2eveke" in output, output) self.failUnless("fingerprint: 737p57x6737p57x6737p57x6737p57x6737p57x6737p57x6737a" in output, output) u = u.get_verify_cap() output = self._dump_cap(u.to_string()) self.failUnless("Directory Verifier URI:" in output, 
output) self.failUnless("storage index: nt4fwemuw7flestsezvo2eveke" in output, output) self.failUnless("fingerprint: 737p57x6737p57x6737p57x6737p57x6737p57x6737p57x6737a" in output, output) def _catalog_shares(self, *basedirs): o = debug.CatalogSharesOptions() o.stdout,o.stderr = StringIO(), StringIO() args = list(basedirs) o.parseOptions(args) debug.catalog_shares(o) out = o.stdout.getvalue() err = o.stderr.getvalue() return out, err def test_catalog_shares_error(self): nodedir1 = "cli/test_catalog_shares/node1" sharedir = os.path.join(nodedir1, "storage", "shares", "mq", "mqfblse6m5a6dh45isu2cg7oji") fileutil.make_dirs(sharedir) fileutil.write("cli/test_catalog_shares/node1/storage/shares/mq/not-a-dir", "") # write a bogus share that looks a little bit like CHK fileutil.write(os.path.join(sharedir, "8"), b"\x00\x00\x00\x01" + b"\xff" * 200) # this triggers an assert nodedir2 = "cli/test_catalog_shares/node2" fileutil.make_dirs(nodedir2) fileutil.write("cli/test_catalog_shares/node1/storage/shares/not-a-dir", "") # now make sure that the 'catalog-shares' commands survives the error out, err = self._catalog_shares(nodedir1, nodedir2) self.assertEqual(out, "") self.failUnless("Error processing " in err, "didn't see 'error processing' in '%s'" % err) #self.failUnless(nodedir1 in err, # "didn't see '%s' in '%s'" % (nodedir1, err)) # windows mangles the path, and os.path.join isn't enough to make # up for it, so just look for individual strings self.failUnless("node1" in err, "didn't see 'node1' in '%s'" % err) self.failUnless("mqfblse6m5a6dh45isu2cg7oji" in err, "didn't see 'mqfblse6m5a6dh45isu2cg7oji' in '%s'" % err) def test_alias(self): def s128(c): return base32.b2a(c*(128//8)) def s256(c): return base32.b2a(c*(256//8)) TA = b"URI:DIR2:%s:%s" % (s128(b"T"), s256(b"T")) WA = b"URI:DIR2:%s:%s" % (s128(b"W"), s256(b"W")) CA = b"URI:DIR2:%s:%s" % (s128(b"C"), s256(b"C")) aliases = {"tahoe": TA, "work": WA, "c": CA} def ga1(path): return get_alias(aliases, path, u"tahoe") uses_lettercolon = common.platform_uses_lettercolon_drivename() self.failUnlessReallyEqual(ga1(u"bare"), (TA, b"bare")) self.failUnlessReallyEqual(ga1(u"baredir/file"), (TA, b"baredir/file")) self.failUnlessReallyEqual(ga1(u"baredir/file:7"), (TA, b"baredir/file:7")) self.failUnlessReallyEqual(ga1(u"tahoe:"), (TA, b"")) self.failUnlessReallyEqual(ga1(u"tahoe:file"), (TA, b"file")) self.failUnlessReallyEqual(ga1(u"tahoe:dir/file"), (TA, b"dir/file")) self.failUnlessReallyEqual(ga1(u"work:"), (WA, b"")) self.failUnlessReallyEqual(ga1(u"work:file"), (WA, b"file")) self.failUnlessReallyEqual(ga1(u"work:dir/file"), (WA, b"dir/file")) # default != None means we really expect a tahoe path, regardless of # whether we're on windows or not. This is what 'tahoe get' uses. 
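# Illustrative sketch (not part of the original test suite): what the
# ga1/ga2/ga3 helpers in this test are exercising.  get_alias() splits an
# "ALIAS:subpath" string into the alias's root cap plus a bytes subpath;
# with a non-None default the argument is always treated as a tahoe path
# (so "c:file" resolves through the aliases table), while with
# default=None a Windows drive letter comes back as a local path via
# DefaultAliasMarker.  Only names already imported in this module are
# used; the helper name is hypothetical.
def _resolve(aliases, path, default):
    rootcap, subpath = get_alias(aliases, path, default)
    if rootcap == DefaultAliasMarker:
        return ("local", subpath)       # e.g. u"c:file" with default=None on Windows
    return ("grid", rootcap, subpath)   # e.g. u"work:dir/file" -> (WA, b"dir/file")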
self.failUnlessReallyEqual(ga1(u"c:"), (CA, b"")) self.failUnlessReallyEqual(ga1(u"c:file"), (CA, b"file")) self.failUnlessReallyEqual(ga1(u"c:dir/file"), (CA, b"dir/file")) self.failUnlessReallyEqual(ga1(u"URI:stuff"), (b"URI:stuff", b"")) self.failUnlessReallyEqual(ga1(u"URI:stuff/file"), (b"URI:stuff", b"file")) self.failUnlessReallyEqual(ga1(u"URI:stuff:./file"), (b"URI:stuff", b"file")) self.failUnlessReallyEqual(ga1(u"URI:stuff/dir/file"), (b"URI:stuff", b"dir/file")) self.failUnlessReallyEqual(ga1(u"URI:stuff:./dir/file"), (b"URI:stuff", b"dir/file")) self.failUnlessRaises(common.UnknownAliasError, ga1, u"missing:") self.failUnlessRaises(common.UnknownAliasError, ga1, u"missing:dir") self.failUnlessRaises(common.UnknownAliasError, ga1, u"missing:dir/file") def ga2(path): return get_alias(aliases, path, None) self.failUnlessReallyEqual(ga2(u"bare"), (DefaultAliasMarker, b"bare")) self.failUnlessReallyEqual(ga2(u"baredir/file"), (DefaultAliasMarker, b"baredir/file")) self.failUnlessReallyEqual(ga2(u"baredir/file:7"), (DefaultAliasMarker, b"baredir/file:7")) self.failUnlessReallyEqual(ga2(u"baredir/sub:1/file:7"), (DefaultAliasMarker, b"baredir/sub:1/file:7")) self.failUnlessReallyEqual(ga2(u"tahoe:"), (TA, b"")) self.failUnlessReallyEqual(ga2(u"tahoe:file"), (TA, b"file")) self.failUnlessReallyEqual(ga2(u"tahoe:dir/file"), (TA, b"dir/file")) # on windows, we really want c:foo to indicate a local file. # default==None is what 'tahoe cp' uses. if uses_lettercolon: self.failUnlessReallyEqual(ga2(u"c:"), (DefaultAliasMarker, b"c:")) self.failUnlessReallyEqual(ga2(u"c:file"), (DefaultAliasMarker, b"c:file")) self.failUnlessReallyEqual(ga2(u"c:dir/file"), (DefaultAliasMarker, b"c:dir/file")) else: self.failUnlessReallyEqual(ga2(u"c:"), (CA, b"")) self.failUnlessReallyEqual(ga2(u"c:file"), (CA, b"file")) self.failUnlessReallyEqual(ga2(u"c:dir/file"), (CA, b"dir/file")) self.failUnlessReallyEqual(ga2(u"work:"), (WA, b"")) self.failUnlessReallyEqual(ga2(u"work:file"), (WA, b"file")) self.failUnlessReallyEqual(ga2(u"work:dir/file"), (WA, b"dir/file")) self.failUnlessReallyEqual(ga2(u"URI:stuff"), (b"URI:stuff", b"")) self.failUnlessReallyEqual(ga2(u"URI:stuff/file"), (b"URI:stuff", b"file")) self.failUnlessReallyEqual(ga2(u"URI:stuff:./file"), (b"URI:stuff", b"file")) self.failUnlessReallyEqual(ga2(u"URI:stuff/dir/file"), (b"URI:stuff", b"dir/file")) self.failUnlessReallyEqual(ga2(u"URI:stuff:./dir/file"), (b"URI:stuff", b"dir/file")) self.failUnlessRaises(common.UnknownAliasError, ga2, u"missing:") self.failUnlessRaises(common.UnknownAliasError, ga2, u"missing:dir") self.failUnlessRaises(common.UnknownAliasError, ga2, u"missing:dir/file") def ga3(path): old = common.pretend_platform_uses_lettercolon try: common.pretend_platform_uses_lettercolon = True retval = get_alias(aliases, path, None) finally: common.pretend_platform_uses_lettercolon = old return retval self.failUnlessReallyEqual(ga3(u"bare"), (DefaultAliasMarker, b"bare")) self.failUnlessReallyEqual(ga3(u"baredir/file"), (DefaultAliasMarker, b"baredir/file")) self.failUnlessReallyEqual(ga3(u"baredir/file:7"), (DefaultAliasMarker, b"baredir/file:7")) self.failUnlessReallyEqual(ga3(u"baredir/sub:1/file:7"), (DefaultAliasMarker, b"baredir/sub:1/file:7")) self.failUnlessReallyEqual(ga3(u"tahoe:"), (TA, b"")) self.failUnlessReallyEqual(ga3(u"tahoe:file"), (TA, b"file")) self.failUnlessReallyEqual(ga3(u"tahoe:dir/file"), (TA, b"dir/file")) self.failUnlessReallyEqual(ga3(u"c:"), (DefaultAliasMarker, b"c:")) 
self.failUnlessReallyEqual(ga3(u"c:file"), (DefaultAliasMarker, b"c:file")) self.failUnlessReallyEqual(ga3(u"c:dir/file"), (DefaultAliasMarker, b"c:dir/file")) self.failUnlessReallyEqual(ga3(u"work:"), (WA, b"")) self.failUnlessReallyEqual(ga3(u"work:file"), (WA, b"file")) self.failUnlessReallyEqual(ga3(u"work:dir/file"), (WA, b"dir/file")) self.failUnlessReallyEqual(ga3(u"URI:stuff"), (b"URI:stuff", b"")) self.failUnlessReallyEqual(ga3(u"URI:stuff:./file"), (b"URI:stuff", b"file")) self.failUnlessReallyEqual(ga3(u"URI:stuff:./dir/file"), (b"URI:stuff", b"dir/file")) self.failUnlessRaises(common.UnknownAliasError, ga3, u"missing:") self.failUnlessRaises(common.UnknownAliasError, ga3, u"missing:dir") self.failUnlessRaises(common.UnknownAliasError, ga3, u"missing:dir/file") # calling get_alias with a path that doesn't include an alias and # default set to something that isn't in the aliases argument should # raise an UnknownAliasError. def ga4(path): return get_alias(aliases, path, u"badddefault:") self.failUnlessRaises(common.UnknownAliasError, ga4, u"afile") self.failUnlessRaises(common.UnknownAliasError, ga4, u"a/dir/path/") def ga5(path): old = common.pretend_platform_uses_lettercolon try: common.pretend_platform_uses_lettercolon = True retval = get_alias(aliases, path, u"baddefault:") finally: common.pretend_platform_uses_lettercolon = old return retval self.failUnlessRaises(common.UnknownAliasError, ga5, u"C:\\Windows") def test_alias_tolerance(self): def s128(c): return base32.b2a(c*(128//8)) def s256(c): return base32.b2a(c*(256//8)) TA = b"URI:DIR2:%s:%s" % (s128(b"T"), s256(b"T")) aliases = {"present": TA, "future": b"URI-FROM-FUTURE:ooh:aah"} def ga1(path): return get_alias(aliases, path, u"tahoe") self.failUnlessReallyEqual(ga1(u"present:file"), (TA, b"file")) # this throws, via assert IDirnodeURI.providedBy(), since get_alias() # wants a dirnode, and the future cap gives us UnknownURI instead. self.failUnlessRaises(AssertionError, ga1, u"future:stuff") def test_listdir_unicode_good(self): filenames = [u'L\u00F4zane', u'Bern', u'Gen\u00E8ve'] # must be NFC for name in filenames: skip_if_cannot_represent_filename(name) basedir = "cli/common/listdir_unicode_good" fileutil.make_dirs(basedir) for name in filenames: open(os.path.join(str(basedir), name), "wb").close() for file in listdir_unicode(str(basedir)): self.failUnlessIn(normalize(file), filenames) def test_exception_catcher(self): """ An exception that is otherwise unhandled during argument dispatch is written to stderr and causes the process to exit with code 1. 
""" self.basedir = "cli/exception_catcher" exc = Exception("canary") class BrokenOptions(object): def parseOptions(self, argv): raise exc stderr = StringIO() reactor = MemoryReactor() with AlternateReactor(reactor): with self.assertRaises(SystemExit) as ctx: runner.run( configFactory=BrokenOptions, argv=["tahoe"], stderr=stderr, ) self.assertTrue(reactor.hasRun) self.assertFalse(reactor.running) self.failUnlessIn(str(exc), stderr.getvalue()) self.assertEqual(1, ctx.exception.code) class Help(unittest.TestCase): def failUnlessInNormalized(self, x, y): # helper function to deal with the --help output being wrapped to # various widths, depending on the $COLUMNS environment variable self.failUnlessIn(x.replace("\n", " "), y.replace("\n", " ")) def test_get(self): help = str(cli.GetOptions()) self.failUnlessIn("[options] REMOTE_FILE LOCAL_FILE", help) self.failUnlessIn("% tahoe get FOO |less", help) def test_put(self): help = str(cli.PutOptions()) self.failUnlessIn("[options] LOCAL_FILE REMOTE_FILE", help) self.failUnlessIn("% cat FILE | tahoe put", help) def test_ls(self): help = str(cli.ListOptions()) self.failUnlessIn("[options] [PATH]", help) def test_unlink(self): help = str(cli.UnlinkOptions()) self.failUnlessIn("[options] REMOTE_FILE", help) def test_mv(self): help = str(cli.MvOptions()) self.failUnlessIn("[options] FROM TO", help) self.failUnlessInNormalized("Use 'tahoe mv' to move files", help) def test_cp(self): help = str(cli.CpOptions()) self.failUnlessIn("[options] FROM.. TO", help) self.failUnlessInNormalized("Use 'tahoe cp' to copy files", help) def test_ln(self): help = str(cli.LnOptions()) self.failUnlessIn("[options] FROM_LINK TO_LINK", help) self.failUnlessInNormalized("Use 'tahoe ln' to duplicate a link", help) def test_mkdir(self): help = str(cli.MakeDirectoryOptions()) self.failUnlessIn("[options] [REMOTE_DIR]", help) self.failUnlessInNormalized("Create a new directory", help) def test_backup(self): help = str(cli.BackupOptions()) self.failUnlessIn("[options] FROM ALIAS:TO", help) def test_webopen(self): help = str(cli.WebopenOptions()) self.failUnlessIn("[options] [ALIAS:PATH]", help) def test_manifest(self): help = str(cli.ManifestOptions()) self.failUnlessIn("[options] [ALIAS:PATH]", help) def test_stats(self): help = str(cli.StatsOptions()) self.failUnlessIn("[options] [ALIAS:PATH]", help) def test_check(self): help = str(cli.CheckOptions()) self.failUnlessIn("[options] [ALIAS:PATH]", help) def test_deep_check(self): help = str(cli.DeepCheckOptions()) self.failUnlessIn("[options] [ALIAS:PATH]", help) def test_create_alias(self): help = str(cli.CreateAliasOptions()) self.failUnlessIn("[options] ALIAS[:]", help) def test_add_alias(self): help = str(cli.AddAliasOptions()) self.failUnlessIn("[options] ALIAS[:] DIRCAP", help) def test_list_aliases(self): help = str(cli.ListAliasesOptions()) self.failUnlessIn("[options]", help) def test_run(self): help = str(tahoe_run.RunOptions()) self.failUnlessIn("[options] [NODEDIR [twistd-options]]", help) def test_create_client(self): help = str(create_node.CreateClientOptions()) self.failUnlessIn("[options] [NODEDIR]", help) def test_create_node(self): help = str(create_node.CreateNodeOptions()) self.failUnlessIn("[options] [NODEDIR]", help) def test_create_introducer(self): help = str(create_node.CreateIntroducerOptions()) self.failUnlessIn("[options] NODEDIR", help) def test_debug_flogtool(self): options = debug.FlogtoolOptions() help = str(options) self.failUnlessIn(" [global-options] debug flogtool ", help) 
self.failUnlessInNormalized("The 'tahoe debug flogtool' command uses the correct imports", help) for (option, shortcut, oClass, desc) in options.subCommands: subhelp = str(oClass()) self.failUnlessIn(" [global-options] debug flogtool %s " % (option,), subhelp) class Ln(GridTestMixin, CLITestMixin, unittest.TestCase): def _create_test_file(self): data = "puppies" * 1000 path = os.path.join(self.basedir, "datafile") fileutil.write(path, data) self.datafile = path def test_ln_without_alias(self): # if invoked without an alias when the 'tahoe' alias doesn't # exist, 'tahoe ln' should output a useful error message and not # a stack trace self.basedir = "cli/Ln/ln_without_alias" self.set_up_grid(oneshare=True) d = self.do_cli("ln", "from", "to") def _check(args): (rc, out, err) = args self.failUnlessReallyEqual(rc, 1) self.failUnlessIn("error:", err) self.assertEqual(out, "") d.addCallback(_check) # Make sure that validation extends to the "to" parameter d.addCallback(lambda ign: self.do_cli("create-alias", "havasu")) d.addCallback(lambda ign: self._create_test_file()) d.addCallback(lambda ign: self.do_cli("put", self.datafile, "havasu:from")) d.addCallback(lambda ign: self.do_cli("ln", "havasu:from", "to")) d.addCallback(_check) return d def test_ln_with_nonexistent_alias(self): # If invoked with aliases that don't exist, 'tahoe ln' should # output a useful error message and not a stack trace. self.basedir = "cli/Ln/ln_with_nonexistent_alias" self.set_up_grid(oneshare=True) d = self.do_cli("ln", "havasu:from", "havasu:to") def _check(args): (rc, out, err) = args self.failUnlessReallyEqual(rc, 1) self.failUnlessIn("error:", err) d.addCallback(_check) # Make sure that validation occurs on the to parameter if the # from parameter passes. d.addCallback(lambda ign: self.do_cli("create-alias", "havasu")) d.addCallback(lambda ign: self._create_test_file()) d.addCallback(lambda ign: self.do_cli("put", self.datafile, "havasu:from")) d.addCallback(lambda ign: self.do_cli("ln", "havasu:from", "huron:to")) d.addCallback(_check) return d class Admin(unittest.TestCase): def test_generate_keypair(self): d = run_cli("admin", "generate-keypair") def _done(args): (rc, stdout, stderr) = args lines = [line.strip() for line in stdout.splitlines()] privkey_bits = lines[0].split() pubkey_bits = lines[1].split() sk_header = "private:" vk_header = "public:" self.failUnlessEqual(privkey_bits[0], sk_header, lines[0]) self.failUnlessEqual(pubkey_bits[0], vk_header, lines[1]) self.failUnless(privkey_bits[1].startswith("priv-v0-"), lines[0]) self.failUnless(pubkey_bits[1].startswith("pub-v0-"), lines[1]) sk, pk = ed25519.signing_keypair_from_string( privkey_bits[1].encode("ascii")) vk_bytes = pubkey_bits[1].encode("ascii") self.assertEqual( ed25519.string_from_verifying_key(pk), vk_bytes, ) d.addCallback(_done) return d def test_derive_pubkey(self): priv_key, pub_key = ed25519.create_signing_keypair() priv_key_str = str(ed25519.string_from_signing_key(priv_key), "ascii") pub_key_str = str(ed25519.string_from_verifying_key(pub_key), "ascii") d = run_cli("admin", "derive-pubkey", priv_key_str) def _done(args): (rc, stdout, stderr) = args lines = stdout.split("\n") privkey_line = lines[0].strip() pubkey_line = lines[1].strip() sk_header = "private: priv-v0-" vk_header = "public: pub-v0-" self.failUnless(privkey_line.startswith(sk_header), privkey_line) self.failUnless(pubkey_line.startswith(vk_header), pubkey_line) pub_key_str2 = pubkey_line[len(vk_header):] self.assertEqual("pub-v0-" + pub_key_str2, pub_key_str) 
d.addCallback(_done) return d class Errors(GridTestMixin, CLITestMixin, unittest.TestCase): def test_get(self): self.basedir = "cli/Errors/get" self.set_up_grid() c0 = self.g.clients[0] self.fileurls = {} DATA = b"data" * 100 d = c0.upload(upload.Data(DATA, convergence=b"")) def _stash_bad(ur): self.uri_1share = ur.get_uri() self.delete_shares_numbered(ur.get_uri(), list(range(1,10))) d.addCallback(_stash_bad) # the download is abandoned as soon as it's clear that we won't get # enough shares. The one remaining share might be in either the # COMPLETE or the PENDING state. in_complete_msg = "ran out of shares: complete=sh0 pending= overdue= unused= need 3" in_pending_msg_regex = "ran out of shares: complete= pending=Share\(.+\) overdue= unused= need 3" d.addCallback(lambda ign: self.do_cli("get", self.uri_1share)) def _check1(args): (rc, out, err) = args self.failIfEqual(rc, 0) self.failUnless("410 Gone" in err, err) self.failUnlessIn("NotEnoughSharesError: ", err) self.failUnless(in_complete_msg in err or re.search(in_pending_msg_regex, err)) d.addCallback(_check1) targetf = os.path.join(self.basedir, "output") d.addCallback(lambda ign: self.do_cli("get", self.uri_1share, targetf)) def _check2(args): (rc, out, err) = args self.failIfEqual(rc, 0) self.failUnless("410 Gone" in err, err) self.failUnlessIn("NotEnoughSharesError: ", err) self.failUnless(in_complete_msg in err or re.search(in_pending_msg_regex, err)) self.failIf(os.path.exists(targetf)) d.addCallback(_check2) return d def test_broken_socket(self): # When the http connection breaks (such as when node.url is overwritten # by a confused user), a user friendly error message should be printed. self.basedir = "cli/Errors/test_broken_socket" self.set_up_grid(oneshare=True) # Simulate a connection error def _socket_error(*args, **kwargs): raise socket_error('test error') self.patch(allmydata.scripts.common_http.http_client.HTTPConnection, "endheaders", _socket_error) d = self.do_cli("mkdir") def _check_invalid(args): (rc, stdout, stderr) = args self.failIfEqual(rc, 0) self.failUnlessIn("Error trying to connect to http://127.0.0.1", stderr) d.addCallback(_check_invalid) return d class Get(GridTestMixin, CLITestMixin, unittest.TestCase): def test_get_without_alias(self): # 'tahoe get' should output a useful error message when invoked # without an explicit alias and when the default 'tahoe' alias # hasn't been created yet. self.basedir = "cli/Get/get_without_alias" self.set_up_grid(oneshare=True) d = self.do_cli('get', 'file') def _check(args): (rc, out, err) = args self.failUnlessReallyEqual(rc, 1) self.failUnlessIn("error:", err) self.assertEqual(out, "") d.addCallback(_check) return d def test_get_with_nonexistent_alias(self): # 'tahoe get' should output a useful error message when invoked with # an explicit alias that doesn't exist. self.basedir = "cli/Get/get_with_nonexistent_alias" self.set_up_grid(oneshare=True) d = self.do_cli("get", "nonexistent:file") def _check(args): (rc, out, err) = args self.failUnlessReallyEqual(rc, 1) self.failUnlessIn("error:", err) self.failUnlessIn("nonexistent", err) self.assertEqual(out, "") d.addCallback(_check) return d class Manifest(GridTestMixin, CLITestMixin, unittest.TestCase): def test_manifest_without_alias(self): # 'tahoe manifest' should output a useful error message when invoked # without an explicit alias when the default 'tahoe' alias is # missing. 
self.basedir = "cli/Manifest/manifest_without_alias" self.set_up_grid(oneshare=True) d = self.do_cli("manifest") def _check(args): (rc, out, err) = args self.failUnlessReallyEqual(rc, 1) self.failUnlessIn("error:", err) self.assertEqual(out, "") d.addCallback(_check) return d def test_manifest_with_nonexistent_alias(self): # 'tahoe manifest' should output a useful error message when invoked # with an explicit alias that doesn't exist. self.basedir = "cli/Manifest/manifest_with_nonexistent_alias" self.set_up_grid(oneshare=True) d = self.do_cli("manifest", "nonexistent:") def _check(args): (rc, out, err) = args self.failUnlessReallyEqual(rc, 1) self.failUnlessIn("error:", err) self.failUnlessIn("nonexistent", err) self.assertEqual(out, "") d.addCallback(_check) return d class Mkdir(GridTestMixin, CLITestMixin, unittest.TestCase): def test_mkdir(self): self.basedir = os.path.dirname(self.mktemp()) self.set_up_grid(oneshare=True) d = self.do_cli("create-alias", "tahoe") d.addCallback(lambda res: self.do_cli("mkdir", "test")) def _check(args): (rc, out, err) = args self.failUnlessReallyEqual(rc, 0) self.assertEqual(err, "") self.failUnlessIn("URI:", out) d.addCallback(_check) return d def test_mkdir_mutable_type(self): self.basedir = os.path.dirname(self.mktemp()) self.set_up_grid(oneshare=True) d = self.do_cli("create-alias", "tahoe") def _check(args, st): (rc, out, err) = args self.failUnlessReallyEqual(rc, 0) self.assertEqual(err, "") self.failUnlessIn(st, out) return out def _mkdir(ign, mutable_type, uri_prefix, dirname): """ :param str mutable_type: 'sdmf' or 'mdmf' (or uppercase versions) :param str uri_prefix: kind of URI :param str dirname: the directory alias """ d2 = self.do_cli("mkdir", "--format={}".format(mutable_type), dirname) d2.addCallback(_check, uri_prefix) def _stash_filecap(cap): u = uri.from_string(cap) fn_uri = u.get_filenode_cap() self._filecap = fn_uri.to_string() d2.addCallback(_stash_filecap) d2.addCallback(lambda ign: self.do_cli("ls", "--json", dirname)) d2.addCallback(_check, uri_prefix) d2.addCallback(lambda ign: self.do_cli("ls", "--json", self._filecap)) d2.addCallback(_check, '"format": "%s"' % (mutable_type.upper(),)) return d2 d.addCallback(_mkdir, "sdmf", "URI:DIR2", "tahoe:foo") d.addCallback(_mkdir, "SDMF", "URI:DIR2", "tahoe:foo2") d.addCallback(_mkdir, "mdmf", "URI:DIR2-MDMF", "tahoe:bar") d.addCallback(_mkdir, "MDMF", "URI:DIR2-MDMF", "tahoe:bar2") return d def test_mkdir_mutable_type_unlinked(self): self.basedir = os.path.dirname(self.mktemp()) self.set_up_grid(oneshare=True) d = self.do_cli("mkdir", "--format=SDMF") def _check(args, st): (rc, out, err) = args self.failUnlessReallyEqual(rc, 0) self.assertEqual(err, "") self.failUnlessIn(st, out) return out d.addCallback(_check, "URI:DIR2") def _stash_dircap(cap): self._dircap = cap # Now we're going to feed the cap into uri.from_string... u = uri.from_string(cap) # ...grab the underlying filenode uri. fn_uri = u.get_filenode_cap() # ...and stash that. 
self._filecap = fn_uri.to_string() d.addCallback(_stash_dircap) d.addCallback(lambda res: self.do_cli("ls", "--json", self._filecap)) d.addCallback(_check, '"format": "SDMF"') d.addCallback(lambda res: self.do_cli("mkdir", "--format=MDMF")) d.addCallback(_check, "URI:DIR2-MDMF") d.addCallback(_stash_dircap) d.addCallback(lambda res: self.do_cli("ls", "--json", self._filecap)) d.addCallback(_check, '"format": "MDMF"') return d def test_mkdir_bad_mutable_type(self): o = cli.MakeDirectoryOptions() self.failUnlessRaises(usage.UsageError, o.parseOptions, ["--format=LDMF"]) def test_mkdir_unicode(self): self.basedir = os.path.dirname(self.mktemp()) self.set_up_grid(oneshare=True) try: motorhead_arg = u"tahoe:Mot\u00F6rhead".encode(get_io_encoding()) except UnicodeEncodeError: raise unittest.SkipTest("A non-ASCII command argument could not be encoded on this platform.") d = self.do_cli("create-alias", "tahoe") d.addCallback(lambda res: self.do_cli("mkdir", motorhead_arg)) def _check(args): (rc, out, err) = args self.failUnlessReallyEqual(rc, 0) self.assertEqual(err, "") self.failUnlessIn("URI:", out) d.addCallback(_check) return d def test_mkdir_with_nonexistent_alias(self): # when invoked with an alias that doesn't exist, 'tahoe mkdir' should # output a sensible error message rather than a stack trace. self.basedir = "cli/Mkdir/mkdir_with_nonexistent_alias" self.set_up_grid(oneshare=True) d = self.do_cli("mkdir", "havasu:") def _check(args): (rc, out, err) = args self.failUnlessReallyEqual(rc, 1) self.failUnlessIn("error:", err) self.assertEqual(out, "") d.addCallback(_check) return d class Unlink(GridTestMixin, CLITestMixin, unittest.TestCase): command = "unlink" def _create_test_file(self): data = "puppies" * 1000 path = os.path.join(self.basedir, "datafile") fileutil.write(path, data) self.datafile = path def test_unlink_without_alias(self): # 'tahoe unlink' should behave sensibly when invoked without an explicit # alias before the default 'tahoe' alias has been created. self.basedir = "cli/Unlink/%s_without_alias" % (self.command,) self.set_up_grid(oneshare=True) d = self.do_cli(self.command, "afile") def _check(args): (rc, out, err) = args self.failUnlessReallyEqual(rc, 1) self.failUnlessIn("error:", err) self.assertEqual(out, "") d.addCallback(_check) d.addCallback(lambda ign: self.do_cli(self.command, "afile")) d.addCallback(_check) return d def test_unlink_with_nonexistent_alias(self): # 'tahoe unlink' should behave sensibly when invoked with an explicit # alias that doesn't exist. self.basedir = "cli/Unlink/%s_with_nonexistent_alias" % (self.command,) self.set_up_grid(oneshare=True) d = self.do_cli(self.command, "nonexistent:afile") def _check(args): (rc, out, err) = args self.failUnlessReallyEqual(rc, 1) self.failUnlessIn("error:", err) self.failUnlessIn("nonexistent", err) self.assertEqual(out, "") d.addCallback(_check) d.addCallback(lambda ign: self.do_cli(self.command, "nonexistent:afile")) d.addCallback(_check) return d def test_unlink_without_path(self): # 'tahoe unlink' should give a sensible error message when invoked without a path. 
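# Passing a bare "URI:" capability, as the test below does by feeding the
# output of `tahoe put` straight back into `tahoe unlink`, names a file but
# no directory entry, so there is nothing to unlink and the command should
# refuse with a "path must be given" error rather than a stack trace.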
self.basedir = "cli/Unlink/%s_without_path" % (self.command,) self.set_up_grid(oneshare=True) self._create_test_file() d = self.do_cli("create-alias", "tahoe") d.addCallback(lambda ign: self.do_cli("put", self.datafile, "tahoe:test")) def _do_unlink(args): (rc, out, err) = args self.failUnlessReallyEqual(rc, 0) self.failUnless(out.startswith("URI:"), out) return self.do_cli(self.command, out.strip('\n')) d.addCallback(_do_unlink) def _check(args): (rc, out, err) = args self.failUnlessReallyEqual(rc, 1) self.failUnlessIn("'tahoe %s'" % (self.command,), err) self.failUnlessIn("path must be given", err) self.assertEqual(out, "") d.addCallback(_check) return d class Stats(GridTestMixin, CLITestMixin, unittest.TestCase): def test_empty_directory(self): self.basedir = "cli/Stats/empty_directory" self.set_up_grid(oneshare=True) c0 = self.g.clients[0] self.fileurls = {} d = c0.create_dirnode() def _stash_root(n): self.rootnode = n self.rooturi = n.get_uri() d.addCallback(_stash_root) # make sure we can get stats on an empty directory too d.addCallback(lambda ign: self.do_cli("stats", self.rooturi)) def _check_stats(args): (rc, out, err) = args self.assertEqual(err, "") self.failUnlessReallyEqual(rc, 0) lines = out.splitlines() self.failUnlessIn(" count-immutable-files: 0", lines) self.failUnlessIn(" count-mutable-files: 0", lines) self.failUnlessIn(" count-literal-files: 0", lines) self.failUnlessIn(" count-directories: 1", lines) self.failUnlessIn(" size-immutable-files: 0", lines) self.failIfIn("Size Histogram:", lines) d.addCallback(_check_stats) return d def test_stats_without_alias(self): # when invoked with no explicit alias and before the default 'tahoe' # alias is created, 'tahoe stats' should output an informative error # message, not a stack trace. self.basedir = "cli/Stats/stats_without_alias" self.set_up_grid(oneshare=True) d = self.do_cli("stats") def _check(args): (rc, out, err) = args self.failUnlessReallyEqual(rc, 1) self.failUnlessIn("error:", err) self.assertEqual(out, "") d.addCallback(_check) return d def test_stats_with_nonexistent_alias(self): # when invoked with an explicit alias that doesn't exist, # 'tahoe stats' should output a useful error message. self.basedir = "cli/Stats/stats_with_nonexistent_alias" self.set_up_grid(oneshare=True) d = self.do_cli("stats", "havasu:") def _check(args): (rc, out, err) = args self.failUnlessReallyEqual(rc, 1) self.failUnlessIn("error:", err) self.assertEqual(out, "") d.addCallback(_check) return d class Webopen(GridTestMixin, CLITestMixin, unittest.TestCase): def test_webopen_with_nonexistent_alias(self): # when invoked with an alias that doesn't exist, 'tahoe webopen' # should output an informative error message instead of a stack # trace. self.basedir = "cli/Webopen/webopen_with_nonexistent_alias" self.set_up_grid(oneshare=True) d = self.do_cli("webopen", "fake:") def _check(args): (rc, out, err) = args self.failUnlessReallyEqual(rc, 1) self.failUnlessIn("error:", err) self.assertEqual(out, "") d.addCallback(_check) return d def test_webopen(self): # TODO: replace with @patch that supports Deferreds. 
import webbrowser def call_webbrowser_open(url): self.failUnlessIn(str(self.alias_uri, "ascii").replace(':', '%3A'), url) self.webbrowser_open_called = True def _cleanup(res): webbrowser.open = self.old_webbrowser_open return res self.old_webbrowser_open = webbrowser.open try: webbrowser.open = call_webbrowser_open self.basedir = "cli/Webopen/webopen" self.set_up_grid(oneshare=True) d = self.do_cli("create-alias", "alias:") def _check_alias(args): (rc, out, err) = args self.failUnlessReallyEqual(rc, 0, repr((rc, out, err))) self.failUnlessIn("Alias 'alias' created", out) self.assertEqual(err, "") self.alias_uri = get_aliases(self.get_clientdir())["alias"] d.addCallback(_check_alias) d.addCallback(lambda res: self.do_cli("webopen", "alias:")) def _check_webopen(args): (rc, out, err) = args self.failUnlessReallyEqual(rc, 0, repr((rc, out, err))) self.assertEqual(out, "") self.assertEqual(err, "") self.failUnless(self.webbrowser_open_called) d.addCallback(_check_webopen) d.addBoth(_cleanup) except: _cleanup(None) raise return d class Options(ReallyEqualMixin, unittest.TestCase): # this test case only looks at argument-processing and simple stuff. def parse(self, args, stdout=None): o = runner.Options() if stdout is not None: o.stdout = stdout o.parseOptions(args) while hasattr(o, "subOptions"): o = o.subOptions return o def test_list(self): fileutil.rm_dir("cli/test_options") fileutil.make_dirs("cli/test_options") fileutil.make_dirs("cli/test_options/private") fileutil.write("cli/test_options/node.url", "http://localhost:8080/\n") filenode_uri = uri.WriteableSSKFileURI(writekey=b"\x00"*16, fingerprint=b"\x00"*32) private_uri = uri.DirectoryURI(filenode_uri).to_string() fileutil.write("cli/test_options/private/root_dir.cap", private_uri + b"\n") def parse2(args): return parse_options("cli/test_options", "ls", args) o = parse2([]) self.failUnlessEqual(o['node-url'], "http://localhost:8080/") self.failUnlessEqual(o.aliases[DEFAULT_ALIAS].encode("ascii"), private_uri) self.failUnlessEqual(o.where, u"") o = parse2(["--node-url", "http://example.org:8111/"]) self.failUnlessEqual(o['node-url'], "http://example.org:8111/") self.failUnlessEqual(o.aliases[DEFAULT_ALIAS].encode("ascii"), private_uri) self.failUnlessEqual(o.where, u"") # -u for --node-url used to clash with -u for --uri (tickets #1949 and #2137). 
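# The cases below pin down how that clash was resolved: a short `-u URL` now
# parses as --node-url=URL and leaves the boolean "uri" option False, and the
# --uri flag has to be given in its long form (second case) to turn it on.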
o = parse2(["-u", "http://example.org:8111/"]) self.failUnlessEqual(o['node-url'], "http://example.org:8111/") self.failUnlessEqual(o.aliases[DEFAULT_ALIAS].encode("ascii"), private_uri) self.failUnlessEqual(o.where, u"") self.failIf(o["uri"]) o = parse2(["-u", "http://example.org:8111/", "--uri"]) self.failUnlessEqual(o['node-url'], "http://example.org:8111/") self.failUnlessEqual(o.aliases[DEFAULT_ALIAS].encode("ascii"), private_uri) self.failUnlessEqual(o.where, u"") self.failUnless(o["uri"]) o = parse2(["--dir-cap", "root"]) self.failUnlessEqual(o['node-url'], "http://localhost:8080/") self.failUnlessEqual(o.aliases[DEFAULT_ALIAS], "root") self.failUnlessEqual(o.where, u"") other_filenode_uri = uri.WriteableSSKFileURI(writekey=b"\x11"*16, fingerprint=b"\x11"*32) other_uri = uri.DirectoryURI(other_filenode_uri).to_string() o = parse2(["--dir-cap", other_uri]) self.failUnlessEqual(o['node-url'], "http://localhost:8080/") self.failUnlessEqual(o.aliases[DEFAULT_ALIAS].encode("ascii"), other_uri) self.failUnlessEqual(o.where, u"") o = parse2(["--dir-cap", other_uri, "subdir"]) self.failUnlessEqual(o['node-url'], "http://localhost:8080/") self.failUnlessEqual(o.aliases[DEFAULT_ALIAS].encode("ascii"), other_uri) self.failUnlessEqual(o.where, u"subdir") self.failUnlessRaises(usage.UsageError, parse2, ["--node-url", "NOT-A-URL"]) o = parse2(["--node-url", "http://localhost:8080"]) self.failUnlessEqual(o["node-url"], "http://localhost:8080/") o = parse2(["--node-url", "https://localhost/"]) self.failUnlessEqual(o["node-url"], "https://localhost/") def test_version(self): # "tahoe --version" dumps text to stdout and exits stdout = StringIO() self.failUnlessRaises(SystemExit, self.parse, ["--version"], stdout) self.failUnlessIn(allmydata.__full_version__, stdout.getvalue()) # but "tahoe SUBCOMMAND --version" should be rejected self.failUnlessRaises(usage.UsageError, self.parse, ["run", "--version"]) self.failUnlessRaises(usage.UsageError, self.parse, ["run", "--version-and-path"]) def test_quiet(self): # accepted as an overall option, but not on subcommands o = self.parse(["--quiet", "run"]) self.failUnless(o.parent["quiet"]) self.failUnlessRaises(usage.UsageError, self.parse, ["run", "--quiet"]) def test_basedir(self): # accept a --node-directory option before the verb, or a --basedir # option after, or a basedir argument after, but none in the wrong # place, and not more than one of the three. # Here is some option twistd recognizes but we don't. Depending on # where it appears, it should be passed through to twistd. It doesn't # really matter which option it is (it doesn't even have to be a valid # option). This test does not actually run any of the twistd argument # parsing. 
some_twistd_option = "--spew" o = self.parse(["run"]) self.failUnlessReallyEqual(o["basedir"], os.path.join(fileutil.abspath_expanduser_unicode(u"~"), u".tahoe")) o = self.parse(["run", "here"]) self.failUnlessReallyEqual(o["basedir"], fileutil.abspath_expanduser_unicode(u"here")) o = self.parse(["run", "--basedir", "there"]) self.failUnlessReallyEqual(o["basedir"], fileutil.abspath_expanduser_unicode(u"there")) o = self.parse(["--node-directory", "there", "run"]) self.failUnlessReallyEqual(o["basedir"], fileutil.abspath_expanduser_unicode(u"there")) o = self.parse(["run", "here", some_twistd_option]) self.failUnlessReallyEqual(o["basedir"], fileutil.abspath_expanduser_unicode(u"here")) self.failUnlessRaises(usage.UsageError, self.parse, ["--basedir", "there", "run"]) self.failUnlessRaises(usage.UsageError, self.parse, ["run", "--node-directory", "there"]) self.failUnlessRaises(usage.UsageError, self.parse, ["--node-directory=there", "run", "--basedir=here"]) self.failUnlessRaises(usage.UsageError, self.parse, ["run", "--basedir=here", "anywhere"]) self.failUnlessRaises(usage.UsageError, self.parse, ["--node-directory=there", "run", "anywhere"]) self.failUnlessRaises(usage.UsageError, self.parse, ["--node-directory=there", "run", "--basedir=here", "anywhere"]) self.failUnlessRaises(usage.UsageError, self.parse, ["--node-directory=there", "run", some_twistd_option]) self.failUnlessRaises(usage.UsageError, self.parse, ["run", "--basedir=here", some_twistd_option]) tahoe_lafs-1.20.0/src/allmydata/test/cli/test_cp.py0000644000000000000000000013422613615410400017127 0ustar00""" Ported to Python 3. """ import os.path, json from twisted.trial import unittest from twisted.python import usage from twisted.internet import defer from allmydata.scripts import cli from allmydata.util import fileutil from allmydata.util.encodingutil import (quote_output, unicode_to_output, to_bytes) from allmydata.util.assertutil import _assert from ..no_network import GridTestMixin from .common import CLITestMixin from ..common_util import skip_if_cannot_represent_filename class Cp(GridTestMixin, CLITestMixin, unittest.TestCase): def test_not_enough_args(self): o = cli.CpOptions() self.failUnlessRaises(usage.UsageError, o.parseOptions, ["onearg"]) def test_unicode_filename(self): self.basedir = "cli/Cp/unicode_filename" fn1 = os.path.join(self.basedir, u"\u00C4rtonwall") artonwall_arg = u"\u00C4rtonwall" skip_if_cannot_represent_filename(fn1) self.set_up_grid(oneshare=True) DATA1 = "unicode file content" fileutil.write(fn1, DATA1) fn2 = os.path.join(self.basedir, "Metallica") DATA2 = "non-unicode file content" fileutil.write(fn2, DATA2) d = self.do_cli("create-alias", "tahoe") d.addCallback(lambda res: self.do_cli("cp", fn1, "tahoe:")) d.addCallback(lambda res: self.do_cli("get", "tahoe:" + artonwall_arg)) d.addCallback(lambda rc_out_err: self.assertEqual(rc_out_err[1], DATA1)) # Version where destination filename is explicitly Unicode too. 
d.addCallback(lambda res: self.do_cli("cp", fn1, "tahoe:" + artonwall_arg + "-2")) d.addCallback(lambda res: self.do_cli("get", "tahoe:" + artonwall_arg + "-2")) d.addCallback(lambda rc_out_err: self.assertEqual(rc_out_err[1], DATA1)) d.addCallback(lambda res: self.do_cli("cp", fn2, "tahoe:")) d.addCallback(lambda res: self.do_cli("get", "tahoe:Metallica")) d.addCallback(lambda rc_out_err: self.assertEqual(rc_out_err[1], DATA2)) d.addCallback(lambda res: self.do_cli("ls", "tahoe:")) def _check(args): (rc, out, err) = args try: unicode_to_output(u"\u00C4rtonwall") except UnicodeEncodeError: self.failUnlessReallyEqual(rc, 1) self.failUnlessReallyEqual(out, "Metallica\n") self.failUnlessIn(quote_output(u"\u00C4rtonwall"), err) self.failUnlessIn("files whose names could not be converted", err) else: self.failUnlessReallyEqual(rc, 0) self.failUnlessReallyEqual(out, u"Metallica\n\u00C4rtonwall\n\u00C4rtonwall-2\n") self.assertEqual(len(err), 0, err) d.addCallback(_check) return d def test_dangling_symlink_vs_recursion(self): if not hasattr(os, 'symlink'): raise unittest.SkipTest("Symlinks are not supported by Python on this platform.") # cp -r on a directory containing a dangling symlink shouldn't assert self.basedir = "cli/Cp/dangling_symlink_vs_recursion" self.set_up_grid(oneshare=True) dn = os.path.join(self.basedir, "dir") os.mkdir(dn) fn = os.path.join(dn, "Fakebandica") ln = os.path.join(dn, "link") os.symlink(fn, ln) d = self.do_cli("create-alias", "tahoe") d.addCallback(lambda res: self.do_cli("cp", "--recursive", dn, "tahoe:")) return d def test_copy_using_filecap(self): self.basedir = "cli/Cp/test_copy_using_filecap" self.set_up_grid(oneshare=True) outdir = os.path.join(self.basedir, "outdir") os.mkdir(outdir) fn1 = os.path.join(self.basedir, "Metallica") fn2 = os.path.join(outdir, "Not Metallica") fn3 = os.path.join(outdir, "test2") DATA1 = b"puppies" * 10000 fileutil.write(fn1, DATA1) d = self.do_cli("create-alias", "tahoe") d.addCallback(lambda ign: self.do_cli("put", fn1)) def _put_file(args): (rc, out, err) = args self.failUnlessReallyEqual(rc, 0) self.failUnlessIn("200 OK", err) # keep track of the filecap self.filecap = out.strip() d.addCallback(_put_file) # Let's try copying this to the disk using the filecap. d.addCallback(lambda ign: self.do_cli("cp", self.filecap, fn2)) def _copy_file(args): (rc, out, err) = args self.failUnlessReallyEqual(rc, 0) results = fileutil.read(fn2) self.failUnlessReallyEqual(results, DATA1) d.addCallback(_copy_file) # Test copying a filecap to local dir, which should fail without a # destination filename (#761). d.addCallback(lambda ign: self.do_cli("cp", self.filecap, outdir)) def _resp(args): (rc, out, err) = args self.failUnlessReallyEqual(rc, 1) self.failUnlessIn("when copying into a directory, all source files must have names, but", err) self.assertEqual(len(out), 0, out) d.addCallback(_resp) # Create a directory, linked at tahoe:test . d.addCallback(lambda ign: self.do_cli("mkdir", "tahoe:test")) def _get_dir(args): (rc, out, err) = args self.failUnlessReallyEqual(rc, 0) self.dircap = out.strip() d.addCallback(_get_dir) # Upload a file to the directory. d.addCallback(lambda ign: self.do_cli("put", fn1, "tahoe:test/test_file")) d.addCallback(lambda rc_out_err: self.failUnlessReallyEqual(rc_out_err[0], 0)) # Copying DIRCAP/filename to a local dir should work, because the # destination filename can be inferred. 
d.addCallback(lambda ign: self.do_cli("cp", self.dircap + "/test_file", outdir)) def _get_resp(args): (rc, out, err) = args self.failUnlessReallyEqual(rc, 0) results = fileutil.read(os.path.join(outdir, "test_file")) self.failUnlessReallyEqual(results, DATA1) d.addCallback(_get_resp) # ... and to an explicit filename different from the source filename. d.addCallback(lambda ign: self.do_cli("cp", self.dircap + "/test_file", fn3)) def _get_resp2(args): (rc, out, err) = args self.failUnlessReallyEqual(rc, 0) results = fileutil.read(fn3) self.failUnlessReallyEqual(results, DATA1) d.addCallback(_get_resp2) # Test that the --verbose option prints correct indices (#1805). d.addCallback(lambda ign: self.do_cli("cp", "--verbose", fn3, self.dircap)) def _test_for_wrong_indices(args): (rc, out, err) = args lines = err.split('\n') self.failUnlessIn('examining 1 of 1', lines) self.failUnlessIn('starting copy, 1 files, 1 directories', lines) self.failIfIn('examining 0 of', err) d.addCallback(_test_for_wrong_indices) return d def test_cp_with_nonexistent_alias(self): # when invoked with an alias or aliases that don't exist, 'tahoe cp' # should output a sensible error message rather than a stack trace. self.basedir = "cli/Cp/cp_with_nonexistent_alias" self.set_up_grid(oneshare=True) d = self.do_cli("cp", "fake:file1", "fake:file2") def _check(args): (rc, out, err) = args self.failUnlessReallyEqual(rc, 1) self.failUnlessIn("error:", err) d.addCallback(_check) # 'tahoe cp' actually processes the target argument first, so we need # to check to make sure that validation extends to the source # argument. d.addCallback(lambda ign: self.do_cli("create-alias", "tahoe")) d.addCallback(lambda ign: self.do_cli("cp", "fake:file1", "tahoe:file2")) d.addCallback(_check) return d def test_unicode_dirnames(self): self.basedir = "cli/Cp/unicode_dirnames" fn1 = os.path.join(self.basedir, u"\u00C4rtonwall") artonwall_arg = u"\u00C4rtonwall" skip_if_cannot_represent_filename(fn1) self.set_up_grid(oneshare=True) d = self.do_cli("create-alias", "tahoe") d.addCallback(lambda res: self.do_cli("mkdir", "tahoe:test/" + artonwall_arg)) d.addCallback(lambda res: self.do_cli("cp", "-r", "tahoe:test", "tahoe:test2")) d.addCallback(lambda res: self.do_cli("ls", "tahoe:test2/test")) def _check(args): (rc, out, err) = args try: unicode_to_output(u"\u00C4rtonwall") except UnicodeEncodeError: self.failUnlessReallyEqual(rc, 1) self.assertEqual(len(out), 0, out) self.failUnlessIn(quote_output(u"\u00C4rtonwall"), err) self.failUnlessIn("files whose names could not be converted", err) else: self.failUnlessReallyEqual(rc, 0) self.failUnlessReallyEqual(out, u"\u00C4rtonwall\n") self.assertEqual(len(err), 0, err) d.addCallback(_check) return d @defer.inlineCallbacks def test_cp_duplicate_directories(self): self.basedir = "cli/Cp/cp_duplicate_directories" self.set_up_grid(oneshare=True) filename = os.path.join(self.basedir, "file") data = b"abc\xff\x00\xee" with open(filename, "wb") as f: f.write(data) yield self.do_cli("create-alias", "tahoe") (rc, out, err) = yield self.do_cli("mkdir", "tahoe:test1") self.assertEqual(rc, 0, (rc, err)) dircap = out.strip() (rc, out, err) = yield self.do_cli("cp", filename, "tahoe:test1/file") self.assertEqual(rc, 0, (rc, err)) # Now duplicate dirnode, testing duplicates on destination side: (rc, out, err) = yield self.do_cli( "cp", "--recursive", dircap, "tahoe:test2/") self.assertEqual(rc, 0, (rc, err)) (rc, out, err) = yield self.do_cli( "cp", "--recursive", dircap, "tahoe:test3/") self.assertEqual(rc, 0, (rc, 
err)) # Now copy to local directory, testing duplicates on origin side: yield self.do_cli("cp", "--recursive", "tahoe:", self.basedir) for i in range(1, 4): with open(os.path.join(self.basedir, "test%d" % (i,), "file"), "rb") as f: self.assertEquals(f.read(), data) @defer.inlineCallbacks def test_cp_immutable_file(self): self.basedir = "cli/Cp/cp_immutable_file" self.set_up_grid(oneshare=True) filename = os.path.join(self.basedir, "source_file") data = b"abc\xff\x00\xee" with open(filename, "wb") as f: f.write(data) # Create immutable file: yield self.do_cli("create-alias", "tahoe") (rc, out, _) = yield self.do_cli("put", filename, "tahoe:file1") filecap = out.strip() self.assertEqual(rc, 0) # Copy it: (rc, _, _) = yield self.do_cli("cp", "tahoe:file1", "tahoe:file2") self.assertEqual(rc, 0) # Make sure resulting file is the same: (rc, _, _) = yield self.do_cli("cp", "--recursive", "--caps-only", "tahoe:", self.basedir) self.assertEqual(rc, 0) with open(os.path.join(self.basedir, "file2")) as f: self.assertEqual(f.read().strip(), filecap) def test_cp_replaces_mutable_file_contents(self): self.basedir = "cli/Cp/cp_replaces_mutable_file_contents" self.set_up_grid(oneshare=True) # Write a test file, which we'll copy to the grid. test_txt_path = os.path.join(self.basedir, "test.txt") test_txt_contents = "foo bar baz" f = open(test_txt_path, "w") f.write(test_txt_contents) f.close() d = self.do_cli("create-alias", "tahoe") d.addCallback(lambda ignored: self.do_cli("mkdir", "tahoe:test")) # We have to use 'tahoe put' here because 'tahoe cp' doesn't # know how to make mutable files at the destination. d.addCallback(lambda ignored: self.do_cli("put", "--mutable", test_txt_path, "tahoe:test/test.txt")) d.addCallback(lambda ignored: self.do_cli("get", "tahoe:test/test.txt")) def _check(args): (rc, out, err) = args self.failUnlessEqual(rc, 0) self.failUnlessEqual(out, test_txt_contents) d.addCallback(_check) # We'll do ls --json to get the read uri and write uri for the # file we've just uploaded. d.addCallback(lambda ignored: self.do_cli("ls", "--json", "tahoe:test/test.txt")) def _get_test_txt_uris(args): (rc, out, err) = args self.failUnlessEqual(rc, 0) filetype, data = json.loads(out) self.failUnlessEqual(filetype, "filenode") self.failUnless(data['mutable']) self.failUnlessIn("rw_uri", data) self.rw_uri = to_bytes(data["rw_uri"]) self.failUnlessIn("ro_uri", data) self.ro_uri = to_bytes(data["ro_uri"]) d.addCallback(_get_test_txt_uris) # Now make a new file to copy in place of test.txt. new_txt_path = os.path.join(self.basedir, "new.txt") new_txt_contents = "baz bar foo" * 100000 f = open(new_txt_path, "w") f.write(new_txt_contents) f.close() # Copy the new file on top of the old file. d.addCallback(lambda ignored: self.do_cli("cp", new_txt_path, "tahoe:test/test.txt")) # If we get test.txt now, we should see the new data. 
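# (Because `tahoe cp` updated the existing mutable file in place rather than
# replacing the directory entry, the rw_uri/ro_uri recorded above still refer
# to the same object and should now serve new_txt_contents; the JSON check
# and the direct GET on self.rw_uri below confirm both halves of that.)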
d.addCallback(lambda ignored: self.do_cli("get", "tahoe:test/test.txt")) d.addCallback(lambda rc_out_err: self.failUnlessEqual(rc_out_err[1], new_txt_contents)) # If we get the json of the new file, we should see that the old # uri is there d.addCallback(lambda ignored: self.do_cli("ls", "--json", "tahoe:test/test.txt")) def _check_json(args): (rc, out, err) = args self.failUnlessEqual(rc, 0) filetype, data = json.loads(out) self.failUnlessEqual(filetype, "filenode") self.failUnless(data['mutable']) self.failUnlessIn("ro_uri", data) self.failUnlessEqual(to_bytes(data["ro_uri"]), self.ro_uri) self.failUnlessIn("rw_uri", data) self.failUnlessEqual(to_bytes(data["rw_uri"]), self.rw_uri) d.addCallback(_check_json) # and, finally, doing a GET directly on one of the old uris # should give us the new contents. d.addCallback(lambda ignored: self.do_cli("get", self.rw_uri)) d.addCallback(lambda rc_out_err: self.failUnlessEqual(rc_out_err[1], new_txt_contents)) # Now copy the old test.txt without an explicit destination # file. tahoe cp will match it to the existing file and # overwrite it appropriately. d.addCallback(lambda ignored: self.do_cli("cp", test_txt_path, "tahoe:test")) d.addCallback(lambda ignored: self.do_cli("get", "tahoe:test/test.txt")) d.addCallback(lambda rc_out_err: self.failUnlessEqual(rc_out_err[1], test_txt_contents)) d.addCallback(lambda ignored: self.do_cli("ls", "--json", "tahoe:test/test.txt")) d.addCallback(_check_json) d.addCallback(lambda ignored: self.do_cli("get", self.rw_uri)) d.addCallback(lambda rc_out_err: self.failUnlessEqual(rc_out_err[1], test_txt_contents)) # Now we'll make a more complicated directory structure. # test2/ # test2/mutable1 # test2/mutable2 # test2/imm1 # test2/imm2 imm_test_txt_path = os.path.join(self.basedir, "imm_test.txt") imm_test_txt_contents = test_txt_contents * 10000 fileutil.write(imm_test_txt_path, imm_test_txt_contents) d.addCallback(lambda ignored: self.do_cli("mkdir", "tahoe:test2")) d.addCallback(lambda ignored: self.do_cli("put", "--mutable", new_txt_path, "tahoe:test2/mutable1")) d.addCallback(lambda ignored: self.do_cli("put", "--mutable", new_txt_path, "tahoe:test2/mutable2")) d.addCallback(lambda ignored: self.do_cli('put', new_txt_path, "tahoe:test2/imm1")) d.addCallback(lambda ignored: self.do_cli("put", imm_test_txt_path, "tahoe:test2/imm2")) d.addCallback(lambda ignored: self.do_cli("ls", "--json", "tahoe:test2")) def _process_directory_json(args): (rc, out, err) = args self.failUnlessEqual(rc, 0) filetype, data = json.loads(out) self.failUnlessEqual(filetype, "dirnode") self.failUnless(data['mutable']) self.failUnlessIn("children", data) children = data['children'] # Store the URIs for later use. 
self.childuris = {} for k in ["mutable1", "mutable2", "imm1", "imm2"]: self.failUnlessIn(k, children) childtype, childdata = children[k] self.failUnlessEqual(childtype, "filenode") if "mutable" in k: self.failUnless(childdata['mutable']) self.failUnlessIn("rw_uri", childdata) uri_key = "rw_uri" else: self.failIf(childdata['mutable']) self.failUnlessIn("ro_uri", childdata) uri_key = "ro_uri" self.childuris[k] = to_bytes(childdata[uri_key]) d.addCallback(_process_directory_json) # Now build a local directory to copy into place, like the following: # test2/ # test2/mutable1 # test2/mutable2 # test2/imm1 # test2/imm3 def _build_local_directory(ignored): test2_path = os.path.join(self.basedir, "test2") fileutil.make_dirs(test2_path) for fn in ("mutable1", "mutable2", "imm1", "imm3"): fileutil.write(os.path.join(test2_path, fn), fn * 1000) self.test2_path = test2_path d.addCallback(_build_local_directory) d.addCallback(lambda ignored: self.do_cli("cp", "-r", self.test2_path, "tahoe:")) # We expect that mutable1 and mutable2 are overwritten in-place, # so they'll retain their URIs but have different content. def _process_file_json(args, fn): (rc, out, err) = args self.failUnlessEqual(rc, 0) filetype, data = json.loads(out) self.failUnlessEqual(filetype, "filenode") if "mutable" in fn: self.failUnless(data['mutable']) self.failUnlessIn("rw_uri", data) self.failUnlessEqual(to_bytes(data["rw_uri"]), self.childuris[fn]) else: self.failIf(data['mutable']) self.failUnlessIn("ro_uri", data) self.failIfEqual(to_bytes(data["ro_uri"]), self.childuris[fn]) for fn in ("mutable1", "mutable2"): d.addCallback(lambda ignored, fn=fn: self.do_cli("get", "tahoe:test2/%s" % fn)) d.addCallback(lambda rc_out_err, fn=fn: self.failUnlessEqual(rc_out_err[1], fn * 1000)) d.addCallback(lambda ignored, fn=fn: self.do_cli("ls", "--json", "tahoe:test2/%s" % fn)) d.addCallback(_process_file_json, fn=fn) # imm1 should have been replaced, so both its uri and content # should be different. d.addCallback(lambda ignored: self.do_cli("get", "tahoe:test2/imm1")) d.addCallback(lambda rc_out_err: self.failUnlessEqual(rc_out_err[1], "imm1" * 1000)) d.addCallback(lambda ignored: self.do_cli("ls", "--json", "tahoe:test2/imm1")) d.addCallback(_process_file_json, fn="imm1") # imm3 should have been created. d.addCallback(lambda ignored: self.do_cli("get", "tahoe:test2/imm3")) d.addCallback(lambda rc_out_err: self.failUnlessEqual(rc_out_err[1], "imm3" * 1000)) # imm2 should be exactly as we left it, since our newly-copied # directory didn't contain an imm2 entry. d.addCallback(lambda ignored: self.do_cli("get", "tahoe:test2/imm2")) d.addCallback(lambda rc_out_err: self.failUnlessEqual(rc_out_err[1], imm_test_txt_contents)) d.addCallback(lambda ignored: self.do_cli("ls", "--json", "tahoe:test2/imm2")) def _process_imm2_json(args): (rc, out, err) = args self.failUnlessEqual(rc, 0) filetype, data = json.loads(out) self.failUnlessEqual(filetype, "filenode") self.failIf(data['mutable']) self.failUnlessIn("ro_uri", data) self.failUnlessEqual(to_bytes(data["ro_uri"]), self.childuris["imm2"]) d.addCallback(_process_imm2_json) return d def test_cp_overwrite_readonly_mutable_file(self): # tahoe cp should print an error when asked to overwrite a # mutable file that it can't overwrite. self.basedir = "cli/Cp/overwrite_readonly_mutable_file" self.set_up_grid(oneshare=True) # This is our initial file. We'll link its readcap into the # tahoe: alias. test_file_path = os.path.join(self.basedir, "test_file.txt") test_file_contents = "This is a test file." 
fileutil.write(test_file_path, test_file_contents) # This is our replacement file. We'll try and fail to upload it # over the readcap that we linked into the tahoe: alias. replacement_file_path = os.path.join(self.basedir, "replacement.txt") replacement_file_contents = "These are new contents." fileutil.write(replacement_file_path, replacement_file_contents) d = self.do_cli("create-alias", "tahoe:") d.addCallback(lambda ignored: self.do_cli("put", "--mutable", test_file_path)) def _get_test_uri(args): (rc, out, err) = args self.failUnlessEqual(rc, 0) # this should be a write uri self._test_write_uri = out d.addCallback(_get_test_uri) d.addCallback(lambda ignored: self.do_cli("ls", "--json", self._test_write_uri)) def _process_test_json(args): (rc, out, err) = args self.failUnlessEqual(rc, 0) filetype, data = json.loads(out) self.failUnlessEqual(filetype, "filenode") self.failUnless(data['mutable']) self.failUnlessIn("ro_uri", data) self._test_read_uri = to_bytes(data["ro_uri"]) d.addCallback(_process_test_json) # Now we'll link the readonly URI into the tahoe: alias. d.addCallback(lambda ignored: self.do_cli("ln", self._test_read_uri, "tahoe:test_file.txt")) d.addCallback(lambda rc_out_err: self.failUnlessEqual(rc_out_err[0], 0)) # Let's grab the json of that to make sure that we did it right. d.addCallback(lambda ignored: self.do_cli("ls", "--json", "tahoe:")) def _process_tahoe_json(args): (rc, out, err) = args self.failUnlessEqual(rc, 0) filetype, data = json.loads(out) self.failUnlessEqual(filetype, "dirnode") self.failUnlessIn("children", data) kiddata = data['children'] self.failUnlessIn("test_file.txt", kiddata) testtype, testdata = kiddata['test_file.txt'] self.failUnlessEqual(testtype, "filenode") self.failUnless(testdata['mutable']) self.failUnlessIn("ro_uri", testdata) self.failUnlessEqual(to_bytes(testdata["ro_uri"]), self._test_read_uri) self.failIfIn("rw_uri", testdata) d.addCallback(_process_tahoe_json) # Okay, now we're going to try uploading another mutable file in # place of that one. We should get an error. d.addCallback(lambda ignored: self.do_cli("cp", replacement_file_path, "tahoe:test_file.txt")) def _check_error_message(args): (rc, out, err) = args self.failUnlessEqual(rc, 1) self.failUnlessIn("replace or update requested with read-only cap", err) d.addCallback(_check_error_message) # Make extra sure that that didn't work. d.addCallback(lambda ignored: self.do_cli("get", "tahoe:test_file.txt")) d.addCallback(lambda rc_out_err: self.failUnlessEqual(rc_out_err[1], test_file_contents)) d.addCallback(lambda ignored: self.do_cli("get", self._test_read_uri)) d.addCallback(lambda rc_out_err: self.failUnlessEqual(rc_out_err[1], test_file_contents)) # Now we'll do it without an explicit destination. d.addCallback(lambda ignored: self.do_cli("cp", test_file_path, "tahoe:")) d.addCallback(_check_error_message) d.addCallback(lambda ignored: self.do_cli("get", "tahoe:test_file.txt")) d.addCallback(lambda rc_out_err: self.failUnlessEqual(rc_out_err[1], test_file_contents)) d.addCallback(lambda ignored: self.do_cli("get", self._test_read_uri)) d.addCallback(lambda rc_out_err: self.failUnlessEqual(rc_out_err[1], test_file_contents)) # Now we'll link a readonly file into a subdirectory. 
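# ...and then run a recursive copy of a local tree that collides with it: the
# read-only file2.txt entry should make `tahoe cp -r` fail with the same
# "read-only cap" error checked above, while the linked entry keeps its
# original ro_uri.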
d.addCallback(lambda ignored: self.do_cli("mkdir", "tahoe:testdir")) d.addCallback(lambda rc_out_err: self.failUnlessEqual(rc_out_err[0], 0)) d.addCallback(lambda ignored: self.do_cli("ln", self._test_read_uri, "tahoe:test/file2.txt")) d.addCallback(lambda rc_out_err: self.failUnlessEqual(rc_out_err[0], 0)) test_dir_path = os.path.join(self.basedir, "test") fileutil.make_dirs(test_dir_path) for f in ("file1.txt", "file2.txt"): fileutil.write(os.path.join(test_dir_path, f), f * 10000) d.addCallback(lambda ignored: self.do_cli("cp", "-r", test_dir_path, "tahoe:")) d.addCallback(_check_error_message) d.addCallback(lambda ignored: self.do_cli("ls", "--json", "tahoe:test")) def _got_testdir_json(args): (rc, out, err) = args self.failUnlessEqual(rc, 0) filetype, data = json.loads(out) self.failUnlessEqual(filetype, "dirnode") self.failUnlessIn("children", data) childdata = data['children'] self.failUnlessIn("file2.txt", childdata) file2type, file2data = childdata['file2.txt'] self.failUnlessEqual(file2type, "filenode") self.failUnless(file2data['mutable']) self.failUnlessIn("ro_uri", file2data) self.failUnlessEqual(to_bytes(file2data["ro_uri"]), self._test_read_uri) self.failIfIn("rw_uri", file2data) d.addCallback(_got_testdir_json) return d def test_cp_verbose(self): self.basedir = "cli/Cp/cp_verbose" self.set_up_grid(oneshare=True) # Write two test files, which we'll copy to the grid. test1_path = os.path.join(self.basedir, "test1") test2_path = os.path.join(self.basedir, "test2") fileutil.write(test1_path, "test1") fileutil.write(test2_path, "test2") d = self.do_cli("create-alias", "tahoe") d.addCallback(lambda ign: self.do_cli("cp", "--verbose", test1_path, test2_path, "tahoe:")) def _check(res): (rc, out, err) = res self.failUnlessEqual(rc, 0, str(res)) self.failUnlessIn("Success: files copied", out, str(res)) self.failUnlessEqual(err, """\ attaching sources to targets, 2 files / 0 dirs in root targets assigned, 1 dirs, 2 files starting copy, 2 files, 1 directories 1/2 files, 0/1 directories 2/2 files, 0/1 directories 1/1 directories """, str(res)) d.addCallback(_check) return d def test_cp_copies_dir(self): # This test ensures that a directory is copied using # tahoe cp -r. Refer to ticket #712: # https://tahoe-lafs.org/trac/tahoe-lafs/ticket/712 self.basedir = "cli/Cp/cp_copies_dir" self.set_up_grid(oneshare=True) subdir = os.path.join(self.basedir, "foo") os.mkdir(subdir) test1_path = os.path.join(subdir, "test1") fileutil.write(test1_path, "test1") d = self.do_cli("create-alias", "tahoe") d.addCallback(lambda ign: self.do_cli("cp", "-r", subdir, "tahoe:")) d.addCallback(lambda ign: self.do_cli("ls", "tahoe:")) def _check(res, item): (rc, out, err) = res self.failUnlessEqual(rc, 0) self.failUnlessEqual(err, "") self.failUnlessIn(item, out, str(res)) d.addCallback(_check, "foo") d.addCallback(lambda ign: self.do_cli("ls", "tahoe:foo/")) d.addCallback(_check, "test1") d.addCallback(lambda ign: fileutil.rm_dir(subdir)) d.addCallback(lambda ign: self.do_cli("cp", "-r", "tahoe:foo", self.basedir)) def _check_local_fs(ign): self.failUnless(os.path.isdir(self.basedir)) self.failUnless(os.path.isfile(test1_path)) d.addCallback(_check_local_fs) return d def test_ticket_2027(self): # This test ensures that tahoe will copy a file from the grid to # a local directory without a specified file name. # https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2027 self.basedir = "cli/Cp/ticket_2027" self.set_up_grid(oneshare=True) # Write a test file, which we'll copy to the grid. 
test1_path = os.path.join(self.basedir, "test1") fileutil.write(test1_path, "test1") d = self.do_cli("create-alias", "tahoe") d.addCallback(lambda ign: self.do_cli("cp", test1_path, "tahoe:")) d.addCallback(lambda ign: self.do_cli("cp", "tahoe:test1", self.basedir)) def _check(res): (rc, out, err) = res self.failUnlessIn("Success: file copied", out, str(res)) return d # these test cases come from ticket #2329 comment 40 # trailing slash on target *directory* should not matter, test both # trailing slash on target files should cause error # trailing slash on source directory should not matter, test a few # trailing slash on source files should cause error COPYOUT_TESTCASES = """ cp $FILECAP to/existing-file : to/existing-file cp -r $FILECAP to/existing-file : to/existing-file cp $DIRCAP/file $PARENTCAP/dir2/file2 to/existing-file : E6-MANYONE cp -r $DIRCAP/file $PARENTCAP/dir2/file2 to/existing-file : E6-MANYONE cp $DIRCAP to/existing-file : E4-NEED-R cp -r $DIRCAP to/existing-file : E5-DIRTOFILE cp $FILECAP $DIRCAP to/existing-file : E4-NEED-R cp -r $FILECAP $DIRCAP to/existing-file : E6-MANYONE cp $FILECAP to/existing-file/ : E7-BADSLASH cp -r $FILECAP to/existing-file/ : E7-BADSLASH cp $DIRCAP/file $PARENTCAP/dir2/file2 to/existing-file/ : E7-BADSLASH cp -r $DIRCAP/file $PARENTCAP/dir2/file2 to/existing-file/ : E7-BADSLASH cp $DIRCAP to/existing-file/ : E4-NEED-R cp -r $DIRCAP to/existing-file/ : E7-BADSLASH cp $FILECAP $DIRCAP to/existing-file/ : E4-NEED-R cp -r $FILECAP $DIRCAP to/existing-file/ : E7-BADSLASH # single source to a (present) target directory cp $FILECAP to : E2-DESTNAME cp -r $FILECAP to : E2-DESTNAME cp $DIRCAP/file to : to/file cp -r $DIRCAP/file to : to/file # these two are errors cp $DIRCAP/file/ to : E8-BADSLASH cp -r $DIRCAP/file/ to : E8-BADSLASH cp $PARENTCAP/dir to : E4-NEED-R cp -r $PARENTCAP/dir to : to/dir/file # but these two should ignore the trailing source slash cp $PARENTCAP/dir/ to : E4-NEED-R cp -r $PARENTCAP/dir/ to : to/dir/file cp $DIRCAP to : E4-NEED-R cp -r $DIRCAP to : to/file cp $DIRALIAS to : E4-NEED-R cp -r $DIRALIAS to : to/file cp $FILECAP to/ : E2-DESTNAME cp -r $FILECAP to/ : E2-DESTNAME cp $DIRCAP/file to/ : to/file cp -r $DIRCAP/file to/ : to/file cp $PARENTCAP/dir to/ : E4-NEED-R cp -r $PARENTCAP/dir to/ : to/dir/file cp $DIRCAP to/ : E4-NEED-R cp -r $DIRCAP to/ : to/file cp $DIRALIAS to/ : E4-NEED-R cp -r $DIRALIAS to/ : to/file # multiple sources to a (present) target directory cp $DIRCAP/file $PARENTCAP/dir2/file2 to : to/file,to/file2 cp $DIRCAP/file $FILECAP to : E2-DESTNAME cp $DIRCAP $FILECAP to : E4-NEED-R cp -r $DIRCAP $FILECAP to : E2-DESTNAME # namedfile, unnameddir, nameddir cp $PARENTCAP/dir3/file3 $DIRCAP $PARENTCAP/dir2 to : E4-NEED-R cp -r $PARENTCAP/dir3/file3 $DIRCAP $PARENTCAP/dir2 to : to/file3,to/file,to/dir2/file2 # namedfile, unnameddir, nameddir, unnamedfile cp $PARENTCAP/dir3/file3 $DIRCAP $PARENTCAP/dir2 $FILECAP to : E4-NEED-R cp -r $PARENTCAP/dir3/file3 $DIRCAP $PARENTCAP/dir2 $FILECAP to : E2-DESTNAME cp $DIRCAP/file $PARENTCAP/dir2/file2 to/ : to/file,to/file2 cp $DIRCAP/file $FILECAP to/ : E2-DESTNAME cp $DIRCAP $FILECAP to/ : E4-NEED-R cp -r $DIRCAP $FILECAP to/ : E2-DESTNAME # namedfile, unnameddir, nameddir cp $PARENTCAP/dir3/file3 $DIRCAP $PARENTCAP/dir2 to/ : E4-NEED-R cp -r $PARENTCAP/dir3/file3 $DIRCAP $PARENTCAP/dir2 to/ : to/file3,to/file,to/dir2/file2 # namedfile, unnameddir, nameddir, unnamedfile cp $PARENTCAP/dir3/file3 $DIRCAP $PARENTCAP/dir2 $FILECAP to/ : E4-NEED-R cp -r 
$PARENTCAP/dir3/file3 $DIRCAP $PARENTCAP/dir2 $FILECAP to/ : E2-DESTNAME # single sources to a missing target: should mkdir or create a file cp $FILECAP to/missing : to/missing cp -r $FILECAP to/missing : to/missing cp $DIRCAP/file to/missing : to/missing cp -r $DIRCAP/file to/missing : to/missing cp $PARENTCAP/dir to/missing : E4-NEED-R cp -r $PARENTCAP/dir to/missing : to/missing/dir/file cp $DIRCAP to/missing : E4-NEED-R cp -r $DIRCAP to/missing : to/missing/file cp $DIRALIAS to/missing : E4-NEED-R cp -r $DIRALIAS to/missing : to/missing/file cp $FILECAP to/missing/ : E7-BADSLASH cp -r $FILECAP to/missing/ : E7-BADSLASH cp $DIRCAP/file to/missing/ : E7-BADSLASH cp -r $DIRCAP/file to/missing/ : E7-BADSLASH cp $PARENTCAP/dir to/missing/ : E4-NEED-R cp -r $PARENTCAP/dir to/missing/ : to/missing/dir/file cp $DIRCAP to/missing/ : E4-NEED-R cp -r $DIRCAP to/missing/ : to/missing/file cp $DIRALIAS to/missing/ : E4-NEED-R cp -r $DIRALIAS to/missing/ : to/missing/file # multiple things to a missing target: should mkdir cp $DIRCAP/file $PARENTCAP/dir2/file2 to/missing : to/missing/file,to/missing/file2 cp -r $DIRCAP/file $PARENTCAP/dir2/file2 to/missing : to/missing/file,to/missing/file2 cp $DIRCAP/file $FILECAP to/missing : E2-DESTNAME cp -r $DIRCAP/file $FILECAP to/missing : E2-DESTNAME cp $DIRCAP $FILECAP to/missing : E4-NEED-R cp -r $DIRCAP $FILECAP to/missing : E2-DESTNAME # namedfile, unnameddir, nameddir cp $PARENTCAP/dir3/file3 $DIRCAP $PARENTCAP/dir2 to/missing : E4-NEED-R cp -r $PARENTCAP/dir3/file3 $DIRCAP $PARENTCAP/dir2 to/missing : to/missing/file3,to/missing/file,to/missing/dir2/file2 # namedfile, unnameddir, nameddir, unnamedfile cp $PARENTCAP/dir3/file3 $DIRCAP $PARENTCAP/dir2 $FILECAP to/missing : E4-NEED-R cp -r $PARENTCAP/dir3/file3 $DIRCAP $PARENTCAP/dir2 $FILECAP to/missing : E2-DESTNAME cp $DIRCAP/file $PARENTCAP/dir2/file2 to/missing/ : to/missing/file,to/missing/file2 cp -r $DIRCAP/file $PARENTCAP/dir2/file2 to/missing/ : to/missing/file,to/missing/file2 cp $DIRCAP/file $FILECAP to/missing/ : E2-DESTNAME cp -r $DIRCAP/file $FILECAP to/missing/ : E2-DESTNAME cp $DIRCAP $FILECAP to/missing/ : E4-NEED-R cp -r $DIRCAP $FILECAP to/missing/ : E2-DESTNAME # namedfile, unnameddir, nameddir cp $PARENTCAP/dir3/file3 $DIRCAP $PARENTCAP/dir2 to/missing/ : E4-NEED-R cp -r $PARENTCAP/dir3/file3 $DIRCAP $PARENTCAP/dir2 to/missing/ : to/missing/file3,to/missing/file,to/missing/dir2/file2 # namedfile, unnameddir, nameddir, unnamedfile cp $PARENTCAP/dir3/file3 $DIRCAP $PARENTCAP/dir2 $FILECAP to/missing/ : E4-NEED-R cp -r $PARENTCAP/dir3/file3 $DIRCAP $PARENTCAP/dir2 $FILECAP to/missing/ : E2-DESTNAME # make sure empty directories are copied too cp -r $PARENTCAP/dir4 to : to/dir4/emptydir/ cp -r $PARENTCAP/dir4 to/ : to/dir4/emptydir/ # name collisions should cause errors, not overwrites cp -r $PARENTCAP/dir6/dir $PARENTCAP/dir5/dir to : E9-COLLIDING-TARGETS cp -r $PARENTCAP/dir5/dir $PARENTCAP/dir6/dir to : E9-COLLIDING-TARGETS cp -r $DIRCAP6 $DIRCAP5 to : E9-COLLIDING-TARGETS cp -r $DIRCAP5 $DIRCAP6 to : E9-COLLIDING-TARGETS """ class CopyOut(GridTestMixin, CLITestMixin, unittest.TestCase): FILE_CONTENTS = b"file text" FILE_CONTENTS_5 = b"5" FILE_CONTENTS_6 = b"6" def do_setup(self): # first we build a tahoe filesystem that contains: # $PARENTCAP # $PARENTCAP/dir == $DIRCAP == alias: # $PARENTCAP/dir/file == $FILECAP # $PARENTCAP/dir2 (named directory) # $PARENTCAP/dir2/file2 # $PARENTCAP/dir3/file3 (a second named file) # $PARENTCAP/dir4 # $PARENTCAP/dir4/emptydir/ (an empty directory) 
# $PARENTCAP/dir5 == $DIRCAP5 # $PARENTCAP/dir5/dir/collide (contents are "5") # $PARENTCAP/dir6 == $DIRCAP6 # $PARENTCAP/dir6/dir/collide (contents are "6") source_file = os.path.join(self.basedir, "file") fileutil.write(source_file, self.FILE_CONTENTS) source_file_5 = os.path.join(self.basedir, "file5") fileutil.write(source_file_5, self.FILE_CONTENTS_5) source_file_6 = os.path.join(self.basedir, "file6") fileutil.write(source_file_6, self.FILE_CONTENTS_6) d = self.do_cli("mkdir") def _stash_parentdircap(res): (rc, out, err) = res self.failUnlessEqual(rc, 0, str(res)) self.failUnlessEqual(err, "", str(res)) self.PARENTCAP = out.strip() return self.do_cli("mkdir", "%s/dir" % self.PARENTCAP) d.addCallback(_stash_parentdircap) def _stash_dircap(res): (rc, out, err) = res self.failUnlessEqual(rc, 0, str(res)) self.failUnlessEqual(err, "", str(res)) self.DIRCAP = out.strip() return self.do_cli("add-alias", "ALIAS", self.DIRCAP) d.addCallback(_stash_dircap) d.addCallback(lambda ign: self.do_cli("put", source_file, "%s/dir/file" % self.PARENTCAP)) def _stash_filecap(res): (rc, out, err) = res self.failUnlessEqual(rc, 0, str(res)) self.failUnlessEqual(err.strip(), "201 Created", str(res)) self.FILECAP = out.strip() assert self.FILECAP.startswith("URI:LIT:") d.addCallback(_stash_filecap) d.addCallback(lambda ign: self.do_cli("mkdir", "%s/dir2" % self.PARENTCAP)) d.addCallback(lambda ign: self.do_cli("put", source_file, "%s/dir2/file2" % self.PARENTCAP)) d.addCallback(lambda ign: self.do_cli("mkdir", "%s/dir3" % self.PARENTCAP)) d.addCallback(lambda ign: self.do_cli("put", source_file, "%s/dir3/file3" % self.PARENTCAP)) d.addCallback(lambda ign: self.do_cli("mkdir", "%s/dir4" % self.PARENTCAP)) d.addCallback(lambda ign: self.do_cli("mkdir", "%s/dir4/emptydir" % self.PARENTCAP)) d.addCallback(lambda ign: self.do_cli("mkdir", "%s/dir5" % self.PARENTCAP)) def _stash_dircap_5(res): (rc, out, err) = res self.failUnlessEqual(rc, 0, str(res)) self.failUnlessEqual(err, "", str(res)) self.DIRCAP5 = out.strip() d.addCallback(_stash_dircap_5) d.addCallback(lambda ign: self.do_cli("mkdir", "%s/dir5/dir" % self.PARENTCAP)) d.addCallback(lambda ign: self.do_cli("put", source_file_5, "%s/dir5/dir/collide" % self.PARENTCAP)) d.addCallback(lambda ign: self.do_cli("mkdir", "%s/dir6" % self.PARENTCAP)) def _stash_dircap_6(res): (rc, out, err) = res self.failUnlessEqual(rc, 0, str(res)) self.failUnlessEqual(err, "", str(res)) self.DIRCAP6 = out.strip() d.addCallback(_stash_dircap_6) d.addCallback(lambda ign: self.do_cli("mkdir", "%s/dir6/dir" % self.PARENTCAP)) d.addCallback(lambda ign: self.do_cli("put", source_file_6, "%s/dir6/dir/collide" % self.PARENTCAP)) return d def check_output(self): # locate the files and directories created (if any) under to/ top = os.path.join(self.basedir, "to") results = set() for (dirpath, dirnames, filenames) in os.walk(top): assert dirpath.startswith(top) here = "/".join(dirpath.split(os.sep)[len(top.split(os.sep))-1:]) results.add(here+"/") for fn in filenames: contents = fileutil.read(os.path.join(dirpath, fn)) if contents == self.FILE_CONTENTS: results.add("%s/%s" % (here, fn)) elif contents == self.FILE_CONTENTS_5: results.add("%s/%s=5" % (here, fn)) elif contents == self.FILE_CONTENTS_6: results.add("%s/%s=6" % (here, fn)) return results def run_one_case(self, case): cmd = (case .replace("$PARENTCAP", self.PARENTCAP) .replace("$DIRCAP5", self.DIRCAP5) .replace("$DIRCAP6", self.DIRCAP6) .replace("$DIRCAP", self.DIRCAP) .replace("$DIRALIAS", "ALIAS:") .replace("$FILECAP", 
self.FILECAP) .split()) target = cmd[-1] _assert(target == "to" or target.startswith("to/"), target) cmd[-1] = os.path.abspath(os.path.join(self.basedir, cmd[-1])) # reset targetdir = os.path.abspath(os.path.join(self.basedir, "to")) fileutil.rm_dir(targetdir) os.mkdir(targetdir) if target.rstrip("/") == "to/existing-file": fileutil.write(cmd[-1], "existing file contents\n") # The abspath() for cmd[-1] strips a trailing slash, and we want to # test what happens when it is present. So put it back. if target.endswith("/"): cmd[-1] += "/" d = self.do_cli(*cmd) def _check(res): (rc, out, err) = res err = err.strip() if rc == 0: return self.check_output() if rc == 1: self.failUnlessEqual(out, "", str(res)) if "when copying into a directory, all source files must have names, but" in err: return set(["E2-DESTNAME"]) if err == "cannot copy directories without --recursive": return set(["E4-NEED-R"]) if err == "cannot copy directory into a file": return set(["E5-DIRTOFILE"]) if err == "copying multiple things requires target be a directory": return set(["E6-MANYONE"]) if err == "target is not a directory, but ends with a slash": return set(["E7-BADSLASH"]) if (err.startswith("source ") and "is not a directory, but ends with a slash" in err): return set(["E8-BADSLASH"]) if err == "cannot copy multiple files with the same name into the same target directory": return set(["E9-COLLIDING-TARGETS"]) self.fail("unrecognized error ('%s') %s" % (case, res)) d.addCallback(_check) return d def do_one_test(self, case, orig_expected): expected = set(orig_expected) printable_expected = ",".join(sorted(expected)) #print("---", case, ":", printable_expected) for f in orig_expected: # f is "dir/file" or "dir/sub/file" or "dir/" or "dir/sub/" # we want all parent directories in the set, with trailing / pieces = f.rstrip("/").split("/") for i in range(1,len(pieces)): parent = "/".join(pieces[:i]) expected.add(parent+"/") d = self.run_one_case(case) def _dump(got): ok = "ok" if got == expected else "FAIL" printable_got = ",".join(sorted(got)) print("%-31s: got %-19s, want %-19s %s" % (case, printable_got, printable_expected, ok)) return got #d.addCallback(_dump) def _check(got): self.failUnlessEqual(got, expected, case) d.addCallback(_check) return d def do_tests(self): # then we run various forms of "cp [-r] TAHOETHING to[/missing]" # and see what happens. d = defer.succeed(None) #print() for line in COPYOUT_TESTCASES.splitlines(): if "#" in line: line = line[:line.find("#")] line = line.strip() if not line: continue case, expected = line.split(":") case = case.strip() expected = frozenset(expected.strip().split(",")) d.addCallback(lambda ign, case=case, expected=expected: self.do_one_test(case, expected)) return d def test_cp_out(self): # test copying all sorts of things out of a tahoe filesystem self.basedir = "cli_cp/CopyOut/cp_out" self.set_up_grid(num_servers=1, oneshare=True) d = self.do_setup() d.addCallback(lambda ign: self.do_tests()) return d tahoe_lafs-1.20.0/src/allmydata/test/cli/test_create.py0000644000000000000000000005557313615410400017777 0ustar00""" Ported to Python 3. 
""" from __future__ import annotations import os from typing import Any from twisted.trial import unittest from twisted.internet import defer, reactor from twisted.python import usage from allmydata.util import configutil from allmydata.util import tor_provider, i2p_provider from ..common_util import run_cli, parse_cli from ..common import ( disable_modules, ) from ...scripts import create_node from ...listeners import ListenerConfig, StaticProvider from ... import client def read_config(basedir): tahoe_cfg = os.path.join(basedir, "tahoe.cfg") config = configutil.get_config(tahoe_cfg) return config class MergeConfigTests(unittest.TestCase): """ Tests for ``create_node.merge_config``. """ def test_disable_left(self) -> None: """ If the left argument to ``create_node.merge_config`` is ``None`` then the return value is ``None``. """ conf = ListenerConfig([], [], {}) self.assertEqual(None, create_node.merge_config(None, conf)) def test_disable_right(self) -> None: """ If the right argument to ``create_node.merge_config`` is ``None`` then the return value is ``None``. """ conf = ListenerConfig([], [], {}) self.assertEqual(None, create_node.merge_config(conf, None)) def test_disable_both(self) -> None: """ If both arguments to ``create_node.merge_config`` are ``None`` then the return value is ``None``. """ self.assertEqual(None, create_node.merge_config(None, None)) def test_overlapping_keys(self) -> None: """ If there are any keys in the ``node_config`` of the left and right parameters that are shared then ``ValueError`` is raised. """ left = ListenerConfig([], [], {"foo": [("b", "ar")]}) right = ListenerConfig([], [], {"foo": [("ba", "z")]}) self.assertRaises(ValueError, lambda: create_node.merge_config(left, right)) def test_merge(self) -> None: """ ``create_node.merge_config`` returns a ``ListenerConfig`` that has all of the ports, locations, and node config from each of the two ``ListenerConfig`` values given. """ left = ListenerConfig( ["left-port"], ["left-location"], {"left": [("f", "oo")]}, ) right = ListenerConfig( ["right-port"], ["right-location"], {"right": [("ba", "r")]}, ) result = create_node.merge_config(left, right) self.assertEqual( ListenerConfig( ["left-port", "right-port"], ["left-location", "right-location"], {"left": [("f", "oo")], "right": [("ba", "r")]}, ), result, ) class Config(unittest.TestCase): def test_client_unrecognized_options(self): tests = [ ("--listen", "create-client", "--listen=tcp"), ("--hostname", "create-client", "--hostname=computer"), ("--port", "create-client", "--port=unix:/var/tahoe/socket", "--location=tor:myservice.onion:12345"), ("--port", "create-client", "--port=unix:/var/tahoe/socket"), ("--location", "create-client", "--location=tor:myservice.onion:12345"), ("--listen", "create-client", "--listen=tor"), ("--listen", "create-client", "--listen=i2p"), ] for test in tests: option = test[0] verb = test[1] args = test[2:] e = self.assertRaises(usage.UsageError, parse_cli, verb, *args) self.assertIn("option %s not recognized" % (option,), str(e)) async def test_create_client_config(self): """ ``create_node.write_client_config`` writes a configuration file that can be parsed. TODO Maybe we should test that we can recover the given configuration from the parse, too. 
""" d = self.mktemp() os.mkdir(d) fname = os.path.join(d, 'tahoe.cfg') with open(fname, 'w') as f: opts = {"nickname": "nick", "webport": "tcp:3456", "hide-ip": False, "listen": "none", "shares-needed": "1", "shares-happy": "1", "shares-total": "1", } await create_node.write_node_config(f, opts) create_node.write_client_config(f, opts) # should succeed, no exceptions client.read_config(d, "") @defer.inlineCallbacks def test_client(self): basedir = self.mktemp() rc, out, err = yield run_cli("create-client", basedir) cfg = read_config(basedir) self.assertEqual(cfg.getboolean("node", "reveal-IP-address"), True) self.assertEqual(cfg.get("node", "tub.port"), "disabled") self.assertEqual(cfg.get("node", "tub.location"), "disabled") self.assertFalse(cfg.has_section("connections")) @defer.inlineCallbacks def test_non_default_storage_args(self): basedir = self.mktemp() rc, out, err = yield run_cli( "create-client", '--shares-total', '19', '--shares-needed', '2', '--shares-happy', '11', basedir, ) cfg = read_config(basedir) self.assertEqual(2, cfg.getint("client", "shares.needed")) self.assertEqual(11, cfg.getint("client", "shares.happy")) self.assertEqual(19, cfg.getint("client", "shares.total")) @defer.inlineCallbacks def test_illegal_shares_total(self): basedir = self.mktemp() rc, out, err = yield run_cli( "create-client", '--shares-total', 'funballs', basedir, ) self.assertNotEqual(0, rc) self.assertTrue('--shares-total must be an integer' in err + out) @defer.inlineCallbacks def test_client_hide_ip_no_i2p_txtorcon(self): """ The ``create-client`` sub-command tells the user to install the necessary dependencies if they have neither tor nor i2p support installed and they request network location privacy with the ``--hide-ip`` flag. """ with disable_modules("txi2p", "txtorcon"): basedir = self.mktemp() rc, out, err = yield run_cli("create-client", "--hide-ip", basedir) self.assertTrue(rc != 0, out) self.assertTrue('pip install tahoe-lafs[i2p]' in out) self.assertTrue('pip install tahoe-lafs[tor]' in out) @defer.inlineCallbacks def test_client_i2p_option_no_txi2p(self): with disable_modules("txi2p"): basedir = self.mktemp() rc, out, err = yield run_cli("create-node", "--listen=i2p", "--i2p-launch", basedir) self.assertTrue(rc != 0) self.assertTrue("Specifying any I2P options requires the 'txi2p' module" in out) @defer.inlineCallbacks def test_client_tor_option_no_txtorcon(self): with disable_modules("txtorcon"): basedir = self.mktemp() rc, out, err = yield run_cli("create-node", "--listen=tor", "--tor-launch", basedir) self.assertTrue(rc != 0) self.assertTrue("Specifying any Tor options requires the 'txtorcon' module" in out) @defer.inlineCallbacks def test_client_hide_ip(self): basedir = self.mktemp() rc, out, err = yield run_cli("create-client", "--hide-ip", basedir) self.assertEqual(0, rc) cfg = read_config(basedir) self.assertEqual(cfg.getboolean("node", "reveal-IP-address"), False) self.assertEqual(cfg.get("connections", "tcp"), "tor") @defer.inlineCallbacks def test_client_hide_ip_no_txtorcon(self): with disable_modules("txtorcon"): basedir = self.mktemp() rc, out, err = yield run_cli("create-client", "--hide-ip", basedir) self.assertEqual(0, rc) cfg = read_config(basedir) self.assertEqual(cfg.getboolean("node", "reveal-IP-address"), False) self.assertEqual(cfg.get("connections", "tcp"), "disabled") @defer.inlineCallbacks def test_client_basedir_exists(self): basedir = self.mktemp() os.mkdir(basedir) with open(os.path.join(basedir, "foo"), "w") as f: f.write("blocker") rc, out, err = yield 
run_cli("create-client", basedir) self.assertEqual(rc, -1) self.assertIn(basedir, err) self.assertIn("is not empty", err) self.assertIn("To avoid clobbering anything, I am going to quit now", err) @defer.inlineCallbacks def test_node(self): basedir = self.mktemp() rc, out, err = yield run_cli("create-node", "--hostname=foo", basedir) cfg = read_config(basedir) self.assertEqual(cfg.getboolean("node", "reveal-IP-address"), True) self.assertFalse(cfg.has_section("connections")) @defer.inlineCallbacks def test_storage_dir(self): basedir = self.mktemp() rc, out, err = yield run_cli("create-node", "--storage-dir", "/tmp/storage", "--hostname=foo", basedir) cfg = read_config(basedir) self.assertEqual(cfg.get("storage", "storage_dir"), "/tmp/storage") @defer.inlineCallbacks def test_node_hide_ip(self): basedir = self.mktemp() rc, out, err = yield run_cli("create-node", "--hide-ip", "--hostname=foo", basedir) cfg = read_config(basedir) self.assertEqual(cfg.getboolean("node", "reveal-IP-address"), False) self.assertEqual(cfg.get("connections", "tcp"), "tor") @defer.inlineCallbacks def test_node_hostname(self): basedir = self.mktemp() rc, out, err = yield run_cli("create-node", "--hostname=computer", basedir) cfg = read_config(basedir) port = cfg.get("node", "tub.port") location = cfg.get("node", "tub.location") self.assertRegex(port, r'^tcp:\d+$') self.assertRegex(location, r'^tcp:computer:\d+$') @defer.inlineCallbacks def test_node_port_location(self): basedir = self.mktemp() rc, out, err = yield run_cli("create-node", "--port=unix:/var/tahoe/socket", "--location=tor:myservice.onion:12345", basedir) cfg = read_config(basedir) self.assertEqual(cfg.get("node", "tub.location"), "tor:myservice.onion:12345") self.assertEqual(cfg.get("node", "tub.port"), "unix:/var/tahoe/socket") def test_node_hostname_port_location(self): basedir = self.mktemp() e = self.assertRaises(usage.UsageError, parse_cli, "create-node", "--listen=tcp", "--hostname=foo", "--port=bar", "--location=baz", basedir) self.assertEqual(str(e), "--hostname cannot be used with --location/--port") def test_node_listen_tcp_no_hostname(self): basedir = self.mktemp() e = self.assertRaises(usage.UsageError, parse_cli, "create-node", "--listen=tcp", basedir) self.assertIn("--listen=tcp requires --hostname=", str(e)) @defer.inlineCallbacks def test_node_listen_none(self): basedir = self.mktemp() rc, out, err = yield run_cli("create-node", "--listen=none", basedir) cfg = read_config(basedir) self.assertEqual(cfg.get("node", "tub.port"), "disabled") self.assertEqual(cfg.get("node", "tub.location"), "disabled") def test_node_listen_none_errors(self): basedir = self.mktemp() e = self.assertRaises(usage.UsageError, parse_cli, "create-node", "--listen=none", "--hostname=foo", basedir) self.assertEqual(str(e), "--hostname cannot be used when --listen=none") e = self.assertRaises(usage.UsageError, parse_cli, "create-node", "--listen=none", "--port=foo", "--location=foo", basedir) self.assertEqual(str(e), "--port/--location cannot be used when --listen=none") e = self.assertRaises(usage.UsageError, parse_cli, "create-node", "--listen=tcp,none", basedir) self.assertEqual(str(e), "--listen=tcp requires --hostname=") def test_node_listen_bad(self): basedir = self.mktemp() e = self.assertRaises(usage.UsageError, parse_cli, "create-node", "--listen=XYZZY,tcp", basedir) self.assertEqual(str(e), "--listen= must be one/some of: i2p, none, tcp, tor") def test_node_listen_tor_hostname(self): e = self.assertRaises(usage.UsageError, parse_cli, "create-node", 
"--listen=tor", "--hostname=foo") self.assertEqual(str(e), "--listen= must be tcp to use --hostname") def test_node_port_only(self): e = self.assertRaises(usage.UsageError, parse_cli, "create-node", "--port=unix:/var/tahoe/socket") self.assertEqual(str(e), "--port must be used with --location") def test_node_location_only(self): e = self.assertRaises(usage.UsageError, parse_cli, "create-node", "--location=tor:myservice.onion:12345") self.assertEqual(str(e), "--location must be used with --port") @defer.inlineCallbacks def test_node_basedir_exists(self): basedir = self.mktemp() os.mkdir(basedir) with open(os.path.join(basedir, "foo"), "w") as f: f.write("blocker") rc, out, err = yield run_cli("create-node", "--hostname=foo", basedir) self.assertEqual(rc, -1) self.assertIn(basedir, err) self.assertIn("is not empty", err) self.assertIn("To avoid clobbering anything, I am going to quit now", err) @defer.inlineCallbacks def test_node_slow(self): """ A node can be created using a listener type that returns an unfired Deferred from its ``create_config`` method. """ d = defer.Deferred() slow = StaticProvider(True, False, d, None) create_node._LISTENERS["xxyzy"] = slow self.addCleanup(lambda: create_node._LISTENERS.pop("xxyzy")) basedir = self.mktemp() d2 = run_cli("create-node", "--listen=xxyzy", basedir) d.callback(None) rc, out, err = yield d2 self.assertEqual(rc, 0) self.assertIn("Node created", out) self.assertEqual(err, "") def test_introducer_no_hostname(self): basedir = self.mktemp() e = self.assertRaises(usage.UsageError, parse_cli, "create-introducer", basedir) self.assertEqual(str(e), "--listen=tcp requires --hostname=") @defer.inlineCallbacks def test_introducer_hide_ip(self): basedir = self.mktemp() rc, out, err = yield run_cli("create-introducer", "--hide-ip", "--hostname=foo", basedir) cfg = read_config(basedir) self.assertEqual(cfg.getboolean("node", "reveal-IP-address"), False) @defer.inlineCallbacks def test_introducer_hostname(self): basedir = self.mktemp() rc, out, err = yield run_cli("create-introducer", "--hostname=foo", basedir) cfg = read_config(basedir) self.assertTrue("foo" in cfg.get("node", "tub.location")) self.assertEqual(cfg.getboolean("node", "reveal-IP-address"), True) @defer.inlineCallbacks def test_introducer_basedir_exists(self): basedir = self.mktemp() os.mkdir(basedir) with open(os.path.join(basedir, "foo"), "w") as f: f.write("blocker") rc, out, err = yield run_cli("create-introducer", "--hostname=foo", basedir) self.assertEqual(rc, -1) self.assertIn(basedir, err) self.assertIn("is not empty", err) self.assertIn("To avoid clobbering anything, I am going to quit now", err) def fake_config(testcase: unittest.TestCase, module: Any, result: Any) -> list[tuple]: """ Monkey-patch a fake configuration function into the given module. :param testcase: The test case to use to do the monkey-patching. :param module: The module into which to patch the fake function. :param result: The return value for the fake function. :return: A list of tuples of the arguments the fake function was called with. 
""" calls = [] def fake_config(reactor, cli_config): calls.append((reactor, cli_config)) return result testcase.patch(module, "create_config", fake_config) return calls class Tor(unittest.TestCase): def test_default(self): basedir = self.mktemp() tor_config = {"tor": [("abc", "def")]} tor_port = "ghi" tor_location = "jkl" config_d = defer.succeed( ListenerConfig([tor_port], [tor_location], tor_config) ) calls = fake_config(self, tor_provider, config_d) rc, out, err = self.successResultOf( run_cli("create-node", "--listen=tor", basedir), ) self.assertEqual(len(calls), 1) args = calls[0] self.assertIdentical(args[0], reactor) self.assertIsInstance(args[1], create_node.CreateNodeOptions) self.assertEqual(args[1]["listen"], "tor") cfg = read_config(basedir) self.assertEqual(cfg.get("tor", "abc"), "def") self.assertEqual(cfg.get("node", "tub.port"), "ghi") self.assertEqual(cfg.get("node", "tub.location"), "jkl") def test_launch(self): """ The ``--tor-launch`` command line option sets ``tor-launch`` to ``True``. """ basedir = self.mktemp() config_d = defer.succeed(None) calls = fake_config(self, tor_provider, config_d) rc, out, err = self.successResultOf( run_cli( "create-node", "--listen=tor", "--tor-launch", basedir, ), ) args = calls[0] self.assertEqual(args[1]["listen"], "tor") self.assertEqual(args[1]["tor-launch"], True) self.assertEqual(args[1]["tor-control-port"], None) def test_control_port(self): """ The ``--tor-control-port`` command line parameter's value is passed along as the ``tor-control-port`` value. """ basedir = self.mktemp() config_d = defer.succeed(None) calls = fake_config(self, tor_provider, config_d) rc, out, err = self.successResultOf( run_cli( "create-node", "--listen=tor", "--tor-control-port=mno", basedir, ), ) args = calls[0] self.assertEqual(args[1]["listen"], "tor") self.assertEqual(args[1]["tor-launch"], False) self.assertEqual(args[1]["tor-control-port"], "mno") def test_not_both(self): e = self.assertRaises(usage.UsageError, parse_cli, "create-node", "--listen=tor", "--tor-launch", "--tor-control-port=foo") self.assertEqual(str(e), "use either --tor-launch or" " --tor-control-port=, not both") def test_launch_without_listen(self): e = self.assertRaises(usage.UsageError, parse_cli, "create-node", "--listen=none", "--tor-launch") self.assertEqual(str(e), "--tor-launch requires --listen=tor") def test_control_port_without_listen(self): e = self.assertRaises(usage.UsageError, parse_cli, "create-node", "--listen=none", "--tor-control-port=foo") self.assertEqual(str(e), "--tor-control-port= requires --listen=tor") class I2P(unittest.TestCase): def test_default(self): basedir = self.mktemp() i2p_config = {"i2p": [("abc", "def")]} i2p_port = "ghi" i2p_location = "jkl" dest_d = defer.succeed(ListenerConfig([i2p_port], [i2p_location], i2p_config)) calls = fake_config(self, i2p_provider, dest_d) rc, out, err = self.successResultOf( run_cli("create-node", "--listen=i2p", basedir), ) self.assertEqual(len(calls), 1) args = calls[0] self.assertIdentical(args[0], reactor) self.assertIsInstance(args[1], create_node.CreateNodeOptions) self.assertEqual(args[1]["listen"], "i2p") cfg = read_config(basedir) self.assertEqual(cfg.get("i2p", "abc"), "def") self.assertEqual(cfg.get("node", "tub.port"), "ghi") self.assertEqual(cfg.get("node", "tub.location"), "jkl") def test_launch(self): e = self.assertRaises(usage.UsageError, parse_cli, "create-node", "--listen=i2p", "--i2p-launch") self.assertEqual(str(e), "--i2p-launch is under development") def test_sam_port(self): basedir = 
self.mktemp() dest_d = defer.succeed(None) calls = fake_config(self, i2p_provider, dest_d) rc, out, err = self.successResultOf( run_cli( "create-node", "--listen=i2p", "--i2p-sam-port=mno", basedir, ), ) args = calls[0] self.assertEqual(args[1]["listen"], "i2p") self.assertEqual(args[1]["i2p-launch"], False) self.assertEqual(args[1]["i2p-sam-port"], "mno") def test_not_both(self): e = self.assertRaises(usage.UsageError, parse_cli, "create-node", "--listen=i2p", "--i2p-launch", "--i2p-sam-port=foo") self.assertEqual(str(e), "use either --i2p-launch or" " --i2p-sam-port=, not both") def test_launch_without_listen(self): e = self.assertRaises(usage.UsageError, parse_cli, "create-node", "--listen=none", "--i2p-launch") self.assertEqual(str(e), "--i2p-launch requires --listen=i2p") def test_sam_port_without_listen(self): e = self.assertRaises(usage.UsageError, parse_cli, "create-node", "--listen=none", "--i2p-sam-port=foo") self.assertEqual(str(e), "--i2p-sam-port= requires --listen=i2p") tahoe_lafs-1.20.0/src/allmydata/test/cli/test_create_alias.py0000644000000000000000000002431313615410400021134 0ustar00""" Ported to Python 3. """ from io import StringIO import os.path from twisted.trial import unittest from urllib.parse import quote as url_quote from allmydata.util import fileutil from allmydata.scripts.common import get_aliases from allmydata.scripts import cli, runner from ..no_network import GridTestMixin from allmydata.util.encodingutil import quote_output_u from .common import CLITestMixin class CreateAlias(GridTestMixin, CLITestMixin, unittest.TestCase): def _test_webopen(self, args, expected_url): o = runner.Options() o.parseOptions(["--node-directory", self.get_clientdir(), "webopen"] + list(args)) urls = [] o.subOptions.stdout = StringIO() o.subOptions.stderr = StringIO() o.subOptions.stdin = StringIO() rc = cli.webopen(o.subOptions, urls.append) self.failUnlessReallyEqual(rc, 0) self.failUnlessReallyEqual(len(urls), 1) self.assertEqual(urls[0], expected_url) def test_create(self): self.basedir = "cli/CreateAlias/create" self.set_up_grid(oneshare=True) aliasfile = os.path.join(self.get_clientdir(), "private", "aliases") d = self.do_cli("create-alias", "tahoe") def _done(args): (rc, stdout, stderr) = args self.assertEqual(stderr, "") self.assertIn("Alias 'tahoe' created", stdout) aliases = get_aliases(self.get_clientdir()) self.failUnless("tahoe" in aliases) self.failUnless(aliases["tahoe"].startswith(b"URI:DIR2:")) d.addCallback(_done) d.addCallback(lambda res: self.do_cli("create-alias", "two:")) def _stash_urls(res): aliases = get_aliases(self.get_clientdir()) node_url_file = os.path.join(self.get_clientdir(), "node.url") nodeurl = fileutil.read(node_url_file, mode="r").strip() self.welcome_url = nodeurl uribase = nodeurl + "uri/" self.tahoe_url = uribase + url_quote(aliases["tahoe"]) self.tahoe_subdir_url = self.tahoe_url + "/subdir" self.two_url = uribase + url_quote(aliases["two"]) self.two_uri = aliases["two"] d.addCallback(_stash_urls) d.addCallback(lambda res: self.do_cli("create-alias", "two")) # dup def _check_create_duplicate(args): (rc, stdout, stderr) = args self.failIfEqual(rc, 0) self.failUnless("Alias 'two' already exists!" 
in stderr) aliases = get_aliases(self.get_clientdir()) self.failUnlessReallyEqual(aliases["two"], self.two_uri) d.addCallback(_check_create_duplicate) d.addCallback(lambda res: self.do_cli("add-alias", "added", self.two_uri)) def _check_add(args): (rc, stdout, stderr) = args self.failUnlessReallyEqual(rc, 0) self.failUnless("Alias 'added' added" in stdout) d.addCallback(_check_add) # check add-alias with a duplicate d.addCallback(lambda res: self.do_cli("add-alias", "two", self.two_uri)) def _check_add_duplicate(args): (rc, stdout, stderr) = args self.failIfEqual(rc, 0) self.failUnless("Alias 'two' already exists!" in stderr) aliases = get_aliases(self.get_clientdir()) self.failUnlessReallyEqual(aliases["two"], self.two_uri) d.addCallback(_check_add_duplicate) # check create-alias and add-alias with invalid aliases def _check_invalid(args): (rc, stdout, stderr) = args self.failIfEqual(rc, 0) self.failUnlessIn("cannot contain", stderr) for invalid in ['foo:bar', 'foo bar', 'foobar::']: d.addCallback(lambda res, invalid=invalid: self.do_cli("create-alias", invalid)) d.addCallback(_check_invalid) d.addCallback(lambda res, invalid=invalid: self.do_cli("add-alias", invalid, self.two_uri)) d.addCallback(_check_invalid) def _test_urls(junk): self._test_webopen([], self.welcome_url) self._test_webopen(["/"], self.tahoe_url) self._test_webopen(["tahoe:"], self.tahoe_url) self._test_webopen(["tahoe:/"], self.tahoe_url) self._test_webopen(["tahoe:subdir"], self.tahoe_subdir_url) self._test_webopen(["-i", "tahoe:subdir"], self.tahoe_subdir_url+"?t=info") self._test_webopen(["tahoe:subdir/"], self.tahoe_subdir_url + '/') self._test_webopen(["tahoe:subdir/file"], self.tahoe_subdir_url + '/file') self._test_webopen(["--info", "tahoe:subdir/file"], self.tahoe_subdir_url + '/file?t=info') # if "file" is indeed a file, then the url produced by webopen in # this case is disallowed by the webui. but by design, webopen # passes through the mistake from the user to the resultant # webopened url self._test_webopen(["tahoe:subdir/file/"], self.tahoe_subdir_url + '/file/') self._test_webopen(["two:"], self.two_url) d.addCallback(_test_urls) def _remove_trailing_newline_and_create_alias(ign): # ticket #741 is about a manually-edited alias file (which # doesn't end in a newline) being corrupted by a subsequent # "tahoe create-alias" old = fileutil.read(aliasfile) fileutil.write(aliasfile, old.rstrip()) return self.do_cli("create-alias", "un-corrupted1") d.addCallback(_remove_trailing_newline_and_create_alias) def _check_not_corrupted1(args): (rc, stdout, stderr) = args self.failUnless("Alias 'un-corrupted1' created" in stdout, stdout) self.failIf(stderr) # the old behavior was to simply append the new record, causing a # line that looked like "NAME1: CAP1NAME2: CAP2". This won't look # like a valid dircap, so get_aliases() will raise an exception. aliases = get_aliases(self.get_clientdir()) self.failUnless("added" in aliases) self.failUnless(aliases["added"].startswith(b"URI:DIR2:")) # to be safe, let's confirm that we don't see "NAME2:" in CAP1. # No chance of a false-negative, because the hyphen in # "un-corrupted1" is not a valid base32 character. 
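        # (If b"un-corrupted1:" showed up inside the stored cap for "added", it
        # would mean the new record had been glued onto the previous line, i.e.
        # the "NAME1: CAP1NAME2: CAP2" corruption from ticket #741 described
        # above.)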
self.failIfIn(b"un-corrupted1:", aliases["added"]) self.failUnless("un-corrupted1" in aliases) self.failUnless(aliases["un-corrupted1"].startswith(b"URI:DIR2:")) d.addCallback(_check_not_corrupted1) def _remove_trailing_newline_and_add_alias(ign): # same thing, but for "tahoe add-alias" old = fileutil.read(aliasfile) fileutil.write(aliasfile, old.rstrip()) return self.do_cli("add-alias", "un-corrupted2", self.two_uri) d.addCallback(_remove_trailing_newline_and_add_alias) def _check_not_corrupted(args): (rc, stdout, stderr) = args self.failUnless("Alias 'un-corrupted2' added" in stdout, stdout) self.failIf(stderr) aliases = get_aliases(self.get_clientdir()) self.failUnless("un-corrupted1" in aliases) self.failUnless(aliases["un-corrupted1"].startswith(b"URI:DIR2:")) self.failIfIn(b"un-corrupted2:", aliases["un-corrupted1"]) self.failUnless("un-corrupted2" in aliases) self.failUnless(aliases["un-corrupted2"].startswith(b"URI:DIR2:")) d.addCallback(_check_not_corrupted) return d def test_create_unicode(self): self.basedir = "cli/CreateAlias/create_unicode" self.set_up_grid(oneshare=True) etudes_arg = u"\u00E9tudes" lumiere_arg = u"lumi\u00E8re.txt" d = self.do_cli("create-alias", etudes_arg) def _check_create_unicode(args): (rc, out, err) = args self.failUnlessReallyEqual(rc, 0) self.assertEqual(len(err), 0, err) self.failUnlessIn(u"Alias %s created" % (quote_output_u(etudes_arg),), out) aliases = get_aliases(self.get_clientdir()) self.failUnless(aliases[u"\u00E9tudes"].startswith(b"URI:DIR2:")) d.addCallback(_check_create_unicode) d.addCallback(lambda res: self.do_cli("ls", etudes_arg + ":")) def _check_ls1(args): (rc, out, err) = args self.failUnlessReallyEqual(rc, 0) self.assertEqual(len(err), 0, err) self.assertEqual(len(out), 0, out) d.addCallback(_check_ls1) DATA = b"Blah blah blah \xff blah \x00 blah" d.addCallback(lambda res: self.do_cli("put", "-", etudes_arg + ":uploaded.txt", stdin=DATA)) d.addCallback(lambda res: self.do_cli("ls", etudes_arg + ":")) def _check_ls2(args): (rc, out, err) = args self.failUnlessReallyEqual(rc, 0) self.assertEqual(len(err), 0, err) self.assertEqual(out, "uploaded.txt\n") d.addCallback(_check_ls2) d.addCallback(lambda res: self.do_cli("get", etudes_arg + ":uploaded.txt", return_bytes=True)) def _check_get(args): (rc, out, err) = args self.failUnlessReallyEqual(rc, 0) self.assertEqual(len(err), 0, err) self.failUnlessReallyEqual(out, DATA) d.addCallback(_check_get) # Ensure that an Unicode filename in an Unicode alias works as expected d.addCallback(lambda res: self.do_cli("put", "-", etudes_arg + ":" + lumiere_arg, stdin=b"Let the sunshine In!")) d.addCallback(lambda res: self.do_cli( "get", str(get_aliases(self.get_clientdir())[u"\u00E9tudes"], "ascii") + "/" + lumiere_arg, return_bytes=True)) def _check_get2(args): (rc, out, err) = args self.failUnlessReallyEqual(rc, 0) self.assertEqual(len(err), 0, err) self.failUnlessReallyEqual(out, b"Let the sunshine In!") d.addCallback(_check_get2) return d # TODO: test list-aliases, including Unicode tahoe_lafs-1.20.0/src/allmydata/test/cli/test_grid_manager.py0000644000000000000000000002650213615410400021141 0ustar00""" Tests for the grid manager CLI. 
""" import os from io import ( BytesIO, ) from unittest import ( skipIf, ) from twisted.trial.unittest import ( TestCase, ) from allmydata.cli.grid_manager import ( grid_manager, ) import click.testing # these imports support the tests for `tahoe *` subcommands from ..common_util import ( run_cli, ) from ..common import ( superuser, ) from twisted.internet.defer import ( inlineCallbacks, ) from twisted.python.filepath import ( FilePath, ) from twisted.python.runtime import ( platform, ) from allmydata.util import jsonbytes as json class GridManagerCommandLine(TestCase): """ Test the mechanics of the `grid-manager` command """ def setUp(self): self.runner = click.testing.CliRunner() super(GridManagerCommandLine, self).setUp() def invoke_and_check(self, *args, **kwargs): """Invoke a command with the runner and ensure it succeeded.""" result = self.runner.invoke(*args, **kwargs) if result.exception is not None: raise result.exc_info[1].with_traceback(result.exc_info[2]) self.assertEqual(result.exit_code, 0, result) return result def test_create(self): """ Create a new grid-manager """ with self.runner.isolated_filesystem(): result = self.invoke_and_check(grid_manager, ["--config", "foo", "create"]) self.assertEqual(["foo"], os.listdir(".")) self.assertEqual(["config.json"], os.listdir("./foo")) result = self.invoke_and_check(grid_manager, ["--config", "foo", "public-identity"]) self.assertTrue(result.output.startswith("pub-v0-")) def test_load_invalid(self): """ An invalid config is reported to the user """ with self.runner.isolated_filesystem(): with open("config.json", "wb") as f: f.write(json.dumps_bytes({"not": "valid"})) result = self.runner.invoke(grid_manager, ["--config", ".", "public-identity"]) self.assertNotEqual(result.exit_code, 0) self.assertIn( "Error loading Grid Manager", result.output, ) def test_create_already(self): """ It's an error to create a new grid-manager in an existing directory. 
""" with self.runner.isolated_filesystem(): result = self.invoke_and_check(grid_manager, ["--config", "foo", "create"]) result = self.runner.invoke(grid_manager, ["--config", "foo", "create"]) self.assertEqual(1, result.exit_code) self.assertIn( "Can't create", result.stdout, ) def test_create_stdout(self): """ Create a new grid-manager with no files """ with self.runner.isolated_filesystem(): result = self.invoke_and_check(grid_manager, ["--config", "-", "create"]) self.assertEqual([], os.listdir(".")) config = json.loads(result.output) self.assertEqual( {"private_key", "grid_manager_config_version"}, set(config.keys()), ) def test_list_stdout(self): """ Load Grid Manager without files (using 'list' subcommand, but any will do) """ config = { "storage_servers": { "storage0": { "public_key": "pub-v0-cbq6hcf3pxcz6ouoafrbktmkixkeuywpcpbcomzd3lqbkq4nmfga" } }, "private_key": "priv-v0-6uinzyaxy3zvscwgsps5pxcfezhrkfb43kvnrbrhhfzyduyqnniq", "grid_manager_config_version": 0 } result = self.invoke_and_check( grid_manager, ["--config", "-", "list"], input=BytesIO(json.dumps_bytes(config)), ) self.assertEqual(result.exit_code, 0) self.assertEqual( "storage0: pub-v0-cbq6hcf3pxcz6ouoafrbktmkixkeuywpcpbcomzd3lqbkq4nmfga\n", result.output, ) def test_add_and_sign(self): """ Add a new storage-server and sign a certificate for it """ pubkey = "pub-v0-cbq6hcf3pxcz6ouoafrbktmkixkeuywpcpbcomzd3lqbkq4nmfga" with self.runner.isolated_filesystem(): self.invoke_and_check(grid_manager, ["--config", "foo", "create"]) self.invoke_and_check(grid_manager, ["--config", "foo", "add", "storage0", pubkey]) result = self.invoke_and_check(grid_manager, ["--config", "foo", "sign", "storage0", "10"]) sigcert = json.loads(result.output) self.assertEqual({"certificate", "signature"}, set(sigcert.keys())) cert = json.loads(sigcert['certificate']) self.assertEqual(cert["public_key"], pubkey) def test_add_and_sign_second_cert(self): """ Add a new storage-server and sign two certificates. """ pubkey = "pub-v0-cbq6hcf3pxcz6ouoafrbktmkixkeuywpcpbcomzd3lqbkq4nmfga" with self.runner.isolated_filesystem(): self.invoke_and_check(grid_manager, ["--config", "foo", "create"]) self.invoke_and_check(grid_manager, ["--config", "foo", "add", "storage0", pubkey]) self.invoke_and_check(grid_manager, ["--config", "foo", "sign", "storage0", "10"]) self.invoke_and_check(grid_manager, ["--config", "foo", "sign", "storage0", "10"]) # we should now have two certificates stored self.assertEqual( set(FilePath("foo").listdir()), {'storage0.cert.1', 'storage0.cert.0', 'config.json'}, ) def test_add_twice(self): """ An error is reported trying to add an existing server """ pubkey0 = "pub-v0-cbq6hcf3pxcz6ouoafrbktmkixkeuywpcpbcomzd3lqbkq4nmfga" pubkey1 = "pub-v0-5ysc55trfvfvg466v46j4zmfyltgus3y2gdejifctv7h4zkuyveq" with self.runner.isolated_filesystem(): self.invoke_and_check(grid_manager, ["--config", "foo", "create"]) self.invoke_and_check(grid_manager, ["--config", "foo", "add", "storage0", pubkey0]) result = self.runner.invoke(grid_manager, ["--config", "foo", "add", "storage0", pubkey1]) self.assertNotEquals(result.exit_code, 0) self.assertIn( "A storage-server called 'storage0' already exists", result.output, ) def test_add_list_remove(self): """ Add a storage server, list it, remove it. 
""" pubkey = "pub-v0-cbq6hcf3pxcz6ouoafrbktmkixkeuywpcpbcomzd3lqbkq4nmfga" with self.runner.isolated_filesystem(): self.invoke_and_check(grid_manager, ["--config", "foo", "create"]) self.invoke_and_check(grid_manager, ["--config", "foo", "add", "storage0", pubkey]) self.invoke_and_check(grid_manager, ["--config", "foo", "sign", "storage0", "1"]) result = self.invoke_and_check(grid_manager, ["--config", "foo", "list"]) names = [ line.split(':')[0] for line in result.output.strip().split('\n') if not line.startswith(" ") # "cert" lines start with whitespace ] self.assertEqual(names, ["storage0"]) self.invoke_and_check(grid_manager, ["--config", "foo", "remove", "storage0"]) result = self.invoke_and_check(grid_manager, ["--config", "foo", "list"]) self.assertEqual(result.output.strip(), "") def test_remove_missing(self): """ Error reported when removing non-existant server """ with self.runner.isolated_filesystem(): self.invoke_and_check(grid_manager, ["--config", "foo", "create"]) result = self.runner.invoke(grid_manager, ["--config", "foo", "remove", "storage0"]) self.assertNotEquals(result.exit_code, 0) self.assertIn( "No storage-server called 'storage0' exists", result.output, ) def test_sign_missing(self): """ Error reported when signing non-existant server """ with self.runner.isolated_filesystem(): self.invoke_and_check(grid_manager, ["--config", "foo", "create"]) result = self.runner.invoke(grid_manager, ["--config", "foo", "sign", "storage0", "42"]) self.assertNotEquals(result.exit_code, 0) self.assertIn( "No storage-server called 'storage0' exists", result.output, ) @skipIf(platform.isWindows(), "We don't know how to set permissions on Windows.") @skipIf(superuser, "cannot test as superuser with all permissions") def test_sign_bad_perms(self): """ Error reported if we can't create certificate file """ pubkey = "pub-v0-cbq6hcf3pxcz6ouoafrbktmkixkeuywpcpbcomzd3lqbkq4nmfga" with self.runner.isolated_filesystem(): self.invoke_and_check(grid_manager, ["--config", "foo", "create"]) self.invoke_and_check(grid_manager, ["--config", "foo", "add", "storage0", pubkey]) # make the directory un-writable (so we can't create a new cert) os.chmod("foo", 0o550) result = self.runner.invoke(grid_manager, ["--config", "foo", "sign", "storage0", "42"]) self.assertEquals(result.exit_code, 1) self.assertIn( "Permission denied", result.output, ) class TahoeAddGridManagerCert(TestCase): """ Test `tahoe admin add-grid-manager-cert` subcommand """ @inlineCallbacks def test_help(self): """ some kind of help is printed """ code, out, err = yield run_cli("admin", "add-grid-manager-cert") self.assertEqual(err, "") self.assertNotEqual(0, code) @inlineCallbacks def test_no_name(self): """ error to miss --name option """ code, out, err = yield run_cli( "admin", "add-grid-manager-cert", "--filename", "-", stdin=b"the cert", ) self.assertIn( "Must provide --name", out ) @inlineCallbacks def test_no_filename(self): """ error to miss --name option """ code, out, err = yield run_cli( "admin", "add-grid-manager-cert", "--name", "foo", stdin=b"the cert", ) self.assertIn( "Must provide --filename", out ) @inlineCallbacks def test_add_one(self): """ we can add a certificate """ nodedir = self.mktemp() fake_cert = b"""{"certificate": "", "signature": ""}""" code, out, err = yield run_cli( "--node-directory", nodedir, "admin", "add-grid-manager-cert", "-f", "-", "--name", "foo", stdin=fake_cert, ignore_stderr=True, ) nodepath = FilePath(nodedir) with nodepath.child("tahoe.cfg").open("r") as f: config_data = f.read() 
self.assertIn("tahoe.cfg", nodepath.listdir()) self.assertIn( b"foo = foo.cert", config_data, ) self.assertIn("foo.cert", nodepath.listdir()) with nodepath.child("foo.cert").open("r") as f: self.assertEqual( json.load(f), json.loads(fake_cert) ) tahoe_lafs-1.20.0/src/allmydata/test/cli/test_invite.py0000644000000000000000000003675113615410400020027 0ustar00""" Tests for ``tahoe invite``. """ from __future__ import annotations import json import os from functools import partial from os.path import join from typing import Callable, Optional, Sequence, TypeVar, Union, Coroutine, Any, Tuple, cast, Generator from twisted.internet import defer from twisted.trial import unittest from ...client import read_config from ...scripts import runner from ...util.jsonbytes import dumps_bytes from ..common_util import run_cli from ..no_network import GridTestMixin from .common import CLITestMixin from .wormholetesting import MemoryWormholeServer, TestingHelper, memory_server, IWormhole # Logically: # JSONable = dict[str, Union[JSONable, None, int, float, str, list[JSONable]]] # # But practically: JSONable = Union[dict, None, int, float, str, list] async def open_wormhole() -> tuple[Callable, IWormhole, str]: """ Create a new in-memory wormhole server, open one end of a wormhole, and return it and related info. :return: A three-tuple allowing use of the wormhole. The first element is a callable like ``run_cli`` but which will run commands so that they use the in-memory wormhole server instead of a real one. The second element is the open wormhole. The third element is the wormhole's code. """ server = MemoryWormholeServer() options = runner.Options() options.wormhole = server reactor = object() wormhole = server.create( "tahoe-lafs.org/invite", "ws://wormhole.tahoe-lafs.org:4000/v1", reactor, ) code = await wormhole.get_code() return (partial(run_cli, options=options), wormhole, code) def make_simple_peer( reactor, server: MemoryWormholeServer, helper: TestingHelper, messages: Sequence[JSONable], ) -> Callable[[], Coroutine[defer.Deferred[IWormhole], Any, IWormhole]]: """ Make a wormhole peer that just sends the given messages. The returned function returns an awaitable that fires with the peer's end of the wormhole. """ async def peer() -> IWormhole: # Run the client side of the invitation by manually pumping a # message through the wormhole. # First, wait for the server to create the wormhole at all. wormhole = await helper.wait_for_wormhole( "tahoe-lafs.org/invite", "ws://wormhole.tahoe-lafs.org:4000/v1", ) # Then read out its code and open the other side of the wormhole. code = await wormhole.when_code() other_end = server.create( "tahoe-lafs.org/invite", "ws://wormhole.tahoe-lafs.org:4000/v1", reactor, ) other_end.set_code(code) send_messages(other_end, messages) return other_end return peer def send_messages(wormhole: IWormhole, messages: Sequence[JSONable]) -> None: """ Send a list of message through a wormhole. """ for msg in messages: wormhole.send_message(dumps_bytes(msg)) A = TypeVar("A") B = TypeVar("B") def concurrently( client: Callable[[], Union[ Coroutine[defer.Deferred[A], Any, A], Generator[defer.Deferred[A], Any, A], ]], server: Callable[[], Union[ Coroutine[defer.Deferred[B], Any, B], Generator[defer.Deferred[B], Any, B], ]], ) -> defer.Deferred[Tuple[A, B]]: """ Run two asynchronous functions concurrently and asynchronously return a tuple of both their results. 
""" result = defer.gatherResults([ defer.Deferred.fromCoroutine(client()), defer.Deferred.fromCoroutine(server()), ]).addCallback(tuple) # type: ignore return cast(defer.Deferred[Tuple[A, B]], result) class Join(GridTestMixin, CLITestMixin, unittest.TestCase): @defer.inlineCallbacks def setUp(self): self.basedir = self.mktemp() yield super(Join, self).setUp() yield self.set_up_grid(oneshare=True) @defer.inlineCallbacks def test_create_node_join(self): """ successfully join after an invite """ node_dir = self.mktemp() run_cli, wormhole, code = yield defer.Deferred.fromCoroutine(open_wormhole()) send_messages(wormhole, [ {u"abilities": {u"server-v1": {}}}, { u"shares-needed": 1, u"shares-happy": 1, u"shares-total": 1, u"nickname": u"somethinghopefullyunique", u"introducer": u"pb://foo", }, ]) rc, out, err = yield run_cli( "create-client", "--join", code, node_dir, ) self.assertEqual(0, rc) config = read_config(node_dir, u"") self.assertIn( "pb://foo", set( furl for (furl, cache) in config.get_introducer_configuration().values() ), ) with open(join(node_dir, 'tahoe.cfg'), 'r') as f: config = f.read() self.assertIn(u"somethinghopefullyunique", config) @defer.inlineCallbacks def test_create_node_illegal_option(self): """ Server sends JSON with unknown/illegal key """ node_dir = self.mktemp() run_cli, wormhole, code = yield defer.Deferred.fromCoroutine(open_wormhole()) send_messages(wormhole, [ {u"abilities": {u"server-v1": {}}}, { u"shares-needed": 1, u"shares-happy": 1, u"shares-total": 1, u"nickname": u"somethinghopefullyunique", u"introducer": u"pb://foo", u"something-else": u"not allowed", }, ]) rc, out, err = yield run_cli( "create-client", "--join", code, node_dir, ) # should still succeed -- just ignores the not-whitelisted # "something-else" option self.assertEqual(0, rc) class Invite(GridTestMixin, CLITestMixin, unittest.TestCase): @defer.inlineCallbacks def setUp(self): self.basedir = self.mktemp() yield super(Invite, self).setUp() yield self.set_up_grid(oneshare=True) intro_dir = os.path.join(self.basedir, "introducer") yield run_cli( "create-introducer", "--listen", "none", intro_dir, ) async def _invite_success(self, extra_args: Sequence[bytes] = (), tahoe_config: Optional[bytes] = None) -> str: """ Exercise an expected-success case of ``tahoe invite``. :param extra_args: Positional arguments to pass to ``tahoe invite`` before the nickname. :param tahoe_config: If given, bytes to write to the node's ``tahoe.cfg`` before running ``tahoe invite. """ intro_dir = os.path.join(self.basedir, "introducer") # we've never run the introducer, so it hasn't created # introducer.furl yet priv_dir = join(intro_dir, "private") with open(join(priv_dir, "introducer.furl"), "w") as fobj_intro: fobj_intro.write("pb://fooblam\n") if tahoe_config is not None: assert isinstance(tahoe_config, bytes) with open(join(intro_dir, "tahoe.cfg"), "wb") as fobj_cfg: fobj_cfg.write(tahoe_config) wormhole_server, helper = memory_server() options = runner.Options() options.wormhole = wormhole_server reactor = object() async def server(): # Run the server side of the invitation process using the CLI. rc, out, err = await run_cli( "-d", intro_dir, "invite", *tuple(extra_args) + ("foo",), options=options, ) # Send a proper client abilities message. client = make_simple_peer(reactor, wormhole_server, helper, [{u"abilities": {u"client-v1": {}}}]) other_end, _ = await concurrently(client, server) # Check the server's messages. First, it should announce its # abilities correctly. 
server_abilities = json.loads(await other_end.when_received()) self.assertEqual( server_abilities, { "abilities": { "server-v1": {} }, }, ) # Second, it should have an invitation with a nickname and introducer # furl. invite = json.loads(await other_end.when_received()) self.assertEqual( invite["nickname"], "foo", ) self.assertEqual( invite["introducer"], "pb://fooblam", ) return invite @defer.inlineCallbacks def test_invite_success(self): """ successfully send an invite """ invite = yield defer.Deferred.fromCoroutine(self._invite_success(( "--shares-needed", "1", "--shares-happy", "2", "--shares-total", "3", ))) self.assertEqual( invite["shares-needed"], "1", ) self.assertEqual( invite["shares-happy"], "2", ) self.assertEqual( invite["shares-total"], "3", ) @defer.inlineCallbacks def test_invite_success_read_share_config(self): """ If ``--shares-{needed,happy,total}`` are not given on the command line then the invitation is generated using the configured values. """ invite = yield defer.Deferred.fromCoroutine(self._invite_success(tahoe_config=b""" [client] shares.needed = 2 shares.happy = 4 shares.total = 6 """)) self.assertEqual( invite["shares-needed"], "2", ) self.assertEqual( invite["shares-happy"], "4", ) self.assertEqual( invite["shares-total"], "6", ) @defer.inlineCallbacks def test_invite_no_furl(self): """ Invites must include the Introducer FURL """ intro_dir = os.path.join(self.basedir, "introducer") options = runner.Options() options.wormhole = None rc, out, err = yield run_cli( "-d", intro_dir, "invite", "--shares-needed", "1", "--shares-happy", "1", "--shares-total", "1", "foo", options=options, ) self.assertNotEqual(rc, 0) self.assertIn(u"Can't find introducer FURL", out + err) @defer.inlineCallbacks def test_invite_wrong_client_abilities(self): """ Send unknown client version """ intro_dir = os.path.join(self.basedir, "introducer") # we've never run the introducer, so it hasn't created # introducer.furl yet priv_dir = join(intro_dir, "private") with open(join(priv_dir, "introducer.furl"), "w") as f: f.write("pb://fooblam\n") wormhole_server, helper = memory_server() options = runner.Options() options.wormhole = wormhole_server reactor = object() async def server(): rc, out, err = await run_cli( "-d", intro_dir, "invite", "--shares-needed", "1", "--shares-happy", "1", "--shares-total", "1", "foo", options=options, ) self.assertNotEqual(rc, 0) self.assertIn(u"No 'client-v1' in abilities", out + err) # Send some surprising client abilities. client = make_simple_peer(reactor, wormhole_server, helper, [{u"abilities": {u"client-v9000": {}}}]) yield concurrently(client, server) @defer.inlineCallbacks def test_invite_no_client_abilities(self): """ Client doesn't send any client abilities at all """ intro_dir = os.path.join(self.basedir, "introducer") # we've never run the introducer, so it hasn't created # introducer.furl yet priv_dir = join(intro_dir, "private") with open(join(priv_dir, "introducer.furl"), "w") as f: f.write("pb://fooblam\n") wormhole_server, helper = memory_server() options = runner.Options() options.wormhole = wormhole_server reactor = object() async def server(): # Run the server side of the invitation process using the CLI. rc, out, err = await run_cli( "-d", intro_dir, "invite", "--shares-needed", "1", "--shares-happy", "1", "--shares-total", "1", "foo", options=options, ) self.assertNotEqual(rc, 0) self.assertIn(u"No 'abilities' from client", out + err) # Send a no-abilities message through to the server. 
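        # The single {} payload is a valid JSON message that simply lacks an
        # "abilities" key, which is exactly what the server must reject.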
client = make_simple_peer(reactor, wormhole_server, helper, [{}]) yield concurrently(client, server) @defer.inlineCallbacks def test_invite_wrong_server_abilities(self): """ Server sends unknown version """ intro_dir = os.path.join(self.basedir, "introducer") # we've never run the introducer, so it hasn't created # introducer.furl yet priv_dir = join(intro_dir, "private") with open(join(priv_dir, "introducer.furl"), "w") as f: f.write("pb://fooblam\n") run_cli, wormhole, code = yield defer.Deferred.fromCoroutine(open_wormhole()) send_messages(wormhole, [ {u"abilities": {u"server-v9000": {}}}, { "shares-needed": "1", "shares-total": "1", "shares-happy": "1", "nickname": "foo", "introducer": "pb://fooblam", }, ]) rc, out, err = yield run_cli( "create-client", "--join", code, "foo", ) self.assertNotEqual(rc, 0) self.assertIn("Expected 'server-v1' in server abilities", out + err) @defer.inlineCallbacks def test_invite_no_server_abilities(self): """ Server sends unknown version """ intro_dir = os.path.join(self.basedir, "introducer") # we've never run the introducer, so it hasn't created # introducer.furl yet priv_dir = join(intro_dir, "private") with open(join(priv_dir, "introducer.furl"), "w") as f: f.write("pb://fooblam\n") run_cli, wormhole, code = yield defer.Deferred.fromCoroutine(open_wormhole()) send_messages(wormhole, [ {}, { "shares-needed": "1", "shares-total": "1", "shares-happy": "1", "nickname": "bar", "introducer": "pb://fooblam", }, ]) rc, out, err = yield run_cli( "create-client", "--join", code, "bar", ) self.assertNotEqual(rc, 0) self.assertIn("Expected 'abilities' in server introduction", out + err) @defer.inlineCallbacks def test_invite_no_nick(self): """ Should still work if server sends no nickname """ intro_dir = os.path.join(self.basedir, "introducer") options = runner.Options() options.wormhole = None rc, out, err = yield run_cli( "-d", intro_dir, "invite", "--shares-needed", "1", "--shares-happy", "1", "--shares-total", "1", options=options, ) self.assertTrue(rc) self.assertIn(u"Provide a single argument", out + err) tahoe_lafs-1.20.0/src/allmydata/test/cli/test_list.py0000644000000000000000000003125013615410400017471 0ustar00""" Ported to Python 3. 
""" from twisted.trial import unittest from twisted.internet import defer from allmydata.immutable import upload from allmydata.interfaces import MDMF_VERSION, SDMF_VERSION from allmydata.mutable.publish import MutableData from ..no_network import GridTestMixin from allmydata.util.encodingutil import quote_output from .common import CLITestMixin class List(GridTestMixin, CLITestMixin, unittest.TestCase): def test_list(self): self.basedir = "cli/List/list" self.set_up_grid() c0 = self.g.clients[0] small = b"small" good_arg = u"g\u00F6\u00F6d" good_out = u"g\u00F6\u00F6d" d = c0.create_dirnode() def _stash_root_and_create_file(n): self.rootnode = n self.rooturi = str(n.get_uri(), "utf-8") return n.add_file(u"g\u00F6\u00F6d", upload.Data(small, convergence=b"")) d.addCallback(_stash_root_and_create_file) def _stash_goodcap(n): self.goodcap = n.get_uri() d.addCallback(_stash_goodcap) d.addCallback(lambda ign: self.rootnode.create_subdirectory(u"1share")) d.addCallback(lambda n: self.delete_shares_numbered(n.get_uri(), list(range(1,10)))) d.addCallback(lambda ign: self.rootnode.create_subdirectory(u"0share")) d.addCallback(lambda n: self.delete_shares_numbered(n.get_uri(), list(range(0,10)))) d.addCallback(lambda ign: self.do_cli("add-alias", "tahoe", self.rooturi)) d.addCallback(lambda ign: self.do_cli("ls")) def _check1(args): (rc, out, err) = args self.failUnlessReallyEqual(rc, 0) self.assertEqual(len(err), 0, err) expected = sorted(["0share", "1share", good_out]) self.assertEqual(sorted(out.splitlines()), expected) d.addCallback(_check1) d.addCallback(lambda ign: self.do_cli("ls", "missing")) def _check2(args): (rc, out, err) = args self.failIfEqual(rc, 0) self.assertEqual(err.strip(), "No such file or directory") self.assertEqual(len(out), 0, out) d.addCallback(_check2) d.addCallback(lambda ign: self.do_cli("ls", "1share")) def _check3(args): (rc, out, err) = args self.failIfEqual(rc, 0) self.failUnlessIn("Error during GET: 410 Gone", err) self.failUnlessIn("UnrecoverableFileError:", err) self.failUnlessIn("could not be retrieved, because there were " "insufficient good shares.", err) self.assertEqual(len(out), 0, out) d.addCallback(_check3) d.addCallback(lambda ign: self.do_cli("ls", "0share")) d.addCallback(_check3) def _check4(args): (rc, out, err) = args if good_out is None: self.failUnlessReallyEqual(rc, 1) self.failUnlessIn("files whose names could not be converted", err) self.failUnlessIn(quote_output(u"g\u00F6\u00F6d"), err) self.assertEqual(len(out), 0, out) else: # listing a file (as dir/filename) should have the edge metadata, # including the filename self.failUnlessReallyEqual(rc, 0) self.failUnlessIn(good_out, out) self.failIfIn("-r-- %d -" % len(small), out, "trailing hyphen means unknown date") if good_arg is not None: d.addCallback(lambda ign: self.do_cli("ls", "-l", good_arg)) d.addCallback(_check4) # listing a file as $DIRCAP/filename should work just like dir/filename d.addCallback(lambda ign: self.do_cli("ls", "-l", self.rooturi + "/" + good_arg)) d.addCallback(_check4) # and similarly for $DIRCAP:./filename d.addCallback(lambda ign: self.do_cli("ls", "-l", self.rooturi + ":./" + good_arg)) d.addCallback(_check4) def _check5(args): # listing a raw filecap should not explode, but it will have no # metadata, just the size (rc, out, err) = args self.failUnlessReallyEqual(rc, 0) self.assertEqual("-r-- %d -" % len(small), out.strip()) d.addCallback(lambda ign: self.do_cli("ls", "-l", self.goodcap)) d.addCallback(_check5) # Now rename 'g\u00F6\u00F6d' to 'good' and repeat the 
tests that might have been skipped due # to encoding problems. d.addCallback(lambda ign: self.rootnode.move_child_to(u"g\u00F6\u00F6d", self.rootnode, u"good")) d.addCallback(lambda ign: self.do_cli("ls")) def _check1_ascii(args): (rc,out,err) = args self.failUnlessReallyEqual(rc, 0) self.assertEqual(len(err), 0, err) self.failUnlessReallyEqual(sorted(out.splitlines()), sorted(["0share", "1share", "good"])) d.addCallback(_check1_ascii) def _check4_ascii(args): # listing a file (as dir/filename) should have the edge metadata, # including the filename (rc, out, err) = args self.failUnlessReallyEqual(rc, 0) self.failUnlessIn("good", out) self.failIfIn("-r-- %d -" % len(small), out, "trailing hyphen means unknown date") d.addCallback(lambda ign: self.do_cli("ls", "-l", "good")) d.addCallback(_check4_ascii) # listing a file as $DIRCAP/filename should work just like dir/filename d.addCallback(lambda ign: self.do_cli("ls", "-l", self.rooturi + "/good")) d.addCallback(_check4_ascii) # and similarly for $DIRCAP:./filename d.addCallback(lambda ign: self.do_cli("ls", "-l", self.rooturi + ":./good")) d.addCallback(_check4_ascii) unknown_immcap = b"imm.URI:unknown" def _create_unknown(ign): nm = c0.nodemaker kids = {u"unknownchild-imm": (nm.create_from_cap(unknown_immcap), {})} return self.rootnode.create_subdirectory(u"unknown", initial_children=kids, mutable=False) d.addCallback(_create_unknown) def _check6(args): # listing a directory referencing an unknown object should print # an extra message to stderr (rc, out, err) = args self.failUnlessReallyEqual(rc, 0) self.failUnlessIn("?r-- ? - unknownchild-imm\n", out) self.failUnlessIn("included unknown objects", err) d.addCallback(lambda ign: self.do_cli("ls", "-l", "unknown")) d.addCallback(_check6) def _check7(args): # listing an unknown cap directly should print an extra message # to stderr (currently this only works if the URI starts with 'URI:' # after any 'ro.' or 'imm.' prefix, otherwise it will be confused # with an alias). (rc, out, err) = args self.failUnlessReallyEqual(rc, 0) self.failUnlessIn("?r-- ? -\n", out) self.failUnlessIn("included unknown objects", err) d.addCallback(lambda ign: self.do_cli("ls", "-l", unknown_immcap)) d.addCallback(_check7) return d def test_list_without_alias(self): # doing just 'tahoe ls' without specifying an alias or first # doing 'tahoe create-alias tahoe' should fail gracefully. 
self.basedir = "cli/List/list_without_alias" self.set_up_grid(oneshare=True) d = self.do_cli("ls") def _check(args): (rc, out, err) = args self.failUnlessReallyEqual(rc, 1) self.failUnlessIn("error:", err) self.assertEqual(len(out), 0, out) d.addCallback(_check) return d def test_list_with_nonexistent_alias(self): # doing 'tahoe ls' while specifying an alias that doesn't already # exist should fail with an informative error message self.basedir = "cli/List/list_with_nonexistent_alias" self.set_up_grid(oneshare=True) d = self.do_cli("ls", "nonexistent:") def _check(args): (rc, out, err) = args self.failUnlessReallyEqual(rc, 1) self.failUnlessIn("error:", err) self.failUnlessIn("nonexistent", err) self.assertEqual(len(out), 0, out) d.addCallback(_check) return d @defer.inlineCallbacks def test_list_readonly(self): self.basedir = "cli/List/list_readonly" yield self.set_up_grid(oneshare=True) c0 = self.g.clients[0] root = yield c0.create_dirnode() rooturi = root.get_uri() rc, out, err = yield self.do_cli("add-alias", "tahoe", rooturi) self.assertEqual(0, rc) rc, out, err = yield self.do_cli("list-aliases", "--readonly-uri") self.assertTrue('URI:DIR2-RO' in out) def _create_directory_structure(self): # Create a simple directory structure that we can use for MDMF, # SDMF, and immutable testing. assert self.g client = self.g.clients[0] # Create a dirnode d = client.create_dirnode() def _got_rootnode(n): # Add a few nodes. self._dircap = n.get_uri() nm = n._nodemaker # The uploaders may run at the same time, so we need two # MutableData instances or they'll fight over offsets &c and # break. mutable_data = MutableData(b"data" * 100000) mutable_data2 = MutableData(b"data" * 100000) # Add both kinds of mutable node. d1 = nm.create_mutable_file(mutable_data, version=MDMF_VERSION) d2 = nm.create_mutable_file(mutable_data2, version=SDMF_VERSION) # Add an immutable node. We do this through the directory, # with add_file. immutable_data = upload.Data(b"immutable data" * 100000, convergence=b"") d3 = n.add_file(u"immutable", immutable_data) ds = [d1, d2, d3] dl = defer.DeferredList(ds) def _made_files(args): (r1, r2, r3) = args self.failUnless(r1[0]) self.failUnless(r2[0]) self.failUnless(r3[0]) # r1, r2, and r3 contain nodes. mdmf_node = r1[1] sdmf_node = r2[1] imm_node = r3[1] self._mdmf_uri = mdmf_node.get_uri() self._mdmf_readonly_uri = mdmf_node.get_readonly_uri() self._sdmf_uri = mdmf_node.get_uri() self._sdmf_readonly_uri = sdmf_node.get_readonly_uri() self._imm_uri = imm_node.get_uri() d1 = n.set_node(u"mdmf", mdmf_node) d2 = n.set_node(u"sdmf", sdmf_node) return defer.DeferredList([d1, d2]) # We can now list the directory by listing self._dircap. dl.addCallback(_made_files) return dl d.addCallback(_got_rootnode) return d def test_list_mdmf(self): # 'tahoe ls' should include MDMF files. self.basedir = "cli/List/list_mdmf" self.set_up_grid(oneshare=True) d = self._create_directory_structure() d.addCallback(lambda ignored: self.do_cli("ls", self._dircap)) def _got_ls(args): (rc, out, err) = args self.failUnlessEqual(rc, 0) self.failUnlessEqual(err, "") self.failUnlessIn("immutable", out) self.failUnlessIn("mdmf", out) self.failUnlessIn("sdmf", out) d.addCallback(_got_ls) return d def test_list_mdmf_json(self): # 'tahoe ls' should include MDMF caps when invoked with MDMF # caps. 
self.basedir = "cli/List/list_mdmf_json" self.set_up_grid(oneshare=True) d = self._create_directory_structure() d.addCallback(lambda ignored: self.do_cli("ls", "--json", self._dircap)) def _got_json(args): (rc, out, err) = args self.failUnlessEqual(rc, 0) self.assertEqual(len(err), 0, err) self.failUnlessIn(str(self._mdmf_uri, "ascii"), out) self.failUnlessIn(str(self._mdmf_readonly_uri, "ascii"), out) self.failUnlessIn(str(self._sdmf_uri, "ascii"), out) self.failUnlessIn(str(self._sdmf_readonly_uri, "ascii"), out) self.failUnlessIn(str(self._imm_uri, "ascii"), out) self.failUnlessIn('"format": "SDMF"', out) self.failUnlessIn('"format": "MDMF"', out) d.addCallback(_got_json) return d tahoe_lafs-1.20.0/src/allmydata/test/cli/test_mv.py0000644000000000000000000002055013615410400017141 0ustar00""" Ported to Python 3. """ import os.path from twisted.trial import unittest from allmydata.util import fileutil from ..no_network import GridTestMixin from allmydata.scripts import tahoe_mv from .common import CLITestMixin class Mv(GridTestMixin, CLITestMixin, unittest.TestCase): def test_mv_behavior(self): self.basedir = "cli/Mv/mv_behavior" self.set_up_grid(oneshare=True) fn1 = os.path.join(self.basedir, "file1") DATA1 = b"Nuclear launch codes" fileutil.write(fn1, DATA1) fn2 = os.path.join(self.basedir, "file2") DATA2 = b"UML diagrams" fileutil.write(fn2, DATA2) # copy both files to the grid d = self.do_cli("create-alias", "tahoe") d.addCallback(lambda res: self.do_cli("cp", fn1, "tahoe:")) d.addCallback(lambda res: self.do_cli("cp", fn2, "tahoe:")) # do mv file1 file3 # (we should be able to rename files) d.addCallback(lambda res: self.do_cli("mv", "tahoe:file1", "tahoe:file3")) d.addCallback(lambda rc_out_err: self.failUnlessIn("OK", rc_out_err[1], "mv didn't rename a file")) # do mv file3 file2 # (This should succeed without issue) d.addCallback(lambda res: self.do_cli("mv", "tahoe:file3", "tahoe:file2")) # Out should contain "OK" to show that the transfer worked. d.addCallback(lambda rc_out_err: self.failUnlessIn("OK", rc_out_err[1], "mv didn't output OK after mving")) # Next, make a remote directory. d.addCallback(lambda res: self.do_cli("mkdir", "tahoe:directory")) # mv file2 directory # (should fail with a descriptive error message; the CLI mv # client should support this) d.addCallback(lambda res: self.do_cli("mv", "tahoe:file2", "tahoe:directory")) d.addCallback(lambda rc_out_err: self.failUnlessIn( "Error: You can't overwrite a directory with a file", rc_out_err[2], "mv shouldn't overwrite directories" )) # mv file2 directory/ # (should succeed by making file2 a child node of directory) d.addCallback(lambda res: self.do_cli("mv", "tahoe:file2", "tahoe:directory/")) # We should see an "OK"... d.addCallback(lambda rc_out_err: self.failUnlessIn("OK", rc_out_err[1], "mv didn't mv a file into a directory")) # ... and be able to GET the file d.addCallback(lambda res: self.do_cli("get", "tahoe:directory/file2", self.basedir + "new")) d.addCallback(lambda rc_out_err: self.failUnless(os.path.exists(self.basedir + "new"), "mv didn't write the destination file")) # ... and not find the file where it was before. 
d.addCallback(lambda res: self.do_cli("get", "tahoe:file2", "file2")) d.addCallback(lambda rc_out_err: self.failUnlessIn("404", rc_out_err[2], "mv left the source file intact")) # Let's build: # directory/directory2/some_file # directory3 d.addCallback(lambda res: self.do_cli("mkdir", "tahoe:directory/directory2")) d.addCallback(lambda res: self.do_cli("cp", fn2, "tahoe:directory/directory2/some_file")) d.addCallback(lambda res: self.do_cli("mkdir", "tahoe:directory3")) # Let's now try to mv directory/directory2/some_file to # directory3/some_file d.addCallback(lambda res: self.do_cli("mv", "tahoe:directory/directory2/some_file", "tahoe:directory3/")) # We should have just some_file in tahoe:directory3 d.addCallback(lambda res: self.do_cli("get", "tahoe:directory3/some_file", "some_file")) d.addCallback(lambda rc_out_err: self.failUnless("404" not in rc_out_err[2], "mv didn't handle nested directories correctly")) d.addCallback(lambda res: self.do_cli("get", "tahoe:directory3/directory", "directory")) d.addCallback(lambda rc_out_err: self.failUnlessIn("404", rc_out_err[2], "mv moved the wrong thing")) return d def test_mv_error_if_DELETE_fails(self): self.basedir = "cli/Mv/mv_error_if_DELETE_fails" self.set_up_grid(oneshare=True) fn1 = os.path.join(self.basedir, "file1") DATA1 = b"Nuclear launch codes" fileutil.write(fn1, DATA1) original_do_http = tahoe_mv.do_http def mock_do_http(method, url, body=b""): if method == "DELETE": class FakeResponse(object): def read(self): return "response" resp = FakeResponse() resp.status = '500 Something Went Wrong' resp.reason = '*shrug*' return resp else: return original_do_http(method, url, body=body) tahoe_mv.do_http = mock_do_http # copy file to the grid d = self.do_cli("create-alias", "tahoe") d.addCallback(lambda res: self.do_cli("cp", fn1, "tahoe:")) # do mv file1 file2 d.addCallback(lambda res: self.do_cli("mv", "tahoe:file1", "tahoe:file2")) def _check(args ): (rc, out, err) = args self.failIfIn("OK", out, "mv printed 'OK' even though the DELETE failed") self.failUnlessEqual(rc, 2) d.addCallback(_check) def _restore_do_http(res): tahoe_mv.do_http = original_do_http return res d.addBoth(_restore_do_http) return d def test_mv_without_alias(self): # doing 'tahoe mv' without explicitly specifying an alias or # creating the default 'tahoe' alias should fail with a useful # error message. self.basedir = "cli/Mv/mv_without_alias" self.set_up_grid(oneshare=True) d = self.do_cli("mv", "afile", "anotherfile") def _check(args): (rc, out, err) = args self.failUnlessReallyEqual(rc, 1) self.failUnlessIn("error:", err) self.assertEqual(len(out), 0, out) d.addCallback(_check) # check to see that the validation extends to the # target argument by making an alias that will work with the first # one. d.addCallback(lambda ign: self.do_cli("create-alias", "havasu")) def _create_a_test_file(ign): self.test_file_path = os.path.join(self.basedir, "afile") fileutil.write(self.test_file_path, "puppies" * 100) d.addCallback(_create_a_test_file) d.addCallback(lambda ign: self.do_cli("put", self.test_file_path, "havasu:afile")) d.addCallback(lambda ign: self.do_cli("mv", "havasu:afile", "anotherfile")) d.addCallback(_check) return d def test_mv_with_nonexistent_alias(self): # doing 'tahoe mv' with an alias that doesn't exist should fail # with an informative error message. 
self.basedir = "cli/Mv/mv_with_nonexistent_alias" self.set_up_grid(oneshare=True) d = self.do_cli("mv", "fake:afile", "fake:anotherfile") def _check(args): (rc, out, err) = args self.failUnlessReallyEqual(rc, 1) self.failUnlessIn("error:", err) self.failUnlessIn("fake", err) self.assertEqual(len(out), 0, out) d.addCallback(_check) # check to see that the validation extends to the # target argument by making an alias that will work with the first # one. d.addCallback(lambda ign: self.do_cli("create-alias", "havasu")) def _create_a_test_file(ign): self.test_file_path = os.path.join(self.basedir, "afile") fileutil.write(self.test_file_path, "puppies" * 100) d.addCallback(_create_a_test_file) d.addCallback(lambda ign: self.do_cli("put", self.test_file_path, "havasu:afile")) d.addCallback(lambda ign: self.do_cli("mv", "havasu:afile", "fake:anotherfile")) d.addCallback(_check) return d tahoe_lafs-1.20.0/src/allmydata/test/cli/test_put.py0000644000000000000000000005554413615410400017342 0ustar00""" Tests for the ``tahoe put`` CLI tool. """ from __future__ import annotations from typing import Callable, Awaitable, TypeVar, Any import os.path from twisted.trial import unittest from twisted.python import usage from twisted.python.filepath import FilePath from cryptography.hazmat.primitives.serialization import load_pem_private_key from allmydata.crypto.rsa import PrivateKey from allmydata.uri import from_string from allmydata.util import fileutil from allmydata.scripts.common import get_aliases from allmydata.scripts import cli from ..no_network import GridTestMixin from ..common_util import skip_if_cannot_represent_filename from allmydata.util.encodingutil import get_io_encoding from allmydata.util.fileutil import abspath_expanduser_unicode from .common import CLITestMixin from allmydata.mutable.common import derive_mutable_keys T = TypeVar("T") class Put(GridTestMixin, CLITestMixin, unittest.TestCase): def test_unlinked_immutable_stdin(self): # tahoe get `echo DATA | tahoe put` # tahoe get `echo DATA | tahoe put -` self.basedir = "cli/Put/unlinked_immutable_stdin" DATA = b"data\xff" * 100 self.set_up_grid(oneshare=True) d = self.do_cli("put", stdin=DATA) def _uploaded(res): (rc, out, err) = res self.failUnlessIn("waiting for file data on stdin..", err) self.failUnlessIn("200 OK", err) self.readcap = out self.failUnless(self.readcap.startswith("URI:CHK:")) d.addCallback(_uploaded) d.addCallback(lambda res: self.do_cli("get", self.readcap, return_bytes=True)) def _downloaded(res): (rc, out, err) = res self.failUnlessReallyEqual(err, b"") self.failUnlessReallyEqual(out, DATA) d.addCallback(_downloaded) d.addCallback(lambda res: self.do_cli("put", "-", stdin=DATA)) d.addCallback(lambda rc_out_err: self.failUnlessReallyEqual(rc_out_err[1], self.readcap)) return d def test_unlinked_immutable_from_file(self): # tahoe put file.txt # tahoe put ./file.txt # tahoe put /tmp/file.txt # tahoe put ~/file.txt self.basedir = "cli/Put/unlinked_immutable_from_file" self.set_up_grid(oneshare=True) rel_fn = str(os.path.join(self.basedir, "DATAFILE")) abs_fn = abspath_expanduser_unicode(rel_fn) # we make the file small enough to fit in a LIT file, for speed fileutil.write(rel_fn, b"short file has some bytes \xff yes") d = self.do_cli_unicode(u"put", [rel_fn]) def _uploaded(args): (rc, out, err) = args readcap = out self.failUnless(readcap.startswith("URI:LIT:"), readcap) self.readcap = readcap d.addCallback(_uploaded) d.addCallback(lambda res: self.do_cli_unicode(u"put", [u"./" + rel_fn])) d.addCallback(lambda 
rc_stdout_stderr: self.failUnlessReallyEqual(rc_stdout_stderr[1], self.readcap)) d.addCallback(lambda res: self.do_cli_unicode(u"put", [abs_fn])) d.addCallback(lambda rc_stdout_stderr: self.failUnlessReallyEqual(rc_stdout_stderr[1], self.readcap)) # we just have to assume that ~ is handled properly return d def test_immutable_from_file(self): # tahoe put file.txt uploaded.txt # tahoe - uploaded.txt # tahoe put file.txt subdir/uploaded.txt # tahoe put file.txt tahoe:uploaded.txt # tahoe put file.txt tahoe:subdir/uploaded.txt # tahoe put file.txt DIRCAP:./uploaded.txt # tahoe put file.txt DIRCAP:./subdir/uploaded.txt self.basedir = "cli/Put/immutable_from_file" self.set_up_grid(oneshare=True) rel_fn = os.path.join(self.basedir, "DATAFILE") # we make the file small enough to fit in a LIT file, for speed DATA = b"short file" DATA2 = b"short file two" fileutil.write(rel_fn, DATA) d = self.do_cli("create-alias", "tahoe") d.addCallback(lambda res: self.do_cli("put", rel_fn, "uploaded.txt")) def _uploaded(args): (rc, out, err) = args readcap = out.strip() self.failUnless(readcap.startswith("URI:LIT:"), readcap) self.failUnlessIn("201 Created", err) self.readcap = readcap d.addCallback(_uploaded) d.addCallback(lambda res: self.do_cli("get", "tahoe:uploaded.txt", return_bytes=True)) d.addCallback(lambda rc_stdout_stderr: self.failUnlessReallyEqual(rc_stdout_stderr[1], DATA)) d.addCallback(lambda res: self.do_cli("put", "-", "uploaded.txt", stdin=DATA2)) def _replaced(args): (rc, out, err) = args readcap = out.strip() self.failUnless(readcap.startswith("URI:LIT:"), readcap) self.failUnlessIn("200 OK", err) d.addCallback(_replaced) d.addCallback(lambda res: self.do_cli("put", rel_fn, "subdir/uploaded2.txt")) d.addCallback(lambda res: self.do_cli("get", "subdir/uploaded2.txt", return_bytes=True)) d.addCallback(lambda rc_stdout_stderr: self.failUnlessReallyEqual(rc_stdout_stderr[1], DATA)) d.addCallback(lambda res: self.do_cli("put", rel_fn, "tahoe:uploaded3.txt")) d.addCallback(lambda res: self.do_cli("get", "tahoe:uploaded3.txt", return_bytes=True)) d.addCallback(lambda rc_stdout_stderr: self.failUnlessReallyEqual(rc_stdout_stderr[1], DATA)) d.addCallback(lambda res: self.do_cli("put", rel_fn, "tahoe:subdir/uploaded4.txt")) d.addCallback(lambda res: self.do_cli("get", "tahoe:subdir/uploaded4.txt", return_bytes=True)) d.addCallback(lambda rc_stdout_stderr: self.failUnlessReallyEqual(rc_stdout_stderr[1], DATA)) def _get_dircap(res): self.dircap = str(get_aliases(self.get_clientdir())["tahoe"], "ascii") d.addCallback(_get_dircap) d.addCallback(lambda res: self.do_cli("put", rel_fn, self.dircap+":./uploaded5.txt")) d.addCallback(lambda res: self.do_cli("get", "tahoe:uploaded5.txt", return_bytes=True)) d.addCallback(lambda rc_stdout_stderr: self.failUnlessReallyEqual(rc_stdout_stderr[1], DATA)) d.addCallback(lambda res: self.do_cli("put", rel_fn, self.dircap+":./subdir/uploaded6.txt")) d.addCallback(lambda res: self.do_cli("get", "tahoe:subdir/uploaded6.txt", return_bytes=True)) d.addCallback(lambda rc_stdout_stderr: self.failUnlessReallyEqual(rc_stdout_stderr[1], DATA)) return d def test_mutable_unlinked(self): # FILECAP = `echo DATA | tahoe put --mutable` # tahoe get FILECAP, compare against DATA # echo DATA2 | tahoe put - FILECAP # tahoe get FILECAP, compare against DATA2 # tahoe put file.txt FILECAP self.basedir = "cli/Put/mutable_unlinked" self.set_up_grid(oneshare=True) DATA = b"data" * 100 DATA2 = b"two" * 100 rel_fn = os.path.join(self.basedir, "DATAFILE") DATA3 = b"three" * 100 
fileutil.write(rel_fn, DATA3) d = self.do_cli("put", "--mutable", stdin=DATA) def _created(res): (rc, out, err) = res self.failUnlessIn("waiting for file data on stdin..", err) self.failUnlessIn("200 OK", err) self.filecap = out self.failUnless(self.filecap.startswith("URI:SSK:"), self.filecap) d.addCallback(_created) d.addCallback(lambda res: self.do_cli("get", self.filecap, return_bytes=True)) d.addCallback(lambda rc_out_err: self.failUnlessReallyEqual(rc_out_err[1], DATA)) d.addCallback(lambda res: self.do_cli("put", "-", self.filecap, stdin=DATA2)) def _replaced(res): (rc, out, err) = res self.failUnlessIn("waiting for file data on stdin..", err) self.failUnlessIn("200 OK", err) self.failUnlessReallyEqual(self.filecap, out) d.addCallback(_replaced) d.addCallback(lambda res: self.do_cli("get", self.filecap, return_bytes=True)) d.addCallback(lambda rc_out_err: self.failUnlessReallyEqual(rc_out_err[1], DATA2)) d.addCallback(lambda res: self.do_cli("put", rel_fn, self.filecap)) def _replaced2(res): (rc, out, err) = res self.failUnlessIn("200 OK", err) self.failUnlessReallyEqual(self.filecap, out) d.addCallback(_replaced2) d.addCallback(lambda res: self.do_cli("get", self.filecap, return_bytes=True)) d.addCallback(lambda rc_out_err: self.failUnlessReallyEqual(rc_out_err[1], DATA3)) return d async def test_unlinked_mutable_specified_private_key(self) -> None: """ A new unlinked mutable can be created using a specified private key. """ self.basedir = "cli/Put/unlinked-mutable-with-key" await self._test_mutable_specified_key( lambda do_cli, pempath, datapath: do_cli( "put", "--mutable", "--private-key-path", pempath.path, stdin=datapath.getContent(), ), ) async def test_linked_mutable_specified_private_key(self) -> None: """ A new linked mutable can be created using a specified private key. """ self.basedir = "cli/Put/linked-mutable-with-key" await self._test_mutable_specified_key( lambda do_cli, pempath, datapath: do_cli( "put", "--mutable", "--private-key-path", pempath.path, datapath.path, ), ) async def _test_mutable_specified_key( self, run: Callable[[Any, FilePath, FilePath], Awaitable[tuple[int, bytes, bytes]]], ) -> None: """ A helper for testing mutable creation. :param run: A function to do the creation. It is called with ``self.do_cli`` and the path to a private key PEM file and a data file. It returns whatever ``do_cli`` returns. """ self.set_up_grid(oneshare=True) pempath = FilePath(__file__).parent().sibling("data").child("openssl-rsa-2048.txt") datapath = FilePath(self.basedir).child("data") datapath.setContent(b"Hello world" * 1024) (rc, out, err) = await run(self.do_cli, pempath, datapath) self.assertEqual(rc, 0, (out, err)) cap = from_string(out.strip()) # The capability is derived from the key we specified. privkey = load_pem_private_key(pempath.getContent(), password=None) assert isinstance(privkey, PrivateKey) pubkey = privkey.public_key() writekey, _, fingerprint = derive_mutable_keys((pubkey, privkey)) self.assertEqual( (writekey, fingerprint), (cap.writekey, cap.fingerprint), ) # Also the capability we were given actually refers to the data we # uploaded. 
(rc, out, err) = await self.do_cli("get", out.strip()) self.assertEqual(rc, 0, (out, err)) self.assertEqual(out, datapath.getContent().decode("ascii")) def test_mutable(self): # echo DATA1 | tahoe put --mutable - uploaded.txt # echo DATA2 | tahoe put - uploaded.txt # should modify-in-place # tahoe get uploaded.txt, compare against DATA2 self.basedir = "cli/Put/mutable" self.set_up_grid(oneshare=True) DATA1 = b"data" * 100 fn1 = os.path.join(self.basedir, "DATA1") fileutil.write(fn1, DATA1) DATA2 = b"two\xff" * 100 fn2 = os.path.join(self.basedir, "DATA2") fileutil.write(fn2, DATA2) d = self.do_cli("create-alias", "tahoe") d.addCallback(lambda res: self.do_cli("put", "--mutable", fn1, "tahoe:uploaded.txt")) def _check(res): (rc, out, err) = res self.failUnlessEqual(rc, 0, str(res)) self.failUnlessEqual(err.strip(), "201 Created", str(res)) self.uri = out d.addCallback(_check) d.addCallback(lambda res: self.do_cli("put", fn2, "tahoe:uploaded.txt")) def _check2(res): (rc, out, err) = res self.failUnlessEqual(rc, 0, str(res)) self.failUnlessEqual(err.strip(), "200 OK", str(res)) self.failUnlessEqual(out, self.uri, str(res)) d.addCallback(_check2) d.addCallback(lambda res: self.do_cli("get", "tahoe:uploaded.txt", return_bytes=True)) d.addCallback(lambda rc_out_err: self.failUnlessReallyEqual(rc_out_err[1], DATA2)) return d def _check_mdmf_json(self, args): (rc, json, err) = args self.failUnlessEqual(rc, 0) self.failUnlessEqual(err, "") self.failUnlessIn('"format": "MDMF"', json) # We also want a valid MDMF cap to be in the json. self.failUnlessIn("URI:MDMF", json) self.failUnlessIn("URI:MDMF-RO", json) self.failUnlessIn("URI:MDMF-Verifier", json) def _check_sdmf_json(self, args): (rc, json, err) = args self.failUnlessEqual(rc, 0) self.failUnlessEqual(err, "") self.failUnlessIn('"format": "SDMF"', json) # We also want to see the appropriate SDMF caps. self.failUnlessIn("URI:SSK", json) self.failUnlessIn("URI:SSK-RO", json) self.failUnlessIn("URI:SSK-Verifier", json) def _check_chk_json(self, args): (rc, json, err) = args self.failUnlessEqual(rc, 0) self.failUnlessEqual(err, "") self.failUnlessIn('"format": "CHK"', json) # We also want to see the appropriate CHK caps. 
self.failUnlessIn("URI:CHK", json) self.failUnlessIn("URI:CHK-Verifier", json) def test_format(self): self.basedir = "cli/Put/format" self.set_up_grid(oneshare=True) data = "data" * 40000 # 160kB total, two segments fn1 = os.path.join(self.basedir, "data") fileutil.write(fn1, data) d = self.do_cli("create-alias", "tahoe") def _put_and_ls(ign, cmdargs, expected, filename=None): if filename: args = ["put"] + cmdargs + [fn1, filename] else: # unlinked args = ["put"] + cmdargs + [fn1] d2 = self.do_cli(*args) def _list(args): (rc, out, err) = args self.failUnlessEqual(rc, 0) # don't allow failure if filename: return self.do_cli("ls", "--json", filename) else: cap = out.strip() return self.do_cli("ls", "--json", cap) d2.addCallback(_list) return d2 # 'tahoe put' to a directory d.addCallback(_put_and_ls, ["--mutable"], "SDMF", "tahoe:s1.txt") d.addCallback(self._check_sdmf_json) # backwards-compatibility d.addCallback(_put_and_ls, ["--format=SDMF"], "SDMF", "tahoe:s2.txt") d.addCallback(self._check_sdmf_json) d.addCallback(_put_and_ls, ["--format=sdmf"], "SDMF", "tahoe:s3.txt") d.addCallback(self._check_sdmf_json) d.addCallback(_put_and_ls, ["--mutable", "--format=SDMF"], "SDMF", "tahoe:s4.txt") d.addCallback(self._check_sdmf_json) d.addCallback(_put_and_ls, ["--format=MDMF"], "MDMF", "tahoe:m1.txt") d.addCallback(self._check_mdmf_json) d.addCallback(_put_and_ls, ["--mutable", "--format=MDMF"], "MDMF", "tahoe:m2.txt") d.addCallback(self._check_mdmf_json) d.addCallback(_put_and_ls, ["--format=CHK"], "CHK", "tahoe:c1.txt") d.addCallback(self._check_chk_json) d.addCallback(_put_and_ls, [], "CHK", "tahoe:c1.txt") d.addCallback(self._check_chk_json) # 'tahoe put' unlinked d.addCallback(_put_and_ls, ["--mutable"], "SDMF") d.addCallback(self._check_sdmf_json) # backwards-compatibility d.addCallback(_put_and_ls, ["--format=SDMF"], "SDMF") d.addCallback(self._check_sdmf_json) d.addCallback(_put_and_ls, ["--format=sdmf"], "SDMF") d.addCallback(self._check_sdmf_json) d.addCallback(_put_and_ls, ["--mutable", "--format=SDMF"], "SDMF") d.addCallback(self._check_sdmf_json) d.addCallback(_put_and_ls, ["--format=MDMF"], "MDMF") d.addCallback(self._check_mdmf_json) d.addCallback(_put_and_ls, ["--mutable", "--format=MDMF"], "MDMF") d.addCallback(self._check_mdmf_json) d.addCallback(_put_and_ls, ["--format=CHK"], "CHK") d.addCallback(self._check_chk_json) d.addCallback(_put_and_ls, [], "CHK") d.addCallback(self._check_chk_json) return d def test_put_to_mdmf_cap(self): self.basedir = "cli/Put/put_to_mdmf_cap" self.set_up_grid(oneshare=True) data = "data" * 100000 fn1 = os.path.join(self.basedir, "data") fileutil.write(fn1, data) d = self.do_cli("put", "--format=MDMF", fn1) def _got_cap(args): (rc, out, err) = args self.failUnlessEqual(rc, 0) self.cap = out.strip() d.addCallback(_got_cap) # Now try to write something to the cap using put. data2 = "data2" * 100000 fn2 = os.path.join(self.basedir, "data2") fileutil.write(fn2, data2) d.addCallback(lambda ignored: self.do_cli("put", fn2, self.cap)) def _got_put(args): (rc, out, err) = args self.failUnlessEqual(rc, 0) self.failUnlessIn(self.cap, out) d.addCallback(_got_put) # Now get the cap. We should see the data we just put there. d.addCallback(lambda ignored: self.do_cli("get", self.cap)) def _got_data(args): (rc, out, err) = args self.failUnlessEqual(rc, 0) self.failUnlessEqual(out, data2) d.addCallback(_got_data) # add some extension information to the cap and try to put something # to it. 
def _make_extended_cap(ignored): self.cap = self.cap + ":Extension-Stuff" d.addCallback(_make_extended_cap) data3 = "data3" * 100000 fn3 = os.path.join(self.basedir, "data3") fileutil.write(fn3, data3) d.addCallback(lambda ignored: self.do_cli("put", fn3, self.cap)) d.addCallback(lambda ignored: self.do_cli("get", self.cap)) def _got_data3(args): (rc, out, err) = args self.failUnlessEqual(rc, 0) self.failUnlessEqual(out, data3) d.addCallback(_got_data3) return d def test_put_to_sdmf_cap(self): self.basedir = "cli/Put/put_to_sdmf_cap" self.set_up_grid(oneshare=True) data = "data" * 100000 fn1 = os.path.join(self.basedir, "data") fileutil.write(fn1, data) d = self.do_cli("put", "--format=SDMF", fn1) def _got_cap(args): (rc, out, err) = args self.failUnlessEqual(rc, 0) self.cap = out.strip() d.addCallback(_got_cap) # Now try to write something to the cap using put. data2 = "data2" * 100000 fn2 = os.path.join(self.basedir, "data2") fileutil.write(fn2, data2) d.addCallback(lambda ignored: self.do_cli("put", fn2, self.cap)) def _got_put(args): (rc, out, err) = args self.failUnlessEqual(rc, 0) self.failUnlessIn(self.cap, out) d.addCallback(_got_put) # Now get the cap. We should see the data we just put there. d.addCallback(lambda ignored: self.do_cli("get", self.cap)) def _got_data(args): (rc, out, err) = args self.failUnlessEqual(rc, 0) self.failUnlessEqual(out, data2) d.addCallback(_got_data) return d def test_mutable_type_invalid_format(self): o = cli.PutOptions() self.failUnlessRaises(usage.UsageError, o.parseOptions, ["--format=LDMF"]) def test_put_with_nonexistent_alias(self): # when invoked with an alias that doesn't exist, 'tahoe put' # should output a useful error message, not a stack trace self.basedir = "cli/Put/put_with_nonexistent_alias" self.set_up_grid(oneshare=True) d = self.do_cli("put", "somefile", "fake:afile") def _check(args): (rc, out, err) = args self.failUnlessReallyEqual(rc, 1) self.failUnlessIn("error:", err) self.assertEqual(len(out), 0, out) d.addCallback(_check) return d def test_immutable_from_file_unicode(self): # tahoe put "\u00E0 trier.txt" "\u00E0 trier.txt" a_trier_arg = u"\u00E0 trier.txt" skip_if_cannot_represent_filename(u"\u00E0 trier.txt") self.basedir = "cli/Put/immutable_from_file_unicode" self.set_up_grid(oneshare=True) rel_fn = os.path.join(str(self.basedir), u"\u00E0 trier.txt") # we make the file small enough to fit in a LIT file, for speed DATA = b"short file \xff bytes" fileutil.write(rel_fn, DATA) d = self.do_cli("create-alias", "tahoe") d.addCallback(lambda res: self.do_cli("put", rel_fn.encode(get_io_encoding()), a_trier_arg)) def _uploaded(args): (rc, out, err) = args readcap = out.strip() self.failUnless(readcap.startswith("URI:LIT:"), readcap) self.failUnlessIn("201 Created", err) self.readcap = readcap d.addCallback(_uploaded) d.addCallback(lambda res: self.do_cli("get", "tahoe:" + a_trier_arg, return_bytes=True)) d.addCallback(lambda rc_out_err: self.failUnlessReallyEqual(rc_out_err[1], DATA)) return d def test_no_leading_slash(self): self.basedir = "cli/Put/leading_slash" self.set_up_grid(oneshare=True) fn1 = os.path.join(self.basedir, "DATA1") d = self.do_cli("create-alias", "tahoe") d.addCallback(lambda res: self.do_cli("put", fn1, "tahoe:/uploaded.txt")) def _check(args): (rc, out, err) = args self.assertEqual(rc, 1) self.failUnlessIn("must not start with a slash", err) self.assertEqual(len(out), 0, out) d.addCallback(_check) return d tahoe_lafs-1.20.0/src/allmydata/test/cli/test_run.py0000644000000000000000000001765013615410400017332 
0ustar00""" Tests for ``allmydata.scripts.tahoe_run``. """ from __future__ import annotations import re from io import ( StringIO, ) from hypothesis.strategies import text from hypothesis import given, assume from testtools.matchers import ( Contains, Equals, ) from twisted.python.filepath import ( FilePath, ) from twisted.internet.testing import ( MemoryReactor, ) from twisted.python.failure import ( Failure, ) from twisted.internet.error import ( ConnectionDone, ) from twisted.internet.test.modulehelpers import ( AlternateReactor, ) from ...scripts.tahoe_run import ( DaemonizeTheRealService, RunOptions, run, ) from ...util.pid import ( check_pid_process, InvalidPidFile, ) from ...scripts.runner import ( parse_options ) from ..common import ( SyncTestCase, ) class DaemonizeTheRealServiceTests(SyncTestCase): """ Tests for ``DaemonizeTheRealService``. """ def _verify_error(self, config, expected): """ Assert that when ``DaemonizeTheRealService`` is started using the given configuration it writes the given message to stderr and stops the reactor. :param bytes config: The contents of a ``tahoe.cfg`` file to give to the service. :param bytes expected: A string to assert appears in stderr after the service starts. """ nodedir = FilePath(self.mktemp()) nodedir.makedirs() nodedir.child("tahoe.cfg").setContent(config.encode("ascii")) nodedir.child("tahoe-client.tac").touch() options = parse_options(["run", nodedir.path]) stdout = options.stdout = StringIO() stderr = options.stderr = StringIO() run_options = options.subOptions reactor = MemoryReactor() with AlternateReactor(reactor): service = DaemonizeTheRealService( "client", nodedir.path, run_options, ) service.startService() # We happen to know that the service uses reactor.callWhenRunning # to schedule all its work (though I couldn't tell you *why*). # Make sure those scheduled calls happen. waiting = reactor.whenRunningHooks[:] del reactor.whenRunningHooks[:] for f, a, k in waiting: f(*a, **k) self.assertThat( reactor.hasStopped, Equals(True), ) self.assertThat( stdout.getvalue(), Equals(""), ) self.assertThat( stderr.getvalue(), Contains(expected), ) def test_unknown_config(self): """ If there are unknown items in the node configuration file then a short message introduced with ``"Configuration error:"`` is written to stderr. """ self._verify_error("[invalid-section]\n", "Configuration error:") def test_port_assignment_required(self): """ If ``tub.port`` is configured to use port 0 then a short message rejecting this configuration is written to stderr. """ self._verify_error( """ [node] tub.port = 0 """, "tub.port cannot be 0", ) def test_privacy_error(self): """ If ``reveal-IP-address`` is set to false and the tub is not configured in a way that avoids revealing the node's IP address, a short message about privacy is written to stderr. """ self._verify_error( """ [node] tub.port = AUTO reveal-IP-address = false """, "Privacy requested", ) class DaemonizeStopTests(SyncTestCase): """ Tests relating to stopping the daemon """ def setUp(self): self.nodedir = FilePath(self.mktemp()) self.nodedir.makedirs() config = "" self.nodedir.child("tahoe.cfg").setContent(config.encode("ascii")) self.nodedir.child("tahoe-client.tac").touch() # arrange to know when reactor.stop() is called self.reactor = MemoryReactor() self.stop_calls = [] def record_stop(): self.stop_calls.append(object()) self.reactor.stop = record_stop super().setUp() def _make_daemon(self, extra_argv: list[str]) -> DaemonizeTheRealService: """ Create the daemonization service. 
:param extra_argv: Extra arguments to pass between ``run`` and the node path. """ options = parse_options(["run"] + extra_argv + [self.nodedir.path]) options.stdout = StringIO() options.stderr = StringIO() options.stdin = StringIO() run_options = options.subOptions return DaemonizeTheRealService( "client", self.nodedir.path, run_options, ) def _run_daemon(self) -> None: """ Simulate starting up the reactor so the daemon plugin can do its stuff. """ # We happen to know that the service uses reactor.callWhenRunning # to schedule all its work (though I couldn't tell you *why*). # Make sure those scheduled calls happen. waiting = self.reactor.whenRunningHooks[:] del self.reactor.whenRunningHooks[:] for f, a, k in waiting: f(*a, **k) def _close_stdin(self) -> None: """ Simulate closing the daemon plugin's stdin. """ # there should be a single reader: our StandardIO process # reader for stdin. Simulate it closing. for r in self.reactor.getReaders(): r.connectionLost(Failure(ConnectionDone())) def test_stop_on_stdin_close(self): """ We stop when stdin is closed. """ with AlternateReactor(self.reactor): service = self._make_daemon([]) service.startService() self._run_daemon() self._close_stdin() self.assertEqual(len(self.stop_calls), 1) def test_allow_stdin_close(self): """ If --allow-stdin-close is specified then closing stdin doesn't stop the process """ with AlternateReactor(self.reactor): service = self._make_daemon(["--allow-stdin-close"]) service.startService() self._run_daemon() self._close_stdin() self.assertEqual(self.stop_calls, []) class RunTests(SyncTestCase): """ Tests for ``run``. """ def test_non_numeric_pid(self): """ If the pidfile exists but does not contain a numeric value, a complaint to this effect is written to stderr. """ basedir = FilePath(self.mktemp()).asTextMode() basedir.makedirs() basedir.child(u"running.process").setContent(b"foo") basedir.child(u"tahoe-client.tac").setContent(b"") config = RunOptions() config.stdout = StringIO() config.stderr = StringIO() config['basedir'] = basedir.path config.twistd_args = [] reactor = MemoryReactor() runs = [] result_code = run(reactor, config, runApp=runs.append) self.assertThat( config.stderr.getvalue(), Contains("found invalid PID file in"), ) # because the pidfile is invalid we shouldn't get to the # .run() call itself. self.assertThat(runs, Equals([])) self.assertThat(result_code, Equals(1)) good_file_content_re = re.compile(r"\s*[0-9]*\s[0-9]*\s*", re.M) @given(text()) def test_pidfile_contents(self, content): """ invalid contents for a pidfile raise errors """ assume(not self.good_file_content_re.match(content)) pidfile = FilePath("pidfile") pidfile.setContent(content.encode("utf8")) with self.assertRaises(InvalidPidFile): with check_pid_process(pidfile): pass tahoe_lafs-1.20.0/src/allmydata/test/cli/test_status.py0000644000000000000000000001672713615410400020055 0ustar00""" Ported to Python 3. 
""" from six import ensure_text import os import tempfile from io import BytesIO, StringIO from os.path import join from twisted.trial import unittest from twisted.internet import defer from allmydata.mutable.publish import MutableData from allmydata.scripts.common_http import BadResponse from allmydata.scripts.tahoe_status import _handle_response_for_fragment from allmydata.scripts.tahoe_status import _get_request_parameters_for_fragment from allmydata.scripts.tahoe_status import pretty_progress from allmydata.scripts.tahoe_status import do_status from allmydata.web.status import marshal_json from allmydata.immutable.upload import UploadStatus from allmydata.immutable.downloader.status import DownloadStatus from allmydata.mutable.publish import PublishStatus from allmydata.mutable.retrieve import RetrieveStatus from allmydata.mutable.servermap import UpdateStatus from allmydata.util import jsonbytes as json from ..no_network import GridTestMixin from ..common_web import do_http from .common import CLITestMixin class FakeStatus(object): def __init__(self): self.status = [] def setServiceParent(self, p): pass def get_status(self): return self.status def get_storage_index(self): return None def get_size(self): return None class ProgressBar(unittest.TestCase): def test_ascii0(self): prog = pretty_progress(80.0, size=10, output_ascii=True) self.assertEqual('########. ', prog) def test_ascii1(self): prog = pretty_progress(10.0, size=10, output_ascii=True) self.assertEqual('#. ', prog) def test_ascii2(self): prog = pretty_progress(13.0, size=10, output_ascii=True) self.assertEqual('#o ', prog) def test_ascii3(self): prog = pretty_progress(90.0, size=10, output_ascii=True) self.assertEqual('#########.', prog) def test_unicode0(self): self.assertEqual( pretty_progress(82.0, size=10, output_ascii=False), u'\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u258e ', ) def test_unicode1(self): self.assertEqual( pretty_progress(100.0, size=10, output_ascii=False), u'\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588', ) class _FakeOptions(dict): def __init__(self): self._tmp = tempfile.mkdtemp() os.mkdir(join(self._tmp, 'private'), 0o777) with open(join(self._tmp, 'private', 'api_auth_token'), 'w') as f: f.write('a' * 32) with open(join(self._tmp, 'node.url'), 'w') as f: f.write('localhost:9000') self['node-directory'] = self._tmp self['verbose'] = True self.stdout = StringIO() self.stderr = StringIO() class Integration(GridTestMixin, CLITestMixin, unittest.TestCase): @defer.inlineCallbacks def setUp(self): yield super(Integration, self).setUp() self.basedir = "cli/status" self.set_up_grid() # upload something c0 = self.g.clients[0] data = MutableData(b"data" * 100) filenode = yield c0.create_mutable_file(data) self.uri = filenode.get_uri() # make sure our web-port is actually answering yield do_http("get", 'http://127.0.0.1:{}/status?t=json'.format(self.client_webports[0])) def test_simple(self): d = self.do_cli('status')# '--verbose') def _check(ign): code, stdout, stderr = ign self.assertEqual(code, 0, stderr) self.assertTrue('Skipped 1' in stdout) d.addCallback(_check) return d @defer.inlineCallbacks def test_help(self): rc, _, _ = yield self.do_cli('status', '--help') self.assertEqual(rc, 0) class CommandStatus(unittest.TestCase): """ These tests just exercise the renderers and ensure they don't catastrophically fail. 
""" def setUp(self): self.options = _FakeOptions() def test_no_operations(self): values = [ StringIO(ensure_text(json.dumps({ "active": [], "recent": [], }))), StringIO(ensure_text(json.dumps({ "counters": { "bytes_downloaded": 0, }, "stats": { "node.uptime": 0, } }))), ] def do_http(*args, **kw): return values.pop(0) do_status(self.options, do_http) def test_simple(self): recent_items = active_items = [ UploadStatus(), DownloadStatus(b"abcd", 12345), PublishStatus(), RetrieveStatus(), UpdateStatus(), FakeStatus(), ] values = [ BytesIO(json.dumps({ "active": list( marshal_json(item) for item in active_items ), "recent": list( marshal_json(item) for item in recent_items ), }).encode("utf-8")), BytesIO(json.dumps({ "counters": { "bytes_downloaded": 0, }, "stats": { "node.uptime": 0, } }).encode("utf-8")), ] def do_http(*args, **kw): return values.pop(0) do_status(self.options, do_http) def test_fetch_error(self): def do_http(*args, **kw): raise RuntimeError("boom") do_status(self.options, do_http) class JsonHelpers(unittest.TestCase): def test_bad_response(self): def do_http(*args, **kw): return with self.assertRaises(RuntimeError) as ctx: _handle_response_for_fragment( BadResponse('the url', 'some err'), 'http://localhost:1234', ) self.assertIn( "Failed to get", str(ctx.exception), ) def test_happy_path(self): resp = _handle_response_for_fragment( StringIO('{"some": "json"}'), 'http://localhost:1234/', ) self.assertEqual(resp, dict(some='json')) def test_happy_path_post(self): resp = _handle_response_for_fragment( StringIO('{"some": "json"}'), 'http://localhost:1234/', ) self.assertEqual(resp, dict(some='json')) def test_no_data_returned(self): with self.assertRaises(RuntimeError) as ctx: _handle_response_for_fragment(StringIO('null'), 'http://localhost:1234') self.assertIn('No data from', str(ctx.exception)) def test_no_post_args(self): with self.assertRaises(ValueError) as ctx: _get_request_parameters_for_fragment( {'node-url': 'http://localhost:1234'}, '/fragment', method='POST', post_args=None, ) self.assertIn( "Must pass post_args", str(ctx.exception), ) def test_post_args_for_get(self): with self.assertRaises(ValueError) as ctx: _get_request_parameters_for_fragment( {'node-url': 'http://localhost:1234'}, '/fragment', method='GET', post_args={'foo': 'bar'} ) self.assertIn( "only valid for POST", str(ctx.exception), ) tahoe_lafs-1.20.0/src/allmydata/test/cli/wormholetesting.py0000644000000000000000000002410313615410400020710 0ustar00""" An in-memory implementation of some of the magic-wormhole interfaces for use by automated tests. For example:: async def peerA(mw): wormhole = mw.create("myapp", "wss://myserver", reactor) code = await wormhole.get_code() print(f"I have a code: {code}") message = await wormhole.when_received() print(f"I have a message: {message}") async def local_peerB(helper, mw): peerA_wormhole = await helper.wait_for_wormhole("myapp", "wss://myserver") code = await peerA_wormhole.when_code() peerB_wormhole = mw.create("myapp", "wss://myserver") peerB_wormhole.set_code(code) peerB_wormhole.send_message("Hello, peer A") # Run peerA against local_peerB with pure in-memory message passing. server, helper = memory_server() run(gather(peerA(server), local_peerB(helper, server))) # Run peerA against a peerB somewhere out in the world, using a real # wormhole relay server somewhere. 
import wormhole run(peerA(wormhole)) """ from __future__ import annotations __all__ = ['MemoryWormholeServer', 'TestingHelper', 'memory_server', 'IWormhole'] from typing import Iterator, Optional, List, Tuple, Any, TextIO from inspect import getfullargspec from itertools import count from sys import stderr from attrs import frozen, define, field, Factory from twisted.internet.defer import Deferred, DeferredQueue, succeed from wormhole._interfaces import IWormhole from wormhole.wormhole import create from zope.interface import implementer WormholeCode = str WormholeMessage = bytes AppId = str RelayURL = str ApplicationKey = Tuple[RelayURL, AppId] @define class MemoryWormholeServer(object): """ A factory for in-memory wormholes. :ivar _apps: Wormhole state arranged by the application id and relay URL it belongs to. :ivar _waiters: Observers waiting for a wormhole to be created for a specific application id and relay URL combination. """ _apps: dict[ApplicationKey, _WormholeApp] = field(default=Factory(dict)) _waiters: dict[ApplicationKey, Deferred[IWormhole]] = field(default=Factory(dict)) def create( self, appid: str, relay_url: str, reactor: Any, # Unfortunately we need a mutable default to match the real API versions: Any={}, # noqa: B006 delegate: Optional[Any]=None, journal: Optional[Any]=None, tor: Optional[Any]=None, timing: Optional[Any]=None, stderr: TextIO=stderr, _eventual_queue: Optional[Any]=None, _enable_dilate: bool=False, ) -> _MemoryWormhole: """ Create a wormhole. It will be able to connect to other wormholes created by this instance (and constrained by the normal appid/relay_url rules). """ if tor is not None: raise ValueError("Cannot deal with Tor right now.") if _enable_dilate: raise ValueError("Cannot deal with dilation right now.") key = (relay_url, appid) wormhole = _MemoryWormhole(self._view(key)) if key in self._waiters: self._waiters.pop(key).callback(wormhole) return wormhole def _view(self, key: ApplicationKey) -> _WormholeServerView: """ Created a view onto this server's state that is limited by a certain appid/relay_url pair. """ return _WormholeServerView(self, key) @frozen class TestingHelper(object): """ Provide extra functionality for interacting with an in-memory wormhole implementation. This is intentionally a separate API so that it is not confused with proper public interface of the real wormhole implementation. """ _server: MemoryWormholeServer async def wait_for_wormhole(self, appid: AppId, relay_url: RelayURL) -> IWormhole: """ Wait for a wormhole to appear at a specific location. :param appid: The appid that the resulting wormhole will have. :param relay_url: The URL of the relay at which the resulting wormhole will presume to be created. :return: The first wormhole to be created which matches the given parameters. """ key = (relay_url, appid) if key in self._server._waiters: raise ValueError(f"There is already a waiter for {key}") d : Deferred[IWormhole] = Deferred() self._server._waiters[key] = d wormhole = await d return wormhole def _verify() -> None: """ Roughly confirm that the in-memory wormhole creation function matches the interface of the real implementation. """ # Poor man's interface verification. a = getfullargspec(create) b = getfullargspec(MemoryWormholeServer.create) # I know it has a `self` argument at the beginning. That's okay. 
b = b._replace(args=b.args[1:]) # Just compare the same information to check function signature assert a.varkw == b.varkw assert a.args == b.args assert a.varargs == b.varargs assert a.kwonlydefaults == b.kwonlydefaults assert a.defaults == b.defaults _verify() @define class _WormholeApp(object): """ Represent a collection of wormholes that belong to the same appid/relay_url scope. """ wormholes: dict[WormholeCode, IWormhole] = field(default=Factory(dict)) _waiting: dict[WormholeCode, List[Deferred[_MemoryWormhole]]] = field(default=Factory(dict)) _counter: Iterator[int] = field(default=Factory(count)) def allocate_code(self, wormhole: IWormhole, code: Optional[WormholeCode]) -> WormholeCode: """ Allocate a new code for the given wormhole. This also associates the given wormhole with the code for future lookup. Code generation logic is trivial and certainly not good enough for any real use. It is sufficient for automated testing, though. """ if code is None: code = "{}-persnickety-tardigrade".format(next(self._counter)) self.wormholes.setdefault(code, []).append(wormhole) try: waiters = self._waiting.pop(code) except KeyError: pass else: for w in waiters: w.callback(wormhole) return code def wait_for_wormhole(self, code: WormholeCode) -> Deferred[_MemoryWormhole]: """ Return a ``Deferred`` which fires with the next wormhole to be associated with the given code. This is used to let the first end of a wormhole rendezvous with the second end. """ d : Deferred[_MemoryWormhole] = Deferred() self._waiting.setdefault(code, []).append(d) return d @frozen class _WormholeServerView(object): """ Present an interface onto the server to be consumed by individual wormholes. """ _server: MemoryWormholeServer _key: ApplicationKey def allocate_code(self, wormhole: _MemoryWormhole, code: Optional[WormholeCode]) -> WormholeCode: """ Allocate a new code for the given wormhole in the scope associated with this view. """ app = self._server._apps.setdefault(self._key, _WormholeApp()) return app.allocate_code(wormhole, code) def wormhole_by_code(self, code: WormholeCode, exclude: object) -> Deferred[IWormhole]: """ Retrieve all wormholes previously associated with a code. """ app = self._server._apps[self._key] wormholes = app.wormholes[code] try: [wormhole] = list(wormhole for wormhole in wormholes if wormhole != exclude) except ValueError: return app.wait_for_wormhole(code) return succeed(wormhole) @implementer(IWormhole) @define class _MemoryWormhole(object): """ Represent one side of a wormhole as conceived by ``MemoryWormholeServer``. 
""" _view: _WormholeServerView _code: Optional[WormholeCode] = None _payload: DeferredQueue[WormholeMessage] = field(default=Factory(DeferredQueue)) _waiting_for_code: list[Deferred[WormholeCode]] = field(default=Factory(list)) def allocate_code(self) -> None: if self._code is not None: raise ValueError( "allocate_code used with a wormhole which already has a code" ) self._code = self._view.allocate_code(self, None) waiters = self._waiting_for_code self._waiting_for_code = [] for d in waiters: d.callback(self._code) def set_code(self, code: WormholeCode) -> None: if self._code is None: self._code = code self._view.allocate_code(self, code) else: raise ValueError("set_code used with a wormhole which already has a code") def when_code(self) -> Deferred[WormholeCode]: if self._code is None: d : Deferred[WormholeCode] = Deferred() self._waiting_for_code.append(d) return d return succeed(self._code) def get_welcome(self) -> Deferred[str]: return succeed("welcome") def send_message(self, payload: WormholeMessage) -> None: self._payload.put(payload) def when_received(self) -> Deferred[WormholeMessage]: if self._code is None: raise ValueError( "This implementation requires set_code or allocate_code " "before when_received." ) d = self._view.wormhole_by_code(self._code, exclude=self) def got_wormhole(wormhole: _MemoryWormhole) -> Deferred[WormholeMessage]: msg: Deferred[WormholeMessage] = wormhole._payload.get() return msg d.addCallback(got_wormhole) return d get_message = when_received def close(self) -> None: pass # 0.9.2 compatibility def get_code(self) -> Deferred[WormholeCode]: if self._code is None: self.allocate_code() return self.when_code() get = when_received def memory_server() -> tuple[MemoryWormholeServer, TestingHelper]: """ Create a paired in-memory wormhole server and testing helper. 
""" server = MemoryWormholeServer() return server, TestingHelper(server) tahoe_lafs-1.20.0/src/allmydata/test/data/lease_checker.history.txt0000644000000000000000000001102013615410400022256 0ustar00(dp0 I363 (dp1 Vconfigured-expiration-mode p2 (S'age' p3 NN(S'immutable' p4 S'mutable' p5 tp6 tp7 sVexpiration-enabled p8 I00 sVleases-per-share-histogram p9 (dp10 I1 I39774 ssVlease-age-histogram p11 (lp12 (I0 I86400 I3125 tp13 a(I345600 I432000 I4175 tp14 a(I950400 I1036800 I141 tp15 a(I1036800 I1123200 I345 tp16 a(I1123200 I1209600 I81 tp17 a(I1296000 I1382400 I1832 tp18 a(I1555200 I1641600 I390 tp19 a(I1728000 I1814400 I12 tp20 a(I2073600 I2160000 I84 tp21 a(I2160000 I2246400 I228 tp22 a(I2246400 I2332800 I75 tp23 a(I2592000 I2678400 I644 tp24 a(I2678400 I2764800 I273 tp25 a(I2764800 I2851200 I94 tp26 a(I2851200 I2937600 I97 tp27 a(I3196800 I3283200 I143 tp28 a(I3283200 I3369600 I48 tp29 a(I4147200 I4233600 I374 tp30 a(I4320000 I4406400 I534 tp31 a(I5270400 I5356800 I1005 tp32 a(I6739200 I6825600 I8704 tp33 a(I6825600 I6912000 I3986 tp34 a(I6912000 I6998400 I7592 tp35 a(I6998400 I7084800 I2607 tp36 a(I7689600 I7776000 I35 tp37 a(I8035200 I8121600 I33 tp38 a(I8294400 I8380800 I54 tp39 a(I8640000 I8726400 I45 tp40 a(I8726400 I8812800 I27 tp41 a(I8812800 I8899200 I12 tp42 a(I9763200 I9849600 I77 tp43 a(I9849600 I9936000 I91 tp44 a(I9936000 I10022400 I1210 tp45 a(I10022400 I10108800 I45 tp46 a(I10108800 I10195200 I186 tp47 a(I10368000 I10454400 I113 tp48 a(I10972800 I11059200 I21 tp49 a(I11232000 I11318400 I5 tp50 a(I11318400 I11404800 I19 tp51 a(I11404800 I11491200 I238 tp52 a(I11491200 I11577600 I159 tp53 a(I11750400 I11836800 I1 tp54 a(I11836800 I11923200 I32 tp55 a(I11923200 I12009600 I192 tp56 a(I12009600 I12096000 I222 tp57 a(I12096000 I12182400 I18 tp58 a(I12182400 I12268800 I224 tp59 a(I12268800 I12355200 I9 tp60 a(I12355200 I12441600 I9 tp61 a(I12441600 I12528000 I10 tp62 a(I12528000 I12614400 I6 tp63 a(I12614400 I12700800 I6 tp64 a(I12700800 I12787200 I18 tp65 a(I12787200 I12873600 I6 tp66 a(I12873600 I12960000 I62 tp67 asVcycle-start-finish-times p68 (F1634446505.241972 F1634446666.055401 tp69 sVspace-recovered p70 (dp71 Vexamined-buckets-immutable p72 I17896 sVconfigured-buckets-mutable p73 I0 sVexamined-shares-mutable p74 I2473 sVoriginal-shares-mutable p75 I1185 sVconfigured-buckets-immutable p76 I0 sVoriginal-shares-immutable p77 I27457 sVoriginal-diskbytes-immutable p78 I2810982400 sVexamined-shares-immutable p79 I37301 sVoriginal-buckets p80 I14047 sVactual-shares-immutable p81 I0 sVconfigured-shares p82 I0 sVoriginal-buckets-mutable p83 I691 sVactual-diskbytes p84 I4096 sVactual-shares-mutable p85 I0 sVconfigured-buckets p86 I1 sVexamined-buckets-unknown p87 I14 sVactual-sharebytes p88 I0 sVoriginal-shares p89 I28642 sVactual-buckets-immutable p90 I0 sVoriginal-sharebytes p91 I2695552941 sVexamined-sharebytes-immutable p92 I2754798505 sVactual-shares p93 I0 sVactual-sharebytes-immutable p94 I0 sVoriginal-diskbytes p95 I2818981888 sVconfigured-diskbytes-mutable p96 I0 sVconfigured-sharebytes-immutable p97 I0 sVconfigured-shares-mutable p98 I0 sVactual-diskbytes-immutable p99 I0 sVconfigured-diskbytes-immutable p100 I0 sVoriginal-diskbytes-mutable p101 I7995392 sVactual-sharebytes-mutable p102 I0 sVconfigured-sharebytes p103 I0 sVexamined-shares p104 I39774 sVactual-diskbytes-mutable p105 I0 sVactual-buckets p106 I1 sVoriginal-buckets-immutable p107 I13355 sVconfigured-sharebytes-mutable p108 I0 sVexamined-sharebytes p109 I2763646972 sVoriginal-sharebytes-immutable p110 
I2692076909 sVoriginal-sharebytes-mutable p111 I3476032 sVactual-buckets-mutable p112 I0 sVexamined-buckets-mutable p113 I1286 sVconfigured-shares-immutable p114 I0 sVexamined-diskbytes p115 I2854801408 sVexamined-diskbytes-mutable p116 I12161024 sVexamined-sharebytes-mutable p117 I8848467 sVexamined-buckets p118 I19197 sVconfigured-diskbytes p119 I4096 sVexamined-diskbytes-immutable p120 I2842640384 ssVcorrupt-shares p121 (lp122 (V2dn6xnlnsqwtnapwxfdivpm3s4 p123 I3 tp124 a(g123 I0 tp125 a(V2rrzthwsrrxolevmwdvbdy3rqi p126 I3 tp127 a(g126 I0 tp128 a(V2skfngcto6h7eqmn4uo7ntk3ne p129 I3 tp130 a(g129 I0 tp131 a(V32d5swqpqx2mwix7xmqzvhdwje p132 I3 tp133 a(g132 I0 tp134 a(V5mmayp66yflmpon3o6unsnbaca p135 I3 tp136 a(g135 I0 tp137 a(V6ixhpvbtre7fnrl6pehlrlflc4 p138 I3 tp139 a(g138 I0 tp140 a(Vewzhvswjsz4vp2bqkb6mi3bz2u p141 I3 tp142 a(g141 I0 tp143 a(Vfu7pazf6ogavkqj6z4q5qqex3u p144 I3 tp145 a(g144 I0 tp146 a(Vhbyjtqvpcimwxiyqbcbbdn2i4a p147 I3 tp148 a(g147 I0 tp149 a(Vpmcjbdkbjdl26k3e6yja77femq p150 I3 tp151 a(g150 I0 tp152 a(Vr6swof4v2uttbiiqwj5pi32cm4 p153 I3 tp154 a(g153 I0 tp155 a(Vt45v5akoktf53evc2fi6gwnv6y p156 I3 tp157 a(g156 I0 tp158 a(Vy6zb4faar3rdvn3e6pfg4wlotm p159 I3 tp160 a(g159 I0 tp161 a(Vz3yghutvqoqbchjao4lndnrh3a p162 I3 tp163 a(g162 I0 tp164 ass.tahoe_lafs-1.20.0/src/allmydata/test/data/lease_checker.state.txt0000644000000000000000000001156113615410400021707 0ustar00(dp1 S'last-complete-prefix' p2 NsS'version' p3 I1 sS'current-cycle-start-time' p4 F1635003106.611748 sS'last-cycle-finished' p5 I312 sS'cycle-to-date' p6 (dp7 Vleases-per-share-histogram p8 (dp9 I1 I36793 sI2 I1 ssVspace-recovered p10 (dp11 Vexamined-buckets-immutable p12 I17183 sVconfigured-buckets-mutable p13 I0 sVexamined-shares-mutable p14 I1796 sVoriginal-shares-mutable p15 I1563 sVconfigured-buckets-immutable p16 I0 sVoriginal-shares-immutable p17 I27926 sVoriginal-diskbytes-immutable p18 I431149056 sVexamined-shares-immutable p19 I34998 sVoriginal-buckets p20 I14661 sVactual-shares-immutable p21 I0 sVconfigured-shares p22 I0 sVoriginal-buckets-immutable p23 I13761 sVactual-diskbytes p24 I4096 sVactual-shares-mutable p25 I0 sVconfigured-buckets p26 I1 sVexamined-buckets-unknown p27 I14 sVactual-sharebytes p28 I0 sVoriginal-shares p29 I29489 sVoriginal-sharebytes p30 I312664812 sVexamined-sharebytes-immutable p31 I383801602 sVactual-shares p32 I0 sVactual-sharebytes-immutable p33 I0 sVoriginal-diskbytes p34 I441643008 sVconfigured-diskbytes-mutable p35 I0 sVconfigured-sharebytes-immutable p36 I0 sVconfigured-shares-mutable p37 I0 sVactual-diskbytes-immutable p38 I0 sVconfigured-diskbytes-immutable p39 I0 sVoriginal-diskbytes-mutable p40 I10489856 sVactual-sharebytes-mutable p41 I0 sVconfigured-sharebytes p42 I0 sVexamined-shares p43 I36794 sVactual-diskbytes-mutable p44 I0 sVactual-buckets p45 I1 sVoriginal-buckets-mutable p46 I899 sVconfigured-sharebytes-mutable p47 I0 sVexamined-sharebytes p48 I390369660 sVoriginal-sharebytes-immutable p49 I308125753 sVoriginal-sharebytes-mutable p50 I4539059 sVactual-buckets-mutable p51 I0 sVexamined-diskbytes-mutable p52 I9154560 sVexamined-buckets-mutable p53 I1043 sVconfigured-shares-immutable p54 I0 sVexamined-diskbytes p55 I476598272 sVactual-buckets-immutable p56 I0 sVexamined-sharebytes-mutable p57 I6568058 sVexamined-buckets p58 I18241 sVconfigured-diskbytes p59 I4096 sVexamined-diskbytes-immutable p60 I467443712 ssVcorrupt-shares p61 (lp62 (V2dn6xnlnsqwtnapwxfdivpm3s4 p63 I4 tp64 a(g63 I1 tp65 a(V2rrzthwsrrxolevmwdvbdy3rqi p66 I4 tp67 a(g66 I1 tp68 
a(V2skfngcto6h7eqmn4uo7ntk3ne p69 I4 tp70 a(g69 I1 tp71 a(V32d5swqpqx2mwix7xmqzvhdwje p72 I4 tp73 a(g72 I1 tp74 a(V5mmayp66yflmpon3o6unsnbaca p75 I4 tp76 a(g75 I1 tp77 a(V6ixhpvbtre7fnrl6pehlrlflc4 p78 I4 tp79 a(g78 I1 tp80 a(Vewzhvswjsz4vp2bqkb6mi3bz2u p81 I4 tp82 a(g81 I1 tp83 a(Vfu7pazf6ogavkqj6z4q5qqex3u p84 I4 tp85 a(g84 I1 tp86 a(Vhbyjtqvpcimwxiyqbcbbdn2i4a p87 I4 tp88 a(g87 I1 tp89 a(Vpmcjbdkbjdl26k3e6yja77femq p90 I4 tp91 a(g90 I1 tp92 a(Vr6swof4v2uttbiiqwj5pi32cm4 p93 I4 tp94 a(g93 I1 tp95 a(Vt45v5akoktf53evc2fi6gwnv6y p96 I4 tp97 a(g96 I1 tp98 a(Vy6zb4faar3rdvn3e6pfg4wlotm p99 I4 tp100 a(g99 I1 tp101 a(Vz3yghutvqoqbchjao4lndnrh3a p102 I4 tp103 a(g102 I1 tp104 asVlease-age-histogram p105 (dp106 (I45619200 I45705600 tp107 I4 s(I12441600 I12528000 tp108 I78 s(I11923200 I12009600 tp109 I89 s(I33436800 I33523200 tp110 I7 s(I37411200 I37497600 tp111 I4 s(I38361600 I38448000 tp112 I5 s(I4665600 I4752000 tp113 I256 s(I11491200 I11577600 tp114 I20 s(I10713600 I10800000 tp115 I183 s(I42076800 I42163200 tp116 I4 s(I47865600 I47952000 tp117 I7 s(I3110400 I3196800 tp118 I328 s(I5788800 I5875200 tp119 I954 s(I9331200 I9417600 tp120 I12 s(I7430400 I7516800 tp121 I7228 s(I1555200 I1641600 tp122 I492 s(I37929600 I38016000 tp123 I3 s(I38880000 I38966400 tp124 I3 s(I12528000 I12614400 tp125 I193 s(I10454400 I10540800 tp126 I1239 s(I11750400 I11836800 tp127 I7 s(I950400 I1036800 tp128 I4435 s(I44409600 I44496000 tp129 I13 s(I12787200 I12873600 tp130 I218 s(I10368000 I10454400 tp131 I117 s(I3283200 I3369600 tp132 I86 s(I7516800 I7603200 tp133 I993 s(I42336000 I42422400 tp134 I33 s(I46310400 I46396800 tp135 I1 s(I39052800 I39139200 tp136 I51 s(I7603200 I7689600 tp137 I2004 s(I10540800 I10627200 tp138 I16 s(I36374400 I36460800 tp139 I3 s(I3369600 I3456000 tp140 I79 s(I12700800 I12787200 tp141 I25 s(I4838400 I4924800 tp142 I386 s(I10972800 I11059200 tp143 I122 s(I8812800 I8899200 tp144 I57 s(I38966400 I39052800 tp145 I61 s(I3196800 I3283200 tp146 I628 s(I9244800 I9331200 tp147 I73 s(I30499200 I30585600 tp148 I5 s(I12009600 I12096000 tp149 I329 s(I12960000 I13046400 tp150 I8 s(I12614400 I12700800 tp151 I210 s(I3801600 I3888000 tp152 I32 s(I10627200 I10713600 tp153 I43 s(I44928000 I45014400 tp154 I2 s(I8208000 I8294400 tp155 I38 s(I8640000 I8726400 tp156 I32 s(I7344000 I7430400 tp157 I12689 s(I49075200 I49161600 tp158 I19 s(I2764800 I2851200 tp159 I76 s(I2592000 I2678400 tp160 I40 s(I2073600 I2160000 tp161 I388 s(I37497600 I37584000 tp162 I11 s(I1641600 I1728000 tp163 I78 s(I12873600 I12960000 tp164 I5 s(I1814400 I1900800 tp165 I1860 s(I40176000 I40262400 tp166 I1 s(I3715200 I3801600 tp167 I104 s(I2332800 I2419200 tp168 I12 s(I2678400 I2764800 tp169 I278 s(I12268800 I12355200 tp170 I2 s(I28771200 I28857600 tp171 I6 s(I41990400 I42076800 tp172 I10 sssS'last-complete-bucket' p173 NsS'current-cycle' p174 Ns.tahoe_lafs-1.20.0/src/allmydata/test/data/openssh-rsa-2048.pub.txt0000644000000000000000000000061413615410400021432 0ustar00ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDx5JfaPwE2wfXIQcmlGte9EPAbrTmHPGOF/PuZ71XPa3mZTHMQQuc959gmLxupmcc5o4jYe8VTwT6bbNl6YM+HmCvL3XVH0BqdM2lpKCTB/WzSAyFUv8gSjQVXekRm9wF69tZkPrudqutTLhqXU5ESiUzfhU+CxHQW+kAf10Yd9R68V1f8jkuWjEoeVfCltj7O5fRlpouoTXn83MUAXB3J/wDjpjnjp2PxvXL2x5aCHtzd1WCGEmtWbHZvRA1a0EE233zfXNHg4xLd3ycUqAxoRlCcC230itUBXtr4qgDMzRdsL+HGWrcJ+4yezlQj+l8mc7vi5shNT7HDRfvi/rE7 exarkun@baryon tahoe_lafs-1.20.0/src/allmydata/test/data/openssh-rsa-2048.txt0000644000000000000000000000343713615410400020653 0ustar00-----BEGIN OPENSSH PRIVATE KEY----- 
b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAABFwAAAAdzc2gtcn NhAAAAAwEAAQAAAQEA8eSX2j8BNsH1yEHJpRrXvRDwG605hzxjhfz7me9Vz2t5mUxzEELn PefYJi8bqZnHOaOI2HvFU8E+m2zZemDPh5gry911R9AanTNpaSgkwf1s0gMhVL/IEo0FV3 pEZvcBevbWZD67narrUy4al1OREolM34VPgsR0FvpAH9dGHfUevFdX/I5LloxKHlXwpbY+ zuX0ZaaLqE15/NzFAFwdyf8A46Y546dj8b1y9seWgh7c3dVghhJrVmx2b0QNWtBBNt9831 zR4OMS3d8nFKgMaEZQnAtt9IrVAV7a+KoAzM0XbC/hxlq3CfuMns5UI/pfJnO74ubITU+x w0X74v6xOwAAA8gG6fYoBun2KAAAAAdzc2gtcnNhAAABAQDx5JfaPwE2wfXIQcmlGte9EP AbrTmHPGOF/PuZ71XPa3mZTHMQQuc959gmLxupmcc5o4jYe8VTwT6bbNl6YM+HmCvL3XVH 0BqdM2lpKCTB/WzSAyFUv8gSjQVXekRm9wF69tZkPrudqutTLhqXU5ESiUzfhU+CxHQW+k Af10Yd9R68V1f8jkuWjEoeVfCltj7O5fRlpouoTXn83MUAXB3J/wDjpjnjp2PxvXL2x5aC Htzd1WCGEmtWbHZvRA1a0EE233zfXNHg4xLd3ycUqAxoRlCcC230itUBXtr4qgDMzRdsL+ HGWrcJ+4yezlQj+l8mc7vi5shNT7HDRfvi/rE7AAAAAwEAAQAAAQBc8ukC/RjbULbAJ79z SRhDV2HcULj9ZVAc6XRI13XSyUqlhIHmar7uw8sECTAJAMVUOanY/d56a5RCJxZ+dvrn8K pLoSJy4N2JMHs95CYTwOzy2i8RoMwhjLzTu3DTW/DerkD9rjlrwYTBpsKjCYKCa+31KgW+ ivzM44aGdbNEyO+yHaxdcyEr3OLcRMppgZmwTieFnG053lCP5XyYRQmZ1a78G6WOzpOgbO 2N6Z1sbEqTMVd3oxFZAbmqA8kE4jLJzRcso/SSK5NDs22JzMfxByJQSlitWzDDvHdWpQpy 8C6Eu7+48ataLI68VOOXuDWDy9Dck0ev89u7Z4vNLWBhAAAAgAndOZZ0C179Um6sn6gmfM 0ttXEaSIqYNGRhkoYqn9vvw03bOMbSnqdEJiwFhbE/rWv7PypB5MeY7tRoCyBMWsUYj0pA HKSl68diLr5g5EOIRGAWu8e//7T2HgZKOo+VaG1IXgmb7PUoAJ6Tzsmb4jdnYfg+BP/TDd e9yCcoiT2fAAAAgQD6T7Kr6ECg0ME8vt/ixsjKdA2zS9SIHyjCMXbdMv1Ok1hkr5rRWbbZ jm79fF+a8pOQUg30Qw2JUx7II50akt2xL6zesGDDUcOHD2GE/B6Ftji53G3fwWZCqeQ5sD YP25qAWlrqDBGJvF+hkEdlceS8etYJ3XWXjNIYwfR7frQvkQAAAIEA92Pq3FWH63TS3Lqe mQjhfNV75tU0AwENG+xlI1g0nQb7Qsdbm6rIg6XqewUfw03Q+/AqPvwG/1mbyVF7jRZ+qw cl69yM70c9qY74GHjIIOOcC8Kgv29LQrm/VqVp0Lesn5RA8SIiLcMfyYBTEX8V9VY99Zkd v6WwRr4XK1bPRgsAAAAOZXhhcmt1bkBiYXJ5b24BAgMEBQ== -----END OPENSSH PRIVATE KEY----- tahoe_lafs-1.20.0/src/allmydata/test/data/openssl-rsa-2048-2.txt0000644000000000000000000000321313615410400021006 0ustar00-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEAygMjLBKayDEioOZap2syJhUlqI7Dkk4zV5TfVxlQFO7bR410 eJRJY1rHGIeZxQPjytsSJvqlYEJrvvVNdhi6XN/6NA3RFL6pDTHkYyM3qbrXqlYC HUlkS2JAZzIFRizl6nG11yIbHjPsoG+vGSjGSzVIiOP4NeIssYLpoASTIppdZxy+ syZ6zSmPhZu7W9X73aupLjFrIZpjeKfO2+GfUwEzAH0HckLIgJpQ+vK3sqbSik/2 1oZK33M8uvtdmba7D3uJXmxWMTJ7oyFLDpDOMl7HSUv1lZY2O2qiDPYfGDUM1BRp 6blxE+BA2INr9NO4A4H8pzhikFnaFnkpH/AxowIDAQABAoIBABprXJ8386w42NmI JtT8bPuUCm/H9AXfWlGa87aVZebG8kCiXFgktJBc3+ryWQbuIk12ZyJX52b2aNb5 h97pDv50gGlsYSrAYKWMH91jTrVQ7UGmq/IelhJR0DBu10e9OXh21JxFJpzFl63H zXOR5JUTa+ATSHPrl4LDp0A5OPDuWbBWa64yx7gUI9/tljbndplCrPjmIE6+h10M sqxW5oJpLnZpWc73QQUTuPIr+A7fLgGJYHnyCFUu9OW4ZnxNEI3/wNHPvoxkYuHN 2qVonFESiAx9mBv7JzQ7X2KIB8doY3KL6S7sAKi/i/aP7EDJ9QEtl3BR3M8/XP8E KJVORWECgYEA8Vbw75+aVMxHUl9BJc1zESxqVvr+R0NBqMO47CBj39sTJkXY37O3 A7j4dzCorI0NaB7Jr+AI2ZZu9CaR31Y2mhAGbNLBPK8yn0Z7iWyDIqOW1OpMDs35 h2CI1pFLjx1a3PzhsQdzZ68izWKYBdTs2scaFz/ntaPwwPEwORaMDZECgYEA1kie YfMRJ2GwzvbR35WvEMhVxhnmA6yuRL15Pkb1WDR3iWGM0ld/u3N4sRVCx1nU4wk/ MMqCRdm4JaxqzR/hl8+/sp3Aai15ecqR+F+ecwbbB2XKVHfi1nqClivYnB+GgCh1 bQYUd9LT80sIQdBEW5MBdbMFnOkt+1sSpjf1wfMCgYBAavlyrIJQQhqDdSN5iKY/ HkDgKKy4rs4W0u9IL7kY5mvtGlWyGFEwcC35+oX7UMcUVKt3A3C5S3sgNi9XkraO VtqwL20e2pDDjNeqrcku9MVs3YEhrn79UJoV08B8WdSICgPf8eIu+cNrWPbFD7mN B/oB3K/nfvPjPD2n70nA0QKBgGWJN3NWR9SPV8ZZ8gyt0qxzISGjd/hZxKHR3jeC TBMlmVbBoIay61WZW6EdX+0yRcvmv8iQzLXoendvgZP8/VqAGGe8lEY7kgoB0LUO Kfh7USHqO7tWq2fR2TrrP9KKpaLoiOvGK8CzZ7cq4Ji+5QU3XUO2NnypiR5Hg0i7 z3m9AoGBAIEXtoSR9OTwdmrdIQn3vsaFOkN5pyYfvAvdeZ+7wwMg/ZOwhStwctbI Um7XqocXU+8f/gjczgLgMJj+zqr+QDH5n4vSTUMPeN0gIugI9UwWnc2rhbRCgDdY W6SwPQGDuGoUa5PxjggkyevUUmtXvGG9jnkt9kozQOA0lOF1vbw/ 
-----END RSA PRIVATE KEY----- tahoe_lafs-1.20.0/src/allmydata/test/data/openssl-rsa-2048-3.txt0000644000000000000000000000321313615410400021007 0ustar00-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEAoa9i8v9YIzb+3yRHyXLm4j1eWK9lQc6lFwoQhik8y+joD+5A v73OlDZAcn6vzlU72vwrJ1f4o54nEVm0rhNrhwCsiHCdxxEDEoqZ8w/19vc4hWj4 SYwGirhcnyb2ysZSV8v9Lm5HiFe5zZM4jzCzf2rzt0YRlZZj9nhSglaiHZ9BE2e0 vzOl6GePDz6yS4jbh2RsPsDQtqXNOqZwfGUd+iTsbSxXcm8+rNrT1VAbx6+1Sr0r aDyc/jp8S1JwJ0ofJLsU3Pb6DYazFf12CNTsrKF1L0hAsbN8v2DSunZIQqQLQGfp 0hnNO9V8q9FjvVu8XY/HhgoTvtESU3vuq+BnIwIDAQABAoIBAGpWDP+/y9mtK8bZ 95SXyx10Ov6crD2xiIY0ilWR/XgmP6lqio8QaDK104D5rOpIyErnmgIQK2iAdTVG CDyMbSWm3dIGLt5jY9/n5AQltSCtyzCCrvi/7PWC9vd9Csal1DYF5QeKY+VZvMtl Tcduwj7EunEI1jvJYwkQbUNncsuDi+88/JNwa8DJp1IrR4goxNflGl7mNzfq49re lhSyezfLSTZKDa3A6sYnNFAAOy82iXZuLXCqKuwRuaiFFilB0R0/egzBSUeBwMJk sS+SvHHXwv9HsYt4pYiiZFm8HxB4NKYtdpHpvJVJcG9vOXjewnA5YHWVDJsrBfu6 0kPgbcECgYEA0bqfX2Vc6DizwjWVn9yVlckjQNGTnwf/B9eGW2MgTn6YADe0yjFm KCtr34hEZc/hv3kBnoLOqSvZJiser8ve3SmwxfmpjEfJdIgA5J5DbCEGBiDm9PMy 0lYsfjykzYykehdasb8f4xd+SPMuTC/CFb1MCTlohex7qn7Xt9IskBECgYEAxVtF iXwFJPQUil2bSFGnxtaI/8ijypLOkP3CyuVnEcbMt74jDt1hdooRxjQ9VVlg7r7i EvebPKMukWxdVcQ/38i97oB/oN7MIH0QBCDWTdTQokuNQSEknGLouj6YtLAWRcyJ 9DDENSaGtP42le5dD60hZc732jN09fGxNa6gN/MCgYB5ux98CGJ3q0mzBNUW17q/ GOLsYXiUitidHZyveIas6M+i+LJn1WpdEG7pbLd+fL2kHEEzVutKx9efTtHd6bAu oF8pWfLuKFCm4bXa/H1XyocrkXdcX7h0222xy9NAN0zUTK/okW2Zqu4yu2t47xNw +NGkXPztFsjkugDNgiE5cQKBgQDDy/BqHPORnOIAACw9jF1SpKcYdPsiz5FGQawO 1ZbzCPMzW9y2M6YtD3/gzxUGZv0G/7OUs7h8aTybJBJZM7FXGHZud2ent0J2/Px1 zAow/3DZgvEp63LCAFL5635ezM/cAbff3r3aKVW9nPOUvf3vvokC01oMTb68/kMc ihoERwKBgFsoRUrgGPSfG1UZt8BpIXbG/8qfoy/Vy77BRqvJ6ZpdM9RPqdAl7Sih cdqfxs8w0NVvj+gvM/1CGO0J9lZW2f1J81haIoyUpiITFdoyzLKXLhMSbaF4Y7Hn yC/N5w3cCLa2LLKoLG8hagFDlXBGSmpT1zgKBk4YxNn6CLdMSzPR -----END RSA PRIVATE KEY----- tahoe_lafs-1.20.0/src/allmydata/test/data/openssl-rsa-2048-4.txt0000644000000000000000000000321313615410400021010 0ustar00-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKCAQEA2PL5Ry2BGuuUtRJa20WS0fwBOqVIVSXDVuSvZFYTT1Xji19J q+ohHcFnIIYHAq0zQG+NgNjK5rogY/5TfbwIhfwLufleeAdL9jXTfxan0o/wwFA1 DAIHcYsTEYI2dfQe4acOLFY6/Hh6iXCbHvSzzUnEmYkgwCAZvc0v/lD8pMnz/6gQ 2nJnAASfFovcAvfr1T+MZzLJGQem3f2IFp1frurQyFmzFRtZMO5B9PDSsFG4yJVf cz0iSP8wlc9QydImmJGRvu4xEOkx/55B/XaUdb6CIGpCTkLsDOlImvZt9UHDSgXq qcE/T7SYMIXqbep64tJw9enjomH+n1KVh9UA2wIDAQABAoIBABCSTrQ/J5N010EV i9cf810S0M03/tRyM/+ZLESPxp3Sw7TLrIbzNWBee5AibLqpnDaZzsc+yBDjusGo lZwPFt+VJxgnki288PJ3nhYhFuSglhU6izLFnOfxZZ16wsozwYAfEJgWZh8O3N1O uqqcqndN4TSRIu1KBm1XFQlqCkJT/stzYjO4k1vhgZT4pqhYRdx7q7FAap4v+sNs Svhm1blvOXlyeumAbFBdGFttpTxIOGRzI1bp00jcLK4rgssTTxNyEiVu4oJhQY/k 0CptSUzpGio8DZ0/8bNnKCkw8YATUWJZQgSmKraRwAYMMR/SZa7WqjEc2KRTj6xQ pHmYwZECgYEA700a/7ur8+EwTSulLgDveAOtTV0xEbhuq6cJQgNrEp2rbFqie6FX g/YJKzEpEnUvj/yOzhEcw3CdQDUaxndlqY87QIhUWMcsnfMPsM1FjhmfksR8s3TF WZNqa0RAKmcRoLohGclSvRV2OVU8+10mLUwJfR86Nl5+auR3LxWLyB8CgYEA6BaR r+Z7oTlgkdEDVhnQ58Msktv58y28N+VIbYS79bV01jqUUlogm5uTvdvq5nyENXHx gnK88mVzWYBMk83D01HlOC5DhpspTVEQQG2V/If6KZa56mxiHP3Mab9jLew9w/kA g6l/04ATSA8g4i2H/Bz0eEyPEBt6o/+SO0Xv38UCgYEAyTTLvrrNmgF922UXPdcL gp2U2bfBymSIqUuJPTgij0SDHlgWxlyieRImI2ryXdKqayav7BP3W10U2yfLm5RI pokICPqX8Q2HNkdoqf/uu8xPn9gWAc3tIaQRlp+MVBrVd48IxeXA67tf7FT/MVrg /rUwRUQ8bfqF0NrIW46COYECgYAYDJamGoT/DNoD4hutZVlvWpsY0LCS0U9qn1ik +Jcde+MSe9l4uxwb48AocUxi+84bV6ZF9Su9FmQghxnoSu8ay6ar7qdSoGtkNp0v f+uF0nVKr/Kt5vM3u9jdsFZPoOY5k2jJO9wiB2h4FBE9PqiTqFBw0sYUTjSkH8yA VdvoXQKBgFqCC8Y82eVf0/ORGTgG/KhZ72WFQKHyAeryvoLuadZ6JAI6qW9U1l9P 18SMnCO+opGN5GH2Qx7gdg17KzWzTW1gnbv0QUPNnnYEJU8VYMelNuKa8tmNgFH7 
inAwsxbbWoR08ai4exzbJrNrLpDRg5ih2wMtknN6D8m+EAvBC/Gj -----END RSA PRIVATE KEY----- tahoe_lafs-1.20.0/src/allmydata/test/data/openssl-rsa-2048.txt0000644000000000000000000000325013615410400020650 0ustar00-----BEGIN PRIVATE KEY----- MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDF1MeXulDWFO05 YXCh8aqNc1dS1ddJRzsti4BOWuDOepUc0oCaSIcC5aR7XJ+vhX7a02mTIwvLcuEH 8sxx0BJU4jCDpRI6aAqaKJxwZx1e6AcVFJDl7vzymhvWhqHuKh0jTvwM2zONWTwV V8m2PbDdxu0Prwdx+Mt2sDT6xHEhJj5fI/GUDUEdkhLJF6DQSulFRqqd0qP7qcI9 fSHZbM7MywfzqFUe8J1+tk4fBh2v7gNzN1INpzh2mDtLPAtxr4ZPtEb/0D0U4PsP CniOHP0U8sF3VY0+K5qoCQr92cLRJvT/vLpQGVNUTFdFrtbqDoFxUCyEH4FUqRDX 2mVrPo2xAgMBAAECggEAA0Ev1y5/1NTPbgytBeIIH3d+v9hwKDbHecVoMwnOVeFJ BZpONrOToovhAc1NXH2wj4SvwYWfpJ1HR9piDAuLeKlnuUu4ffzfE0gQok4E+v4r 2yg9ZcYBs/NOetAYVwbq960tiv/adFRr71E0WqbfS3fBx8q2L3Ujkkhd98PudUhQ izbrTvkT7q00OPCWGwgWepMlLEowUWwZehGI0MlbONg7SbRraZZmG586Iy0tpC3e AM7wC1/ORzFqcRgTIxXizQ5RHL7S0OQPLhbEJbuwPonNjze3p0EP4wNBELZTaVOd xeA22Py4Bh/d1q3aEgbwR7tLyA8YfEzshTaY6oV8AQKBgQD0uFo8pyWk0AWXfjzn jV4yYyPWy8pJA6YfAJAST8m7B/JeYgGlfHxTlNZiB40DsJq08tOZv3HAubgMpFIa reuDxPqo6/Quwdy4Syu+AFhY48KIuwuoegG/L+5qcQLE69r1w71ZV6wUvLmXYX2I Y6nYz+OdpD1JrMIr6Js60XURsQKBgQDO8yWl7ufIDKMbQpbs0PgUQsH4FtzGcP4J j/7/8GfhKYt6rPsrojPHUbAi1+25xBVOuhm0Zx2ku2t+xPIMJoS+15EcER1Z2iHZ Zci9UGpJpUxGcUhG7ETF1HZv0xKHcEOl9eIIOcAP9Vd9DqnGk85gy6ti6MHe/5Tn IMD36OQ8AQKBgQDwqE7NMM67KnslRNaeG47T3F0FQbm3XehCuqnz6BUJYcI+gQD/ fdFB3K+LDcPmKgmqAtaGbxdtoPXXMM0xQXHHTrH15rxmMu1dK0dj/TDkkW7gSZko YHtRSdCbSnGfuBXG9GxD7QzkA8g7j3sE4oXIGoDLqRVAW61DwubMy+jlsQKBgGNB +Zepi1/Gt+BWQt8YpzPIhRIBnShMf3uEphCJdLlo3K4dE2btKBp8UpeTq0CDDJky 5ytAndYp0jf+K/2p59dEuyOUDdjPp5aGnA446JGkB35tzPW/Uoj0C049FVEChl+u HBhH4peE285uXv2QXNbOOMh6zKmxOfDVI9iDyhwBAoGBAIXq2Ar0zDXXaL3ncEKo pXt9BZ8OpJo2pvB1t2VPePOwEQ0wdT+H62fKNY47NiF9+LyS541/ps5Qhv6AmiKJ Z7I0Vb6+sxQljYH/LNW+wc2T/pIAi/7sNcmnlBtZfoVwt99bk2CyoRALPLWHYCkh c7Tty2bZzDZy6aCX+FGRt5N/ -----END PRIVATE KEY----- tahoe_lafs-1.20.0/src/allmydata/test/data/pycryptopp-rsa-1024-priv.txt0000644000000000000000000000151413615410400022366 0ustar00MIICdQIBADANBgkqhkiG9w0BAQEFAASCAl8wggJbAgEAAoGBAJLEAfZueLuT4vUQ1+c8ZM9dJ/LA29CYgA5toaMklQjbVQ2Skywvw1wEkRjhMpjQAx5+lpLTE2xCtqtfkHooMRNnquOxoh0o1Xya60jUHze7VB5QMV7BMKeUTff1hQqpIgw/GLvJRtar53cVY+SYf4SXx2/slDbVr8BI3DPwdeNtAgERAoGABzHD3GTJrteQJRxu+cQ3I0NPwx2IQ/Nlplq1GZDaIQ/FbJY+bhZrdXOswnl4cOcPNjNhu+c1qHGznv0ntayjCGgJ9dDySGqknDau+ezZcBO1JrIpPOABS7MVMst79mn47vB2+t8w5krrBYahAVp/L5kY8k+Pr9AU+L9mbevFW9MCQQDA+bAeMRNBfGc4gvoVV8ecovE1KRksFDlkaDVEOc76zNW6JZazHhQF/zIoMkV81rrg5UBntw3WR3R8A3l9osgDAkEAwrLQICJ3zjsJBt0xEkCBv9tK6IvSIc7MUQIc4J2Y1hiSjqsnTRACRy3UMsODfx/Lg7ITlDbABCLfv3v4D39jzwJBAKpFuYQNLxuqALlkgk8RN6hTiYlCYYE/BXa2TR4U4848RBy3wTSiEarwO1Ck0+afWZlCwFuDZo/kshMSH+dTZS8CQQC3PuIAIHDCGXHoV7W200zwzmSeoba2aEfTxcDTZyZvJi+VVcqi4eQGwbioP4rR/86aEQNeUaWpijv/g7xK0j/RAkBbt2U9bFFcja10KIpgw2bBxDU/c67h4+38lkrBUnM9XVBZxjbtQbnkkeAfOgQDiq3oBDBrHF3/Q8XM0CzZJBWStahoe_lafs-1.20.0/src/allmydata/test/data/pycryptopp-rsa-2048-priv.txt0000644000000000000000000000313113615410400022372 
0ustar00MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQC0JwgBbVsI+XlOopqjvBatKkQbJPXuap7Psbe5i4EoMfiYI2PC2UB7GuYeTdE79TvDtmfjFD/RVWA3Y/RTQYQz/lKyCFS4w3wa/TPkZwF1r3OjIMSsCYe2J3W9NV3cK+PVw2A8D2y5DvUIAdO+Mi6aH26p2UV8FTnPqHWvJubrcLQt6979/BQnqKCFJ+SPx4se5XsMZ3vrbs6MCqM2qS9RnNEhexlNrJd1wXezILKsmQdf/QiZiY7LXjEdD6BNG8OYQ2iSbCa8aGEoSPQfdnZZxcTFE02QwKcScZKhU9fRv0Ttqr3i8xiliw9gn4UzptEZO6MVO2BrptS30SjJDXC7AgERAoIBADpI3PFnJPtfxV00m3E1UqFvjoFAqetAnMq5fzR/9RSIo0BHr1Wgo+uXwuuvw7GEC85gqSPR2GlfYuS+dLGGIz3/dRt7KngDAoEzzQYhU0u4w4eZqQp7jcn9tSagUxKGq5f7cfVQSNJ1x77TaibyHiLN7xjVWj67krQf6dbI0j0cYvnxu+4EZbzNdvFw93ddoOZB/dFjLu0kVKVl/mWyCX9GNr2nCSHe9wYipOz5b9WkdD0J2Oy0v8Wkn4y3yOOvo/EgrNYfo4IVslsDo9Yw3Yk32Eml0ZsdwSqu+wM4c+jRbTJ+sBGqci4etPpMhcsH0Vt9+97Lnuan2Jza9xjrL2ECgYEA8wj+/bfjTCXsu22f8V7Z40vJUyM7j4WvUoA9khAQ7qAlnFdqdzq5a7ArA9vRjeN6ya16j36IXCkpT+FGe6YWCsZCKd1ZVy7sZ1Uh7X2hRqf0vxJsSJvG/OmofFUfuwCgLFLKI4SDhHaB+pWAdkAIL4MkJQADg/qVlAdrWoPsfhECgYEAvcNHhSCW010SRudwmTRX5QtndHk/LM6VAgyR0ElarvmG6K5pbpL8MD5CpJ3AhUwKp96SlMsBEG3a9BR5zv6Jvhc/KHxT7W/EjLnV9PSD90+BgHHsTonjg6TayJ9XE6RpO3MqeifVG/2S5WhhFFGGd5KSFnvZwr9ni+LYRuDVpgsCgYEAgKpo4KylgqqqgVgnf8jNtJGIs4sfiDe3K61NxcxFMwl9UsTeAuLaomxTAgr2eEtBAVvXeSTex2EV3v7K9irAYA6bf5NNamQizUswFFGRneByg0X9F2GHdtYN53hcF7UJgOCJIdy+GPNx/SH4txLXKDZebfDyzWaLbHxmAr5QBoECgYBC+aDFkwgOXRWCb81jP6aNExV0Zwc8/Z4AuSRnoWtM0In3xRYnBrNcUjWjgvinhD//A0LLGnjYnz44BzoM0k67j7vwK+Fi3CdAug9HZVvAsqYtVWJ2EoyI0MWwODzZwY6Nc/Df0dK+lbtgBrjZ/qft937awkzbUp0EMfH65fENbQKBgQCSVWXy+WLQXeHtx/+nNv9HyjQnowalp3SwWRf0YoK/xa526xg+ixViVZvT6e2KTcJGdHFQ+cbCsc1Vx6E13n3Mu9y0N3a4WRQkZHPgnsNouPLaKn0SmVY7RX/I/Rz2r0hRE+gDM6+1/99zPuwP3FW5eLoTBX021Y35kBFHbZ4r+w== tahoe_lafs-1.20.0/src/allmydata/test/data/pycryptopp-rsa-2048-pub.txt0000644000000000000000000000061113615410400022200 0ustar00MIIBIDANBgkqhkiG9w0BAQEFAAOCAQ0AMIIBCAKCAQEAtCcIAW1bCPl5TqKao7wWrSpEGyT17mqez7G3uYuBKDH4mCNjwtlAexrmHk3RO/U7w7Zn4xQ/0VVgN2P0U0GEM/5SsghUuMN8Gv0z5GcBda9zoyDErAmHtid1vTVd3Cvj1cNgPA9suQ71CAHTvjIumh9uqdlFfBU5z6h1rybm63C0Leve/fwUJ6ighSfkj8eLHuV7DGd7627OjAqjNqkvUZzRIXsZTayXdcF3syCyrJkHX/0ImYmOy14xHQ+gTRvDmENokmwmvGhhKEj0H3Z2WcXExRNNkMCnEnGSoVPX0b9E7aq94vMYpYsPYJ+FM6bRGTujFTtga6bUt9EoyQ1wuwIBEQ== tahoe_lafs-1.20.0/src/allmydata/test/data/pycryptopp-rsa-2048-sig.txt0000644000000000000000000000053113615410400022175 0ustar00ItsyW1XTOIvet6WsS68AJ/ernMG62aoeJKzyBBZ9fdeB2mVzURCBmgX5P0hTPgxHa1sEI6oIbREv4lIQnWHcPgjvz5qBkDtbOp1YHkkFAFOh533dH4s2MiRECIzHh19sBsqTGe0w/pRTHhwV+nStFqZ0IMsdxv0Qsgk5IClIY/WgBSnHQZpVbxyfL7qwvm1JK2GRuygRRsrSsxLiSnA5RWlOsDkDikVu5nhZI31K+PWa9v1i6U7ZkV4uD9triJkHW2XBIRkCyqT6wgM4KBN6V4H9nqlxZhJSQoSn1U5Rh3pL+XG6yevaZq7+pwOnRUcFkEwiJ2wT/NIK0Bjng8Szmw== tahoe_lafs-1.20.0/src/allmydata/test/data/pycryptopp-rsa-32768-priv.txt0000644000000000000000000006013413615410400022474 
0ustar00MIJIQQIBADANBgkqhkiG9w0BAQEFAASCSCswgkgnAgEAAoIQAQC3x9r2dfYoTp7oIMsPdOhyNK5CB3TOtiaxhf3EkGAIaLWTXUVbxvOkiSu3Tca9VqFVnN7EkbT790uDjh4rviGeZF8oplVN+FDxKfcg5tXWv4ec9LnOUUAVRUnrUQA2azkOT+ozXQwZnJwUYr210VoV8D0MkrvOzNgGpb8aErDhW8SwrJcoYkObIE7n3C3zEMaEIyA1OFWSJDiXNGnBDvO54t1/y+/o4IuzLWWG7TPx8hnV+jcHRoxJTX2MZusJ7kugvxhgB0+avwXFTQr6ogvPNcUXak0+aLInLRtkYJ+0DYqo1hLAh8EBY/cLrhZM5LGGC4BAwGgUwsx3KKeOeduNnob3s/1rZpvZGwbGtfiWYQwDB8q68j3Ypf2Qvn7hPwicdOr0Dwe4TXJQ4yRHPeQaToOBUjtTJnrHsKDZET6i+jQ9e07Ct+yYrUwZjiaSXJYU/gCyPCui7L37NasXBJ00f1Ogm3gt4uxl3abO8mO1nKSWM+HbFBEyyO0apT+sSwYj6IL7cyCSJtWYMD4APdW5rXSArhyiaHV+xNbVUXAdBrZSNuwet925hTOf4IQD9uqfzeV3HIoiUCxn5GKYPZy01Kft+DExuDbJjMmES2GhfPWRIFB5MN0UdjlagDHLzFraQUcLTDKlxL0iZ+uV4Itv5dQyaf93Szu2LD1jnkvZOV5GN1RxTmZCH1FIPYCNwS6mIRG/4aPWA0HCZX8HzSMOBshAS6wECaoLWxv8D3K4Tm1rp/EgP7NZRxTj2ToOostJtjzTrVb3f3+zaT5svxD1Exw8tA1fZNRThIDKZXVSSLDYaiRDAUg7xEMD2eDCvNQasjAwX5Tnw7R4M/CZoZhgYVwIE+vHQTh8H+M/J8CNLxPT4N3fuXCqT8YoJVUOmKHe0kE5Rtd87X2BQY5SSx6LFMRRSVdBBpWB6cwLo8egehYAScEDQh0ht/ssaraWZ2LGt5hZL0I5V58iS/6C4IOu+1ry75g6mecWoHD0fBQELB3Q3Qi6c6Hik/jgTLQHb5UMqKj/MDSdTWuxwH2dYU5H4EGAkbfufBoxw9hIpdeS7/aDulvRKtFVPfi/pxmrd1lxQCBA4ionRe4IOY0E9i419TOgMtGgZxNlEXtp445MbeIlurxIDIX8N+RGHWljGR/9K6sjbgtGKyKLUxg51DZeuDKQGdyKXtIIkZ+Od9HN+3Mv0Ch5B9htIRV9hE6oLWLT+grqJCFAOD3olGgrRXByDsd8YouahYfjqb4KNCOyFPS3j5MdUpq+fiLrG3O98/L/xtmXxw+ekl95EGAnlwiCwULsjzVjHJDzSc68cldMnzNqLwhwWXpc0iswCWCQVFce/d1KlWqrtwq2ThH2pX3BJ5Pnu+KMISNNC/tagLe9vjmrh6ZhEks7hefn0srytJdivGDFqMs/ISmcld0U/0ZqE05b7BpErpfVrG9kb5QxWBTpaEb2O0pRsaYRcllFuNF6Nl/jPDBnn4BMYnOFnn9OKGPEDUeV/6CYP9x+Wi96M5Ni6vtv+zw9Xg8drslS5DJazXQFbJ0aqW3EgalUJVV0NgykB6Hr4pxTzrwo0+R/ro32DEj5OfjjU7TB4fYie0eax8tpdvzcWJRZ/c5b/Dg1yK+hbiMg9aTctHAsYJkOvMpxvull20IuV2sErWZ7KZhId19AFOnEQ6ILlHRwUf35AyEVmUL5BqLl137EeEVShEmage4+E/N6PdKzJdJGl1AQGyb7NTD86m0Jj2+8qu6zsBgyUfiJqZ17fixKV6l9HGJKSmY9If2XrX/IhNZ5dvqSmODJ1ZRGC5gjJcxcdHp2Q1179SlNmXiR/7DMcprL/+iVhRyxzM2GEJ78q9jS6j/Z+0vLzdNOPo1KxD191ogYjl5ck9gnHAkbaiANaK4rrfMytDkNm0JRua4p0mVyVHWZWwatoMhJxVl3+9x37OkF24ICTJZ4LSKDLJxi9WCQbhgACIA1mjcW0P+4AszpbuSXOQkPtT+MQ0IxHMzX261yHAIPbGsbSzoTy+PWJywFdMDy5afXDTNpmMfpzWkw2fhBQasNoGHl2CwFftJdr4WWxuN6mSwhNVHJTw1xe4A5fa6bjip5kmrLQK85YF4Ron0OIOofjcCzvjKCkNkGVKBhRiqBoqV6Pzz1XauVHFhFgZZNWXI+le+Fg9SJojeDtFQp5w6dZKBJMxV2uNPqV0U4VOtvAas2+Ul4zIJDB/FJyDX8POrsR+VkW7via64xM1hQlOZ5ispEOUvmO/NWkAsJM0n3S7qgud6NaFqOofQZcbh5r1z2uIrXwUIb85m2t/sPJBI1J/Dql4dmzgfn/q6Siqi8FeDoma/lQBZWyEeGz+/ckHdw/BGPx5FZlc8xLegNrQj4sVkUZXVAjNoUguA5HT9GcAmE5FeOHdHtD0bdTaNFkQbKdi3yUlGA1GZeyPThwfBaizgX3i6oOtGguX3HQMQtExip5xR2vsiYJsbWXuzlKEws8GwXoiJo8xEh+TPavxxtZ7dDdnJY1mUhKTVGLBCqCrJ+uhWdWuHKvC9x++V5NO6WQrUiG/o8oOwkpWyH7GC/VtulpxkoJlxAej3JxlHn91cN4PstDo4goOhQBi9k2A5rsmvjGG75BOKlqvhaQ6BPOa+9F5D5H0RhT0hw43TZmJri+0Ba2WT3FigcHHYGtx4UJfyqfg7d+WXvpIynC7i3SIN3N7atg3EsWwPuzDKE6ycjWTD6ToKmYLMnDgl4PzOEBFstG12OdcuQwhk2Dy5uEdxqGfViy3fV+Muev0yAkE/pRwutgQjQdw0OPXyGoqchYx33/cHq1fDWmkXZab8wuVThcx3He30UI4rr3MMff0gxdnJt3e6YcHHF0R8fGwkVC03zWXI2hfqHq+rNQkBnIbbRnepKvJylmcHn8KVJ13Nm2iHRTw7B8r6fE6LsmUJndh/M2Poa1AtxfGBniMIfqtV0RuT7UR1nDI0C8Lnx7E2KTw1MXCLh4xzGr5wZ+4T5FTeUnzd6yc7EEduLxktqh7RpmnBBPRNIufI9ztPTmRPXgF7r9PxI8MI09Sr2HQq2ZmEs6G0w8l8WMiABvlG/YQd+UHGn29acrzSYp6AfggjuUV7PrCC4flKk5IGBNdUtUqFxBRUuvn0ln7HayAAYLJuVMNv9daBwqMpp3Faor/0K+jC0FhIan3R6wBpKSuJo/6jZJoSlSCLGCkFqM9ks3sgD5cDvxahV7HNOv7AisDws2LsVATHbF0HFeoEA7lp6NzjK5dgqd+9rA95U0c7w31E1E9GbmzLADC/0eSDKEkdKGIJ4mP1erpBOc+cdJ2tVP5e6cZ7KNhzjYf19tORINCTrPAp9/aLXnoHgtLp3ozkFS/dGowLZ6Q5XInPBchgiI4TVHDDxGpwMAZp3G3yM1QDptd3pxRSv4m97QIOa7ma9l3TCK8RA/bs/akYoZnxM92GvG/3FQdws1y3Lz2NjoikVSaX0TS1t16TupL3PQioaeRJLnTZu0WG
R20WLL6kEBz6cHJC3ZN9Zilnoje8lEm/7/WYOCt490+w4KS24aJcgDPzV7Z1npXy19p3ywEY0AJND8uurWeTEHIBJNxMPU2OMGd0bGa2S0yr/dfbIz3FmD06noX7/XKMjQ+gW8EBXAA7s8TA2RE0HbD8IGKlg3CCIaYsS4BbvK0B71qHhe/yM8qnUo5+vv1UpbioYVBI77UfiqqUDUAIIg+apIKJjU352GqXiEovXGR6Jeag+ufzPkPq9BqvyIfW0+3r2/wp4nIu7Z9XM6iU1Lj1j/wM1goktBnDfY6hbjHA0acQFCgUrzeGqyzYSe9kufDTSw7ePbx2rLG+fXa9qwqVwY0iBjJ8Hu6xIFmvesHwq0ySH0IqyI/Y53ee2hhju0xWAz8GishuMv4/apVLWQ4MbmG788ybGRxePWqYx/KI8M1fUvZGRXmtwAqEIaakewUVpL3QhawB4eR074Yhl5gY/ElwlcxNboUVayqJwgh4BO+/2tAutTDCtkzdLMjH4JoDpMNsf4GiLVvlSahU76B+oOlttcIm69oRB5BklrgbPCwqbQldsvvP3nHuFxBAlunefMMGZFbTd59JbO5UAkAHQ7XRw3MWDq8B3V1uCF59r4uXc+kvYFS/y8DTpQGKtO0RQx5yIonoNCbJjYWtx+zMACXoXWkrH03IQJMKmPM3IMbtMDMxIdqjD1hdaQ4dAnVcCq7ZvcbIThtCHX0+Vqo9eHoqA2kBtZLRq5rq4GG8Jm7o9mrpuVTLvym0goJuK2KQbF39CxlTG8eIIRKFQNhKC1XtuTGiIQzd14UsHWHhqhWo8uXHGhAvkl3ga8+5bDuJRhJ3ndsNE/tnq/VlJf329ATseDCLmVEDRiqe7CJeeyvMLgN0oE0lGZkmf2iYfRpB0zdkj6EpVdVZs2f/vRTp7S0ldwvV0pTDj5dzboY+nhd2hzR1+EnLPuUbVGqotTz8BWkxo9DpoGkA//5ZMeCkqFtKh3f7/UAWC5EyBZpjoPN3JGtEOdBRLX9pKrvY6tqpwaiGAHA85LywmB3UoudiGyifKe3ydIlMltsSpgc8IESwQaku2+ZlvZklm8N8KVl+ctF+n58bYS0ex63FfYoJEbUzJMcyC8Gse7zfC5MFX7nVQPWRrJ6waRu+r33KKllmKp1pqtTH1SO0N3WTP8W/npELnG6A9RnnsbtXO1WhN1HuyT5yv9KRaVPq+2EkoweAEq/Q1SGtJBX0hxWaK2UDRb4VRMHC1uDF/CVMCcfvTOQ8/ihWgrZtroDQ8J8TU0ICZVCdz3duvw5/C0eCLB5szT1EsMY2x1hKpnfS21Y7SCpG3SYv2Ii47kCex1A35Et/7MMwilelxgrwDCsXyObkepVwdrBwV6YF2qd+jMj+H4mCfhempxwCSlhXgwhS0svSPmPPAJOU4gSmcVktfs/CyqCKLzpGxHXjdcA41/gWVCeYDdjOEirh9rUIy8KlIspI+3y+XNdWrRfH9UkYQsjH7mwvixOQfc3NUvMLOSnCe4bLZ1gR4mIiaGwR15YT+Tl3AkfHu3Ic062iPlWON5Sn6ZOBE1FnGi25YOiBCdDkF1vGdzPb2SLBnucVnEqKfBB3/0KcMrT6bDApKrPxfVQfx7YJnKO6T8nddFdPne2sr2Joz+QJ4DR7nnSBvu0VEZTXLAr+K7OOSJwlE76WYT/oHDHM4LivUit0ChnsUegNFwD7zO6nz3OWYzDaB+XzVr0c5wtpZP1IYRCs20L5jOc2P1dzV7WHErHJ8/VhDZ76d//2SCCdjv5kTfwXXHsfWRK8jMV+TZSmKlKgq+pDd9Um8Ao5ShvGqMz6TThFihNrXUL2xCEXJ1ki7xL3fTTCgK/SlMt7NYeOv5xqIdQdc7tSjYt9y76UbY6bVe+i1H3ppaYh2+oBaSDyzbInglXpHEWS4yJfh7kJxXV5P2u+LeOIzmz3xpZJJCiRjdW/Bl6jbAgERAoIQABPRyc9I9OY6rL6uNAQtPDR5Idnxvsr/kLjKr3IPkeLKCYrfZFezkr7rp9oK5b8V5DjrRTNQ9+j6CqdJDUr96ocK0wvpx/HR/rCYmqave3QFmKoGUEXvqgxVRrd+sjgQlTY/1X4CgU4OYSVV8VJaV4TgLr2XWoc+P3Qq+QBNT0+E4IF8BkMZp+sVDYdvloYib8L0urBn9SZZPVGPsQ1KZZQL6rXwWJ4iQUMCYsrJRFjWWB6a++UtQVMzBgKXpeV2j69z+xlqM0Bf5QO1fCoWfsOFzHh8Z7PoJ0p/2EmR8xryZsvu7fGgNXEXVF4fUrf6i52DwAb7ptUP/PPAnp5sg5lP11byyIGLEM6hCEKbJ1uC77oNY6q/xWowBMHOROYYXcqZKGWdOo7bLPSlC3EYPj8SgaIGW7spy/xv6TCB3BaYeRWwb2VQEfxjAK1sMVYPASBhqr3jWgoKeOFdoYJ7el2BLqprHod1Vbqr+2ahq2Fjt2WIGt3mjmdb8WnGht3f7xfzbX+CYGATPzEKOOHojQJ0lpptITSm336cwdW//4qo4XdMMo/cnO5cKzbjgbAdI1eCIEaSIvmpRgs0PNQuzSKPZ3GBqvPLFPeePeOZsq+IdNXs5YqPTw7BdJ3Wm/VZzZACBSbdjP3Mbr/yG+qEIx2i0x6I690twqy+fxdKy/HHcRGcjiBMODROq+cpxRROjxHqd9/8udNQqjqcg6j/iMzOiQv0FQ9+iEyEzk/jjF8rmFlp9FtSKe4FJ+ZgNfKFAdhDVt+cu5MpW5NZJ1wKkOM2xEzSKZlYrXx1MQbEqsUb6uopkHWoS435jsGrkzgjbDUTN2SW21o/xaiSJn7/27oUiezK7sKqK70Sf2ixdqXQXwBC6sBItE6aK/VFR+r8YcU0ysxzj7WhJB+CDNatv4d4M0oFZkXB9wZ7GIPD282KqAUM+TUOqMnpLKftZAEpRGC5ck/keBU+J7/vGO//HUKOjtPsqYPPV6qY1Pc6jrUn5RkIxzc+qo5lSoae3DL/e/7a+SCKN97Elac/bOtTRy/of4jYf8HgNQVd56NxQeoy+fUboH11jwuz3BSrHmBLnbljxz42gglBRFY4Zw0Vh35KISziV9yXqj+a+72dj1iOXCc0w/27E3gQERaex5m+8eGTxKb1R32HKV9Ww94UYDdkLZwW3g7sG6uXO9+tjJY2uZk8GHFxyYlCUB8a0URVNVMYdKDHqTuhrFLOv/CWjCBg92VB19bwSGFWEfwUroQlZa9nU6FHp0a9SgpLvq2VSeReOppoSngAuft8vxNUDXeDRfZfwf4jtUdp14zLE3QvSU83RKy+Wv/4jC/Y2ro7SqZ6wAWIlYr9Js1ixbOyeXu7e99D8sjWZbB3QMD5zYpsW416jOxZ0OXKrRZ9om+B6CtGgugjxZri8us9VpZXw9Q5TDcW88Ym6Dersajy71qnndzvo0K2FJBW7EMi64J/2lr70yAJADNU9z90B3BK0X5junIBbp88MfJNKVjrm7VV4DVVk5YdmpMqxWUVW/xj51ARIxmu2boXSpUxHs9ZXAoF1C/OoIVcM/7/tOtOERzUFF
RClGsw6yeTEPvPlYY6eKnKQJputuCMD/+qbhj6kpxjclAnfEJMr+Wa/QnOLp+0/Lvz9gh5hyMdgYCBIaPe1rJ7TglrqsdcoIjHObvMm2OjeYdZUAHB+Hgozu0H82XC+OD57wax1n4fw+YktMtgobt2YRENRAcyYReehwfMKM0ahR6GVIdRCXQ4RggEbyQUoTArKSS13JpliMLNEhwocFsahqxazDm//tadLKCPEjnuKrWGXEwiHpJBOLas/J2HhQEQ3XKMDCAGz+QIfkjxGvbhYARpBTgf2AWNoj1BzWwPWn1vUQk8v7osEoP0s2kaSencOFlPfRzkVowKJAnR5IZ/xv6lau7bjqsOnMutoKjJ3lWUzvjhuvAHUh7AG/t/Uubn0ZdZalVIvDR4xcjcRdQSsyxcVKg5cw9V7e8fOFocHlb/JKYUqWaG7edondhueTNK9n4YAwjgykPhcj7+aJOWJAP6tTlqIt10lC09mHIkgfGdEU7gGmODgXMj6C5bW51TGKi38mtAs4YwCiUJ/m1x+yGFP3LBsB0jswMxSIL1/5B9djzeqbYRoZAUoBuS/qPzDtSNqOO7ZLmCb2YL6vV1x9nCEUkmIvEyDNB83MxZeMMv3cIp8VXPx8X5U78sLfqTHlq8dZnhvGs9zwVOUk729bfGLuk9ZQxHuFwoodFOUMLTdgJGPaXWjEaY/rdzKnuN5GDhtJ7MDqipVFd4O7PUNCjeqQo9hJAbPRaCXh7cweIWcBkVl/0df+Y4vGtmvQEyt4wvQyYYCCVE3J5m1UK60Uf/DB3OtM08Xcr/DiRG6zdIUVcdpQzRBRIJLUoP5vDp/jj4qpoh+bsR4uIQpvU1ityWixGiAAMVZuuvnJ+G/A7mc5naLN+hH6wELoqRxDbUqNerfxulkEKIpPwiZ3l5AI5O8yLiG2Pu9tPj0QoTz5neBDDNyx2EyAlQh6Be7hSZyWqOuS5YWbs+h+XVmsNdQaY0CKDsX5NjgmtYeh1KF+RPYTs44982RosMVUnijKP5LrtM945zk38/RZ5qR/Wn66Qm2ToKEiTnw5wQFFx86/lZPeFDQKpsxx+qi9rf7pxVALvl+p7vehLrNajnFDAh5DvsNlWkID/jgipuNSFIN6TsLuMvRAbqWWJBpOOVaE9Mj174Lv+/C75EJPVMUAkzvBpr2scTNl9sSixXgdFsc1TZ3zXs+vV4AKuYjw3Gq6dmnAj6Qu0XaYfgnGZqz4lzYJIff2mP1AAPHN7rCfnlza03cAppazc1WvTqIC22Gx1Sn906cdcG8LUobdx08sXTVxi6wgyqfQUuU+JbCpH4eoHFpUMifXmGHRHciQCytE/UIOKTPX1JNFnRKmEM5DYhfD8/wi5nHgNS/L6zHqpsrWfu5UyvumZJ7XA/djiZ37x7JdpTVj/8EgIn146AYRoVlS+V1xWDOz6c1BG9BUN8ZWdpY/Y4W65owEN19CNg9eKWizEQD8TH7X5rz874WVlrsEuBOTN9feYylhT0uyJCAPWX/ARhwX2iTSVsIemAGwI8tvoqq9u8vXU/j0+EtiFYjBm+GTo/E/GqLjSsEIc+B7RnARWTjfMNqNu49DoGVLUtvQWAoZlYqGLGpvis7PlO1tNIRbhaXcSXasBbO6DpASLBZwGTfZzpm3D2OC60v52f22uwJx/2tHRUILWXgbmc7/kWnkb1FZbpUSfrkxiLcX6cK+3RLT//Pnbk9wva+noJ/aVFb9ldBkkAk4iX5XYHSTWf2IdPe5Lz1bBB2Y3WtFo0MR1LKf46yQncL+FbzWTLRSHPY3UeRhVg3FHkH6MnXYpov8hHwZ4FrJaT7LMmdj13DL3HF5lwwYzvkclyUJ2taQCwnXPlgXvWRgmYfNblc98/yn3m3wWzx5rS4gGFHqBkJYwTqW2cGuRDVZ0V3t3+UfzqIJmK8nXpm0GKjZT50PfMjsS6+uVgTHaQ38HDFvpBM/1z2Sh2fcGfbkxVBWt8Wwl0Xntt6tYYamFGfqR+8W6VRVQJitb6uZZiA+wcbO+kfZOw55VGHld/USRiRv8QuxGe95TZV47f1CcCJzZhWqiaNH65DLsLAja7DeNwxd6CHaDAik6S6rD0FyZ9PQPaICPPI4/xAo/0ZVnd/yEc8OI+3yM4Ks+YgQ02Gnrl1z9lv2Y9zytEPBDFy8iWYtiyXZ8i4U7AXOGd5i4h3jKPlW7h0OkRKiSSh4TgO7dD+5Sxk5kAMUo9nxumcCmTBWL6i6yRnsKmS0nkIyZI4wuEihk4Icof6JsPqrvXxc9VgQ6QWQ0FgAeubKbqIFgV58l2JK4Qfv3JKYrKMS/n/BCjRVZh3DfkTcZzQg+m9Ytcze7bv52bN0S2xrDITaw4q0IKPgmXI5Nwb4HA2t4p0iBHgoqtMbU2tkoVyh16EVnCwnS/IhHi4HTlcKSNDCWp52NXf0cWGjgxDV2ds37QYD6JoLz6Jf+NIUElPQ/CySdVnfcTHK6h1xjG3K5OoeIboMqJ0WxKdRm+Eu/2OpC2T/x4i0YxM6pthPXUQ+tYnjYd4csTbjE9aAVexoM+ARW6WJj/utUp0VvRQOiFRTLDVNJfzG1YUDXq3u0cAWkezq9q8bny97HBHP5vnjzymajF89NHP+bjZrvPNigJOXSPybJPPFLhTPZGjryD+78fT0VrvMHkXutC/Yqa2OEXe+jYXOhx5phxknCngScLmIudX2c/fXXxxoLeJHD9Hjv2ASlDszSEuBFDawPEMuQaNf6sjTi3PLgOaVZDID+NAh9sw3RqcnQjMcyR6ojGxkDpzxj5VBNHxbPXNuAUXPNkl8KfkAgwbP1qBWbyHAzUBg0+rBcRBjnD+WHkhiJRqKW7RMyyGMgpk7E2p75ZsdtjDX1uzxJ99QT+q3qEoM8qfAMniuUoxeVX4WWaL+eS3aDhE9hJtz2qVJjx/oYu+X6tSjSoY/3OHlum80NLM5h/tVBXi8kSFmtV9NkiGPXT3OVpEodhhCXBZOblOTOkolbawoROX1tJNXpNAJCxz5d7jkjPM/VUoBrvtXcfMBJOGyAgrfCu/qZ787tsi49ZwMKPjW7SAWzgzsVVynVS3SyPfUs69um4QESoW5rMqbnh0jTRCiCGAjK/2jDjhqpA3r395j0TDlQh9goCzwzYfEyFEAPspF73GcEcR2eb64S0bRjT/SUrPrRFUSV0MhFefwXwd+mv2VcF7Zr8GzlR9fOpngy3xrC7GkyeSz2jNSwIkpssLpvXPbG4mzXs4WBFDcDb0hZmFHvU+fLI1+Do9lQ3KbSyCXxA3VoveSEv7spX+9EGJpHjesN8cPcjChjVozfOzGWDXw9xRAFVbE/eLLrik+ftGqzmqm1zNSbXInJqfFmgeJAH95eS7j6r/kqO6b38rKtMIRMWj/2xtArTtpqmEbF7JgQNM56dIsKgf+Iea3XeV2A5wa/d1EMj7omPTUezw5beqBExgShFc5xkibXHuSTLD/ibQTya42F514GH+1CpmXJ2MtoQMBv5mxJ5l+HynS6i11kfku33m6CMPzv9H7vsO+0OMgK9zf7qOIPIN
6tpOkHXJPy6ytHkPNJoQ1SStUawwwddGGOVu0u/IfaCp47sLMqIoUAF1kZSt3laLGeW0Y3/Mbdb5j5NwK+36XuWUvJs+eHIKRvc7KqcW8Ww+ReglXFdc9HGmUOHV6t7hQ6YT059ThcDZQf0JasLJwFPAo9BfHL2sgBUdF4rRt0jLBVNaXbcwO+tg374KIf7dHcKKkPQ9HT0fzkBu0+SlsEJfpqMklksImd6Ls1clJSORvKAnzcPvSbxA2vcGg++Lu2vdqSzQXD+2BegqE95A7h0Dd7VH6AvuqosfLpuarI5Hs+FX4H6vpxMa9lb8RTIi2lAI70CgggBALr8nb9910Az4BdF02PCn0uM5oa1W94D2wQN9sW88ivd2pXMRlht4y0546P96ud8Daxtv1acT2henrCw1S3I9CpR/0HDoKywEzPgN3JQsJhDfsvEhRCrKnU9miwvjCe38nlkMG9PVZmVTjlvt5UWihzbTnjv9nBSnQ6fhz4QqqRBAi8Lcmc6IKuz7CuROsY4lNCHW1xLcVoKJOTOMV1DUKCXn36K4bkiYE0lhWCtAZQBVHkJWupZpogjd5mr9qy8IfXF91iIPKw02XLgNiclPX6q4r3m98aMD0c/slvsIH0r5fphjLdoQHYPt4Mp+Vum1cGk+ogmpcwSJnBJ1qbrFvlBmcGb5LoMd9z4qhvWwWVOKw565kyWkaB5WO4v1KFx67KVdPszzAUF8u2Ac5RIPY+4Db8hvTCovDH2y3q3mBynYJX2FjHS+3Q02E66thuzHfbxHIKHSazq5gJWzr+hYfal+5kZxOfydFMIC+jdRmFajNmoKFM2LOUlZMVAHPVTK40DshixVjakvEMUCJyDHURyydgDbs9W0ElSYq9mVMXF/2m11KY0Eptzvuh1LkFHIfDOdUCjKOrsd7JeUqF860WPgxHUnAas5HKBTM2xNXEyAsQXtQk1jU/CxKgLr3WDLF4eQ76a/BO3SeGhytpasDKUMQiqXyN7v1gJeBQoyiFitC1oHUVVTg7EgJfN0B0dFWKL8iyYItWB7xKtXHPsedU9EWRfghBAxoAqf8GLW0905DMHdnIQKg/43iaKWNqmNqCVRMKQnShA6GN6tOxtvaVV4WRNtwtEuOP2U42cNA702e0qFtmWDBjARuee1qhJCuklkYdDFKrzn0MXT/5xxNCtGVLeZCFPWw0uDUQu+HjD8Izc42fnVGS8fLwGLjj0Ajnn/MtVusCHvUFJSPLG8qsCXBuhsywmtpZGKKe2EP+KKphBFfExQQJWXR9tbBGIcygK9c6wj3Tnrwii8D3oIGvEgnNYWUL0pRVSs6tpRwzXwK1el1wAoU7rUQ16UoJQx01tWEvxN7wTsbo/V3IHp8F/UAMNnK1GQDZqn/NDR1Ln70yT56kqXsNf88WI38eox55vtOCePiFmpHddvRuMZrmSu9FFQtd2rK4eDMrDuGxFJh63+n53iLFlCBbNcc1XV5CP99B3STPSzYHPS9n0aCoiDL5kJ96LelFEkFqr9gOhG/3JpW7rGw30Mv1rFN4dFKn58dSyfi2tHbz2geuIVG5BEhujxvhYg53CC8v1agYd2zlSPQnCKU2efI47iXbGw66l1ACwLWsI21pR/HVt4YyjKwy8IWJoNPPN0AjcDq1Czis6kUXfmLRDks7DciEdhOqT49zQyn4hNebkFg+VCs3Y1JfMilRYdCH5aJJn6g6w9wqE/qCx6wQuq/7Y5ImEpKEYme40uqJMjO2oekz1FhsZ8PWSku+d+Srus0pQkB8MMjHoFrAtXi0QWY1y0wo6Ci1kM6T9wbVLmF8hXkfqhEdB+RcyNqQeGquNxM6rU2JKvy/HLwO+zTD53CQC1ToYV2+5MCRr9+N2/CbcifMUN4VIEn1Eej0zwHF/yN2Dc+UYWiyEQtlG14z2hlkDP0CPGq4tt8VdftJ+HvCw8DXvTWTnLnn1Zp8JOcQmEeP99YAYcjKhKnol+34BK6OqlAPxBhpdin+TRG05T1CoGS4qDFCdS/mIdCVFv9g2/QS1SdUQIS52zaRHnQQCSCWEa+ZSTfRHd58wlVwt58M3tCbGyNiM6wA90GWFA+zPn5OSuWleAC/cHp8uaJ5p1tC2CPYxbU19N/pQmg+fwNTBO24wUN+3zJXC++eGtiFofpQjnDWXLH27+oIG+YuutaWh1jf4Jsf3HybnAmDBUf4D39zprOur24+buf+h5uDddADdFHnQ8GHo7txQ0pEU1Q5L6tUw7JY4zVLZ7PF04Bl/XLIRHwb9hGoAEGsblcahUXa6SWq6oQmyoNO5l91ZDZk2ovSdq0kMrEB543Y6Uo8UvPIDgOwvcVhjrx2BDy7H0YG8rMIerCI+4mXi+xrU5Akhyom5b8TFqsEmZN5lvrsdcNtYc4/d7qnkbVYBZlx2MyeDC+ch5f1yVBY1cLnpjFFHFUXZFmpzUrhXPc20vgeXnQQgqQtV5fbQDYUGz5KIe8d1wVGIVMut1rmRa9/dspSJMmE24mNe/K11eSymPBI+oSmwmo2KobIOb4otMXXGiNmwVSN8Yv22FoF3u2zgpx6esCfGLLScnsXOpCf0f7aP4aqwqN5yeypzAlhF3+yakuuv0m/dHUEhxuOqStrxEG8ShJv5tkHsM3V1WLRkpBAadXPy6gSysA265grR8BX4LbUZnFqvoDDNrvRSweNv2HddvI2fcgltJ/fIcEu8Qk/WNLUUWJXdMRbaUwO9IPvhQULFEUCLdqvK5bB5oDnUQQ3FTq7Lspp/naoolLMn7k6K5gx2IxQnpq9+iTCzU/vrKL+O7Mi86AHJxPCr9tk/MPEqzaH1SjA8zrPqdZdtyngTEMn5ZPiHV2zMUuWPJ2xXT2zrpyx7mVXJdl0SE2gbnOTs2/5wFPy9aTKynFtKxZB1y1iWEAlBWsTnoS8FE+6CBZH01xww9GRjoMi9xDee+wXV/olDo/dROj4RPYSvIeB3tIxorxRR17YjyzZPssKDGTvfzKM8kqYYNE/BqEKBKLCz0bhPCCWxu3JaVJomTVJTrFy9JzmBMy2O3sgLRDl6X7vkqOm1AoIIAQD7nE/lfkcKEttlB0HLSKT+yDGo8kJAR4zKmi5fZpVgWYK30Aib5HFTA9BHVZElnhTeNyvYMdSO1FdtNsa7tQ1/0rD985d/GLXe/f25PAbEsmgnFMmc9zSmpLIZ5vTxIC7Bk73mqwwgZZxSNvpqurbUO+787vMn2wKC74fJHC6NF5FMFrCypu4B5RLs6C9fGjRKab1vW2mi2967gCZrB1celCcgkBzN6XA7tvjDozDz7JU+x7ugmBx+6MKpsLc/FPrRgEhwWdPIsV6R+vOqRugeTBtr+NyvFhAa639l/e9EQwpEbVJgbNg5okOZliYDF4UM7YADgv0aKJtir+4xN5Cka7Jb8vyYIAcchy4cjz8IDNK3SuvhRmPTbEOs/xwZpoN3YqUiARI0RvYznaByKpOJSpxzqqP1W/026K6n0KagIjyQht6p5ElpsXlIgcH0fwpXseNYl2pQAzj0jAGFaJNYSBdgyQdZkoiUDprK
Um9dZfDL8m9FFpoDV+BuJmxDe2XUpLfDhTnF5n/F9wYjmd4Vhfui0HA6kh0dLvOS0EZEvz4mT6zD7Sxx+T4uyZJE9nq1KOEpQTW27mzJad4jXJkiYe5C33DSEdOpwVAu8pIYFxmcj9uuNHoK2hpYcst/wYuNzgAHB9LuJaJRFLZSXN+IVyBWU2S8iejIVYzAKhm7Pj72hIE25Z7oQE/MniMQeUgmoIlqxbSpnWho+K4koZGNIyiGv3N9XFTjN9YCdWSC4AVuyfyKa8c8Wl1cWggnOwhj1CkFeMCK+f02a64kupllLUL5I2bzC2drmjpdEGB8m7KaCWl+W86pWKHKltns7u6Z0TlEPCk2Y2+ypD7GEicZSbMwAPt5jpTfxoMk2h9ICzgDbFPaJTtAsYNMiAYz9Sa+w0ELdSYoGD1OqN/ZkPE/sGRcXfAk4efEkfRDbCU0hiH2HMbKFLhH63/RfGSbgeYSGDHTs66JOJ3htSh1arYOmkwBB5v33cnVCmRiUGgE4QijTnMmYLKH42txfzD6fU1TJKUr2woazXiPvpS53tgSbO/zmBUE6fiFIaOGpT0iHXhx38sDX21VPVY4zwkYvmFNKliwgnZTZiThCNF8e1r4W5SlOyoCm+cc6UnPB1XOYx/Nd1W7Njm46rL4rsfZ2w18vATLl4ofn+6M1dgN39FO6ueKvZzxHUH1Gp2J3Z1cphfke3+O8NKi0BmIe+TjfuTzCt6l/rkr0UjKqXqYF1OedZe0kwkIRDmY6cY+gQlIdIFOaefF/3bBu95mAozWMTtZZGAPrf1QM52AJ/0fZKjoBvvZTVbeP6TnuulOcahtZVGDs3Q2Io9d5Y/c/adXwEyizH19Z8dV/ImY9JdmXDB80wDodoo0/uL8Ig/2NslKCu4KtxjzLwgKHhsz2wWgjagn3AGkD6nlVdElCPwRMdHW0v1Ld5RzZG+oXD88tXe91cLH7YY6k44pB86gD2EauwqDPSk1Q0TPy+Fj8sLEWwg/prsVZWMvwLvGCRRCCUWiDJhuWT1dzOxHTcbLJSAqSTaRDccvIrFR9YdqqmZtinnSwzByzOG0xY4uO3j4EhK3GVpi6L8zgoEqP4F1vU1EwPn/W7VfsLggBBRhG06yk+R4zOBtUNOHi3Ra/P/D7smXKmgR5hnz8tfObTgCO6FdIZAnP7DbS4bw1eykk55rG9x/k76Kd9iB6PtlnTl2gqaCcx/JX09lhWNbXL0NL9J1T+aEyJiHZyViVcHBKjXaUSlf8yYbuFMSV82iT/LgYLSmEb+tsS3bm6Sa1r4uoOrET40Dky88Oru7hoZ49f1HJrGLhoRlDO4rCnXV7QABqwAE5qJCDZ0Kx1Vvs0WrK1yypHAjbmK9O4+98Ih+65HhdXoR5Ds2Yj1ovv+d9NWBMEQpLEpOdtEoZ6xqAr1DDgdPVg5wSPtEavKOEfQWfPERqCQC/oqcO9rMbwEZGx3wcJyIZZ6jbupWGcHmSu3bvb0sJjdX69wQGL9Gl5WzR3xrqMYDX/ObNKml0QM0//SX0+j3FhMzMzwzqDc79a0FnXjjMBloIRVWsFdGqt5ZF8fXSEkHejycJDbyXZ2amxtPN9LgOZ6GvboFEnoEpslW4shx2+zO3Q/u0YYbaLGZu5zKumObpau92s8clYwC37htg/IT/JYLUVvSx6HaWj3GaVfvFlQ2/oH+Pk3MOVAyx1GXpZoOtjcs44/U1fKVIIAn0jX4g//wcsdt9jdbdU1PD6UpH5VlH8xJ3fNWxr37R8nIw8HzBnbrgm6PWH1wiWzbZSR5dAn5WUv8MS8JxMKC+QyNjZ6/kgfO1Yt0PV1EPJ4ji6A0F+akKWlYVXdbgGVQyISsje66u4fncZOMHgVwlF3X2sNe+ybRMUTysPsTAmRm2YUvIX6b0IGL+CcSWMKM7PeCyX+utfIn2IWZ0Wa5mjN56TRFBx0b9Xdnq9gLbx+HaUHSLERJloYg8jfeshmUIha6qfb7ywtLBixXcJTQUYtlXkQJ5pzXyYNWqv5gKShjAxsOMxvg/AvXw1g2TKjq/vZs7X+lIbghfEilIu8UUn1r2Lkwak0AI4si1prjsqNCaxduiZGGjeKiOlDA9c+72AmrGj8hbgCyzOq8mAYTlvadCUH2GRmQQnGVvw2pxoHpFFFBx1ZPWmmU44lnBjlWxPfQ2Ic9u1yLYHEnUVYTxDKHK5bT8940F86YFfjozWK67PFKWju0iuriL7cvbi8yxyeiTwKCCABX/mhaHRoAGGl0XRgu8izYQk5dgoWVp3YgBpI+74EFlZQKQgL8b/JvosV6WV97/iSNYNKDHGGahuFEFvroXpEE20rxxXjJvEVFlrCuRBbePeFQ1PNTI19GOxtgFmASsOqTenElUoKioJ1INJKggxPRWCTtnhmeRP6deD+kvIyJiAEHFHISdbUFgdiM+QyZhAnLiv3RFHGTyInVFbzgmCXxOEsOX3lIEC1RexGW6AC+Hr5XE3YT7fQD1HSEjSjJwfHdEd3PTyucVRsI4ftdtyv/X3nCxwswQekSeFPvBbTvnC/9WxULA+IZcM7UT/zf1go9AlfHmbdvF5meQN17ueyxiEhbHC9mnHSkOMiFkjzkYQUz/ZmNdAhLhGYVvCfTgOdjGSf9vgWoAsysADZj5cKd/EK0TBzLmrLqVgVm7PxJuC1zvxmA28GgGN5DKrANCP8Ky9EuXchRX3tMZRX/03llAtDAhJjln0XMuH4TOvPxlAYMEuXMzjM+qC9r4e+CgX3oAb04y+xV8ytq3EBJpxzU6rlWmDQlVgeqCKbpIRjViloToNyKctuUcrQxKBXEXbWef0Y8iQQyUSlE4RfThhRc+D2uCbLV9wIXxGBgy9zp+Wq2ob6a7AZDpvMh52GgtjL/HU0OZw02dF8AxJuyDI8m3FNPXzvUdngpbd4nmrl5H2PZIe+oKCS7p8QLM6064IKIulPYwBBkeWFyM3bNI/0ZDa3U4aaePJmluaWIQZRhoGtjTs5Ty18WkztdbkfubFXxNy9qnmgS8V5M7nNCFYZr7C3U2UcUXJM+GZC7HFS7voSr15JIRpxH4gM/0kblyAUibAg/pxjI6x3FOCWk6j6AUXVULGta+CrZBpzUys9H47x+hhCpXc1clO9ninAazS45Xhyb7Bul5YY81zFjMHIyW3ajl2NgEjfOPyIwziYd5qqiAILL2vFqgv6lYKtTi4F8QWSdgEOCTuj1AWH/A9MFiabM3kgfgi+RkFSM5j+NkrUGSqGUtQCdm+noOZA9UzCc6CmNJjhYgb0MWgsIfBK1aRaYBmfZEgAZm5aQmCGQbSVRNosibkq2S0WKIkswx+V3vBjiLFl5IT5WSjrfyZnAvYWPqB90dBUGpLq5xYP2tyD/ZaMOVl5xmPS/b70VVkdTFpK8dF6u+coe+COx3G1BAPbwLyHSI4Ta8xbBQd0u4meGfQKOjMFv+nJZI1UdOtyMOWK+ch1Cq9HCVeLJMRisWttYTRJWwD3v4thf+wS3lZRXNcJe8fVRs/5hDPVlEj331ZDxQ9kjT3ZInw1kb/GrmBR
COmoQMQncJJ4iSXBRiNl9wTVODt5y8p9wW/l4/tUjGGs6vJuGpjd7tqD4RiMzsVT8JATcZdxMOSImx300FwrXxh14zDJcUjLSR/MTibbiZe4VvnXyBef3XlervqD9sdrN6p9/0d6qyq65j1LhbyauEt2AFVl+nkhCkGNQG1AVXFSJ4NOgnAt4D7Plm4mK8d6hgQqnlbIynRFSMoGXqrRSuBYf4VGAdZTFpvruKZKO7bxNX/wuzpTG/l8I+nR69L1oIDmGNnit4cfvxWO3GoTJp6b81gsVKLexavCW2e5wFYOoK/9yHTu8j4AZYY3VIX9Ic3uWInWJe1O2laC0wDW9eQTuL/3g8X3yqqAB2tWyDebSn6e67cr4x5NhBLqASgWimpECey0adDrVCgSggA+dZRV6fA2niJpsAhSonzO+P7/ScTc6b/SYGjao1gQpq7nh/vioPphvcMOQvYRt0eH4Z5Xwjk9ZmzfpvxNGrdkVaBpXrXWs/+JGAJRwFrylg6uRxSs/xuxL9PBFtmegv5x3Z4Tx5SojnYKoTCiSzyFPCuF7uAEeeReGmGlY5m999oVwwcDwxKjiShh44IIbNSXTuOjgJgi8voJhFKq+rZyC7Y3MosnbCdLe0oX5cXgDSiAx4emb0L70D63dhNdBSMRAzIfrKilZGtk5CqcJs5vmJBTTDC7OOZBDVQ2fELUj2hc4p2F3S8ro1oC1hfbx8FEBDoioatCFGOPID+bXZlK285umMC93t2jQhlM6C4GtHSUEp7r7S/PvRq6pLpwwiGw7CKAKc4BXfPa81igg3qEjCRfeRywkkUpd7P6Yh9cUZKh0JawCXY7bi4WLCjzbEvq6M7BXU34O/uqgQJAtv3mYLLMkc4RRPytT4TzIUxuN5uKuJOkx9yZViprAy3Nb/kyzoPuIOFgzPIrhO54w1bqvWMMv6MW4cw7sf/G5vIuz+aNfRS5HqGlgSL2cFoEllTrxeU6JQRQqy8t/kD5nhJIA55++zc9j8yAHk/sJY0DzJulv33tQYstVofdkSEmUFmmAYMrNVB8BnguDd2fKLOpeyfSw1stu7y5DsBjNrzi+/q2wZr2naA+Fly3FEXGHySjJUGwWz9LuCYgGevfZyUT9aTsi5eufmlIG1/PJoQFy5Xud4TGAVGPB2BMs9/b1DZpbMcYW54M5Dq2eqrsfCgTLZ+jNIwopJJuDzLybSC+EA3RvbzYRrMdCCvgzQbgU7t6+9WTggPoS39Fcq7LSFqB277kIzIXQm6hD+zKECmHmPN9ruEvZ5EWdalz5ZCj+NSe2xXjW7+Pd8HYg3Sx81IllU2azy+C26QDiGjbYqbvNU7DOLvY3rQjUAXVJWkIusxfVsQmO8biXxE9iNoDPNEARvQzqhNyExrr5kMmVDbgbD5+c9/BeI2tmV7SUp9cEkQKCCAEA3gJknVFNvZgq/soq/qmChnRoDYp2sTAS0OJlJwApcyHNsT8Wp6tzDQNdbB5S5PTlPIsIkZVhMrtcMzBU//oa+FB+DUBYfzPrxMH9/cuNgGEuuRJXin/FC4JCy4+M1MILI0YgB8QZwjuJ7jCCmmiDM7xpdcPHfYUCN0vSKeuwmpxTBubYJSnhELsQsur8nzU9MpmJB+c/Fzp5PAepbX7yhGSa/p1Gl5G9Yd2uUkSyuRwLN2Tw2P6vu0XY8BRlc+VVx+mpVBMGKY1xj91tlj6QkzQYMhfRx6oONd7Z0nal8O/b4gYbgkHr9p47paKaArpmVrNw9AoqnpxM3ps7lNaszU/3uosbHND3N0oZoLqhBxpfkquE1dSyb0Fo4/An2mW/SzjsDvHi4tUzlvR+gtpF8ZwvsVpUbxTue74/wT+iFNLqJSu1aLpe5MnFXhgjm38nPlGqe1hs3TAFFAMQZqeREakFkaJRx4FLVXZMWCqef5Yu0hIl8aqH5NURUiHnDl3SUjb8f1dvNiW8CQcjiNMPQCrtFzBjBoDsgyltgYqYWsbcfCgvBzquvur6ocDqeRW3kMm3nN8vZSy6V11pprsdtOz/aC6QuVsGDkEooeUXfqr4exWFmbXVGKJTezgc+EFdBKa0uujJLHuPOHuv7lHyaT3RPRxn8abcdIe4bVJS8II3jjiuP39P+hqgw5qXaON75djxuJBUHTCJTZAhL2FiT1tB4E6TFEJpBLjL5A06kZh9Q6MqH8iCnqoWJE9wmxX4WBWNm2qLxeujMASotv7/0b6GY1t49JGXfQ+c6LQY5mtDPJ7knKtb/tW77v2THFpaD0AjeHFRile86OtGcoh82hPaV4hla0GSaxiR1TjubL6a1dgNwHs0SCQojtJf0331AqxIc4V8BUKQcpUBv/hcZV9nnMtba9ZjFtsi0hQg0/3huwpVDKje1gwHXnzRPesWTDN3QlM/pkEDxydf7yHr7sRhLhXF2rSjB0Vnogq2Imw0zFRHfDc0HYxt3J1nc5u8ssX7JrI2F6Y9M4oKwh764xTTuNF79UbqV1nqo/s18OzTr8V25Nu60r2mblxTUhFk6bvz5wmzsv/GL/i41z+qnudlCkNDL3qAoQoT8uhaxSpJPNK1DplB/YPLF6lG7WbtyGmp4NEBZzLDbTUoDD3060e9Pi7VxbBnX8wwptKZ6FZRUSGsyWsUNU40pZp+qp0kXfqIOBz9vUAxK0o+/qsrqe9Jn1SPf8O6Wb82c2LL9KMIrpmuY2jwUJa1LNUS2xxhixxUwop2GZb0YgUqozqzJxU4ko+I4jgoF8MKGAnu9x0pzo9IbABgYeisHVhIXHx/2vCq9i5klyofDn12h36FIthMGGiYEKSqKcOzuyFIMkXhGINwXhpwgWXbxFfXyeZnMjqYCTr/UeJPIK2THjsEckGyUaW/OKPqDQYZrgmHxZ5+sGgrJKBQQlIuyXb7U9I2c8yNxZW1L9IDG/RRgBQWVkfSQA4qV0+0vcvGlJ7E+GV3cGzbyzxYAq4Jwk3vNF63rSpGVsRCGyPv9LR4fsV6jMpX7NLlRSbIv2Gm+QDjVkOL/Ot7h82BByj5wj2eh/WRSrpUvdgp/iG3oPn6JRkU4w8gYHR+aIobX1e0f7STwZ3jWxZTIor6pxUTTUOsf1nZjAsFdjOVLtrf3IJAfKAc6QnkXA9krtyhleUUlb6S65LBsa5zO3WyBVHT/JOblK/phDiGlZc/GofnMfgRZkec+k8Dgd7f4wIt6ZHWTYKBRzzWTfav/gHNeZBNdG/eNL6pmb4ano4tLP46arruihMVIMH8WSmG2q7gcXbDxTyHi9qPKzkwNq/h+SW18WJ+9/qBEDQ5AVKsAfJaUd7qIUmJ040lL/xUTV075bnpkBuHb5+M29JAFJe2P2vULBtv3Jc56pq/lri35sSME9eniAzUexzUp/iT4Y8fFib8TJ+ZLQ5ezHDs4o8yngXg7xDUF+V8IGazHUMDICtl+IpeuViut68EH4jR7KldLsO5syRkJU+2lpaeh+7HUwXzBPRbm0iO42h8PW5rIDxp1iQKruVtnS+e5B/0P0OLD/JFReX2TWAEWMWGBHm29Quil/VHc4XQ8sMODvUb+h
EVLUsv/iv9iVXx48ERGTiotz3e9zgv84SEZFbYjM5DhG2+CWwCS24OEmgYWM2P8G7OSuwa0RmmDPshoBQVf4+ZzuBxFBPVRLC0pvvdJMow2DpTRcKCq9CS4MG0QS1AH2QRCuT9VsrYTueWGxi75+Sq6tOcSR0CEM/MkLgz/KPeNcu6r8ywuSKbIYDZtoAvwOrZ0swTEE4F05yeVJ0CmTxaQa2GkpLPaxPSpMOWCHNF9Bp9RTeeGNAVzEcEIf5L5TK/ayA6eN4MGob3PjByTlNtxOTn5cIHkYSd1mROIyh14hMeZ4gPTXNqWwZG3G8tHz1GKDTflZkb9a6Wm0iUd2xPaiStKFpQSlm7zxyRfm7b1K6hbIQvs8ulciXVr1dz4w9Y62+cGyDbox4JikfONtmKEcsroixC2JVSgqVIHYvHoMR4mXX2Mft2Occ04gE+iCbE5wcIheYFncStlNeLvFGSjCQvw9y9PJ6wLI482gAaaivJIFgGxsvu6DRDu4XrwF1ISoH6KALeSRlMJ+ZdKQUAxFDJLnGPXew7GFoGXNygE6IexiWV/swbq/VLl2BM4IvboDzAhtERI3zLRPMLfEg4OOjAO9zmvGMCgggBAI0U6B4zfbor2UG5zkmlHcBbOc+a3/N4PNZLwcWfMcS6hzsU8v7fgM1sOz03K4EEPr1ULSI/Tq71XsIcaGPt124quX2O6wzplYsDYy40MBeeKry7xsaLnGo5UCqvCprelYx2zGUY/fuz2UJxbeMyM9m9uTBZ8h3rOuioGQgmRDhI+ACcti4kMKg1W0nqd2pZ69tgCEGt3H2puq9SmukNm41xYE3YkMvo5e7yjlWVcdQ93K3x3dPP8mtr6ckkKMhOoxDB3tsd69LTxXc3ebhD1u/pGhqyAvpXcPaN0TqjhNMKdnn+G+g7BfOjmO0FsF4ElRO5d/O7KrUs/E6vfvE4m46KeWlE1plG8C6Ukx/Af6UwCHtWTMQihLfskuIMz67o/YDOnJ7miGb146yd3E1nOjydRwUoSeVPYzLCL4R7aO8DCdKbmVnQyh/xUBSM1m+MWH/UyqFQMx+vFMseDoPjx/+G2ZvKa/GXNRoThXonVpAFFXUzEU2DzIzxa75FWUNU4Nhc9h3HLsYCG4hYYb2ab45cQD3uOjHIS1VB6tXKLbwBfIQFH9bi3wnUdmGBnRHAU3NEvflxmNFCejBZoLsbqp/niVr1BIzvmmZHOR0di07sVkKdoRGBFuLuS53UPOBndoQJre+SESEyVNwdN8jnDFsCQ3k4KZbS9d85MgoCagtNA9XaZ0kvQtwP7zBqVAwEeCn/cJG2yKbVMXOstGGW4TexTHiGlSCT0Q9lSAYPLJqT96x8vL5JoUeGaIL1h7b1hdwR14LZgp0nmROKzCKovASkuMaPvSDv8kA3TLG9mJD0flp8cB2y3+njj3j8O8aY/RHx3qNJwIR8djGnmcpw5hjzFA6rbx0zj0UCc69ogNTbdeh9Ia5Z9RMdsEUkBLj1+AABk5AV90xv8wAUjxzpflhR+fz51wsvAL9CIPwvJIvbzSHZEPOgKiW1zwOkO8NOrG1GdyPYgD2JseLxfZQ3pivqfcOekLJ+X8ZT3VN+wqojz99lXFUDA4IU5WSRYVROmWypZm5LfhulX13+REGgDs0sGNmjqCNcsQ6UW6NFFIK6dh6OmVnuKW1+lSG097xlhv64IDabYM/wf9kH34QLyyZvI0OVVoUnKuiebZMExAZJ8NzxTyM6ol/J8wIHRuSHXwu812AVgUdIDGdswNF2PjapNrb/6TRXZeP6BtizlHWoMlpJp9QaNOqhNPj3uONB7P8EJrS0u23SXunTz+GIKzGP4x/a1hDburtYmoKUrls+rF2eufbTypANSJf5u7niVnXaQn2Mpy07FeeeptyYi2hWgXOrWjtsUy9OLRgR4TKyzKtj3rJX+jRJ/SgNv59VQta83JN0Xw+4qIJPWhYHvgSAdp1EugnSK70PvoLN7T3OX3Ox3HjtKcZR/ClR9w2hpoWbGEMmqeiUug64aYFQ6UyBeKWEUpPT3rd7Cusu57WiSoj7OsXX5vWlUz23Dmz/UqJq91qo7UorjlueIkyMgPpaKfEeF6FM5i/lkBBPlB0rD8l5RaJ7c6EgD/6ahcyM7bteQIpL/7P8G9VcWD/45D1HqOhc0DXenSJuZBnA50IMLaT77bomcEtEigMCiBjKpTCSjNJ/CL9aJe+1EOQpYL8kEH7ZHrUlQtO8tnOCM3tQ+0d72g0zo35pPTbwgEOdH7BAqs4z/EEdjmEaE15VdmVeDXYUUnl2XSX4TO45G2l5O7wzLwEvYWRx+cez6ro+Hv4f5MkPeQvyqLzKwuwocOG6GD4TzsUl3w/h3Tw+kEkbzPW2UijeygLSYad44jmfwTQCwee/DzPbiGXv8a3Zo1KrT0+RLgKQ4K5/RLqLFfcHZtgKqSIFKbPaNPoBs9YWNkR82Be75bYtyAWuaNj/taw9h06hHBlN9JAlXE46wUjZ8ScG9Lw+pI9SxW+k5sWzrOjCv0rH6wGF2XwEjU7zTXe9njj4zPj+Jgsc1Q01ThYUNfAXG0M1cm9SteVEovgRXT14nv3yqgyOMW2Q/REGqNuyRvrbxjfwfk7ZbvVF6mDR5ayB0qdnH5YlEbDfE/MmbEQ1UQvEkbMZsyrCzNjUoG/DxThsuCARZt1P9OpDSYmcG1LL4TgFfSZIF40QfHeJJjZhwotCwrBSWCkThF/TAHO6MFYaUvX0iofIMzIjdojuf7eTLU2dVaxDLoYWorvKl18T1zo9ESws0Ro453sXTzvQbyGJaDhYQbAhkYwvzX3D1tq4r4iBDqTlJKGsX59z2G1m5K48dIAqpjysknMyDeCz2MyfpKbj1ja0GzuNbtv0X48PMR+6PTMc25zatNU93aDR30fE1BEtjRgUrUuZzMSC0FkAkuqWTuN0mK7kYZZ8Uv3fSa0pOGh/uEyuIZ2+slOobCeqiG9hgmOnjvPAY/DXRTu+sSsRyeICSfLaawja3ZGhpz/fTFKygSY8O8Iolyg1MeyPrIz3eNndkd7RlBifbN+RZD8pNHJhljHnBRvO579Kn5eBey9cih0/DCXrqiJrxz2/rulNezKuLsY3m+l//IqzA38kpR5sbHEDoO+0HZcNTpU7hsc+3yj806eZ0SvJdDLxjiOoebLBLo6JebfOmaBAjplam8GLLuoJfH0DlwJkAEUEQvcx4Y0AbUAL3CmQUHWHiGrlCrWml7nlIyEhLj7Uj32z9lRXxBBrH5obgwl8RWpmCAti7K4ryFSveRMo0A67wR3APYYvF1DoSbIRABn2ikQVvPrcjiXDNwkxtahoe_lafs-1.20.0/src/allmydata/test/data/spki-hash-test-vectors.yaml0000644000000000000000000001266613615410400022474 0ustar00vector: - expected-hash: >- JIj6ezHkdSBlHhrnezAgIC_mrVQHy4KAFyL-8ZNPGPM 
expected-spki: >- MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAv9vqtA8Toy9D6xLGq41iUafSiAXnuirWxML2ct/LAcGJzATg6JctmJxxZQL7vkmaFFPBF6Y39bOGbbECM2iQYn2Qemj5fl3IzKTnYLqzryGM0ZwwnNbPyetSe/sksAIYRLzn49d6l+AHR+DjGyvoLzIyGUTn41MTDafMNtPgWx1i+65lFW3GHYpEmugu4bjeUPizNja2LrqwvwFuYXwmKxbIMdioCoRvDGX9SI3/euFstuR4rbOEUDxniYRF5g6reP8UMF30zJzF5j0kyDg8Z5b1XpKFNZAeyRYxcs9wJCqVlP6BLPDnvNVpMXodnWLeTK+r6YWvGadGVufkYNC1PwIDAQAB certificate: | -----BEGIN CERTIFICATE----- MIIDWTCCAkECFCf+I+3oEhTfqt+6ruH4qQ4Wst1DMA0GCSqGSIb3DQEBCwUAMGkx CzAJBgNVBAYTAlpaMRAwDgYDVQQIDAdOb3doZXJlMRQwEgYDVQQHDAtFeGFtcGxl dG93bjEcMBoGA1UECgwTRGVmYXVsdCBDb21wYW55IEx0ZDEUMBIGA1UEAwwLZXhh bXBsZS5jb20wHhcNMjIwMzAyMTUyNTQ3WhcNMjMwMzAyMTUyNTQ3WjBpMQswCQYD VQQGEwJaWjEQMA4GA1UECAwHTm93aGVyZTEUMBIGA1UEBwwLRXhhbXBsZXRvd24x HDAaBgNVBAoME0RlZmF1bHQgQ29tcGFueSBMdGQxFDASBgNVBAMMC2V4YW1wbGUu Y29tMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAv9vqtA8Toy9D6xLG q41iUafSiAXnuirWxML2ct/LAcGJzATg6JctmJxxZQL7vkmaFFPBF6Y39bOGbbEC M2iQYn2Qemj5fl3IzKTnYLqzryGM0ZwwnNbPyetSe/sksAIYRLzn49d6l+AHR+Dj GyvoLzIyGUTn41MTDafMNtPgWx1i+65lFW3GHYpEmugu4bjeUPizNja2LrqwvwFu YXwmKxbIMdioCoRvDGX9SI3/euFstuR4rbOEUDxniYRF5g6reP8UMF30zJzF5j0k yDg8Z5b1XpKFNZAeyRYxcs9wJCqVlP6BLPDnvNVpMXodnWLeTK+r6YWvGadGVufk YNC1PwIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQByrhn78GSS3dJ0pJ6czmhMX5wH +fauCtt1+Wbn+ctTodTycS+pfULO4gG7wRzhl8KNoOqLmWMjyA2A3mon8kdkD+0C i8McpoPaGS2wQcqC28Ud6kP9YO81YFyTl4nHVKQ0nmplT+eoLDTCIWMVxHHzxIgs 2ybUluAc+THSjpGxB6kWSAJeg3N+f2OKr+07Yg9LiQ2b8y0eZarpiuuuXCzWeWrQ PudP0aniyq/gbPhxq0tYF628IBvhDAnr/2kqEmVF2TDr2Sm/Y3PDBuPY6MeIxjnr ox5zO3LrQmQw11OaIAs2/kviKAoKTFFxeyYcpS5RuKNDZfHQCXlLwt9bySxG -----END CERTIFICATE----- - expected-hash: >- jIvdTaNKVK_iyt2EOMb0PwF23vpY3yfsQwbr5V2Rt1k expected-spki: >- MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAxMjhLl8R6KX+/itDHCT/T7LQM1i9F6LHe3TW0KWY2FKC2Ov6sJi1pn4NM2qrlW3EUPhX4l0Ru0VE9ZJuwQB1nzFkZIP70Kr8MLmYBoDjWWXsxTiNG4Lj3ydMxBMq/LLSpgHYgb3+Hh+OQeByboW1nVWWm8+QjZNXHhMvRhJmYvyFi0VWoITe/L5R0ubMtGwZ5mal/z9OnvYcE+Jb4PUxiujDhhvAxr4acHscPDn8e4+HBswDSvIHwyxKkE/w6G0yiw736YUbGmxsThSqRqilujh3dAdIVJJxlxhHwrdUkdK/Eq96SOx/BB6M/M8n8KrRNgwuF25MsabRPphgT/l4M46ddyq4209skSnoa1uJdzfx7HQuWep2n0Nagu6WtcKtrzPI3/BKiOMzOcTNOI63VavCtn995CYY9aUoTpz/x/rlp/5TPM1KiaYMBaq+MneBtqlHyYEQUZP9l8QNtvMUO7nLYaYZhcs/QA+qmpJnxcK07njvmw6gh2oLXuvbUbohPVq/3dmRBdJh4tOZWtJsjFP0XYe41Hhw/sUSWXlJAPghLXBBbgAkkeyK5KatuvD7Lpfs/iuz17No1mo8MhLr3+EnzZ1JBuRo8Nksw4FX5ivZmJxt/HQ2UcQ9HZLejIZJbYBEpUu5hvaC0rOmWDWfftLAjD7DzDPu+u46ZNGa8ykCAwEAAQ== certificate: | -----BEGIN CERTIFICATE----- MIIFazCCA1OgAwIBAgIUWcQFI0lueRJyK4txfA/Ydn0bPRIwDQYJKoZIhvcNAQEL BQAwRTELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDAeFw0yMzA4MjIxMjUxNDFaFw0yNDA4 MjExMjUxNDFaMEUxCzAJBgNVBAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEw HwYDVQQKDBhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQwggIiMA0GCSqGSIb3DQEB AQUAA4ICDwAwggIKAoICAQDEyOEuXxHopf7+K0McJP9PstAzWL0Xosd7dNbQpZjY UoLY6/qwmLWmfg0zaquVbcRQ+FfiXRG7RUT1km7BAHWfMWRkg/vQqvwwuZgGgONZ ZezFOI0bguPfJ0zEEyr8stKmAdiBvf4eH45B4HJuhbWdVZabz5CNk1ceEy9GEmZi /IWLRVaghN78vlHS5sy0bBnmZqX/P06e9hwT4lvg9TGK6MOGG8DGvhpwexw8Ofx7 j4cGzANK8gfDLEqQT/DobTKLDvfphRsabGxOFKpGqKW6OHd0B0hUknGXGEfCt1SR 0r8Sr3pI7H8EHoz8zyfwqtE2DC4XbkyxptE+mGBP+Xgzjp13KrjbT2yRKehrW4l3 N/HsdC5Z6nafQ1qC7pa1wq2vM8jf8EqI4zM5xM04jrdVq8K2f33kJhj1pShOnP/H +uWn/lM8zUqJpgwFqr4yd4G2qUfJgRBRk/2XxA228xQ7ucthphmFyz9AD6qakmfF wrTueO+bDqCHagte69tRuiE9Wr/d2ZEF0mHi05la0myMU/Rdh7jUeHD+xRJZeUkA +CEtcEFuACSR7Irkpq268Psul+z+K7PXs2jWajwyEuvf4SfNnUkG5Gjw2SzDgVfm K9mYnG38dDZRxD0dkt6MhkltgESlS7mG9oLSs6ZYNZ9+0sCMPsPMM+767jpk0Zrz 
KQIDAQABo1MwUTAdBgNVHQ4EFgQUl/JLslQ7ISm+9JR1dMaq2I54KAIwHwYDVR0j BBgwFoAUl/JLslQ7ISm+9JR1dMaq2I54KAIwDwYDVR0TAQH/BAUwAwEB/zANBgkq hkiG9w0BAQsFAAOCAgEAwcorbUP98LPyDmOdTe/Y9yLWSgD/xJV/L1oQpB8HhbXA J3mEnlXtPMNFZULSdHxJycexeHe1tiDcFgatQv/YwURHW67s0TFHBXTvSitWz9tU CL/t7pEIdKgzbUL2yQry7voWVUaXOf7//l/4P9x2/egn78L6+KuRek6umtIECsN0 HoOiZzqTrXn2WNtnU1Br9m0cxFFzMzP/g2Rd9MUKjIDag7DLfvRCmTMK8825vTJI L3nzGfWk5R+ZWO4BudfvQWpI7iMj2/7lRWxYvmS+SSJh+DFwYwV+4CaCPecXVI2x cD/M3uKTLhUMWo1Ge0qQWhl/qwtJ6FNaxp86yiX8x8EHYB0bDZgH4xMQE0/6o0Vg vKpy/IrEwnN8WM8yYLpm9kTe9H+jM/NEOxPMh4uid/FLmi7KN549UItAzUS3h7zP gP4cpSW+3Dgj0l7C58RIWxwABIIJZMH/2wMT/PeNg2pqDjhkoPDg8rwsvaFn6T0u 1A6pJFnVtWGUuyxJESVYBq4vNSLH68v/xkajxl62uWPDkpgAqWuj5TOUP0e/1Uj5 wqF/jNlRhLMw10r0U40AYkzQjgN2Q4jasqUKsZyhDa8F8861BHsSvFPrASLy4UrZ 9Tb4DMYXTNZOY6v1iQerRk4ujx/lTjlwuaX9FsirbkuLv/xF346uEl0jBYR7eMo= -----END CERTIFICATE----- - expected-hash: >- nG1UHCwz7nXHp2zMCiSfxRbCY29OK3RockkeOiw-t8A expected-spki: >- MCowBQYDK2VwAyEA6gbCgxeb9kkSDo4WbB76aTvBWnpyzColUKDxyDhPu94= certificate: | -----BEGIN CERTIFICATE----- MIIBnzCCAVGgAwIBAgIUBM5d9fmVxhjKQod7TLp6Bb2vEd4wBQYDK2VwMEUxCzAJ BgNVBAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEwHwYDVQQKDBhJbnRlcm5l dCBXaWRnaXRzIFB0eSBMdGQwHhcNMjMwODIyMTI1NjE0WhcNMjQwODIxMTI1NjE0 WjBFMQswCQYDVQQGEwJBVTETMBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwY SW50ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMCowBQYDK2VwAyEA6gbCgxeb9kkSDo4W bB76aTvBWnpyzColUKDxyDhPu96jUzBRMB0GA1UdDgQWBBQC8cbPWjZilcD4FSU/ J1sSNYwpAjAfBgNVHSMEGDAWgBQC8cbPWjZilcD4FSU/J1sSNYwpAjAPBgNVHRMB Af8EBTADAQH/MAUGAytlcANBAGfmvq0a+Ip6nDBlj1tOpyJzcl1J+wj+4N72V23z H1c75cXDrl9DMOqLwNVK9YD2wmaxPyEWO4tdth560Nir4QM= -----END CERTIFICATE----- tahoe_lafs-1.20.0/src/allmydata/test/mutable/__init__.py0000644000000000000000000000000013615410400020065 0ustar00tahoe_lafs-1.20.0/src/allmydata/test/mutable/test_checker.py0000644000000000000000000002374513615410400021016 0ustar00""" Ported to Python 3. 
""" from ..common import AsyncTestCase from foolscap.api import flushEventualQueue from allmydata.monitor import Monitor from allmydata.mutable.common import CorruptShareError from .util import PublishMixin, corrupt, CheckerMixin class Checker(AsyncTestCase, CheckerMixin, PublishMixin): def setUp(self): super(Checker, self).setUp() return self.publish_one() def test_check_good(self): d = self._fn.check(Monitor()) d.addCallback(self.check_good, "test_check_good") return d def test_check_mdmf_good(self): d = self.publish_mdmf() d.addCallback(lambda ignored: self._fn.check(Monitor())) d.addCallback(self.check_good, "test_check_mdmf_good") return d def test_check_no_shares(self): for shares in list(self._storage._peers.values()): shares.clear() d = self._fn.check(Monitor()) d.addCallback(self.check_bad, "test_check_no_shares") return d def test_check_mdmf_no_shares(self): d = self.publish_mdmf() def _then(ignored): for share in list(self._storage._peers.values()): share.clear() d.addCallback(_then) d.addCallback(lambda ignored: self._fn.check(Monitor())) d.addCallback(self.check_bad, "test_check_mdmf_no_shares") return d def test_check_not_enough_shares(self): for shares in list(self._storage._peers.values()): for shnum in list(shares.keys()): if shnum > 0: del shares[shnum] d = self._fn.check(Monitor()) d.addCallback(self.check_bad, "test_check_not_enough_shares") return d def test_check_mdmf_not_enough_shares(self): d = self.publish_mdmf() def _then(ignored): for shares in list(self._storage._peers.values()): for shnum in list(shares.keys()): if shnum > 0: del shares[shnum] d.addCallback(_then) d.addCallback(lambda ignored: self._fn.check(Monitor())) d.addCallback(self.check_bad, "test_check_mdmf_not_enougH_shares") return d def test_check_all_bad_sig(self): d = corrupt(None, self._storage, 1) # bad sig d.addCallback(lambda ignored: self._fn.check(Monitor())) d.addCallback(self.check_bad, "test_check_all_bad_sig") return d def test_check_mdmf_all_bad_sig(self): d = self.publish_mdmf() d.addCallback(lambda ignored: corrupt(None, self._storage, 1)) d.addCallback(lambda ignored: self._fn.check(Monitor())) d.addCallback(self.check_bad, "test_check_mdmf_all_bad_sig") return d def test_verify_mdmf_all_bad_sharedata(self): d = self.publish_mdmf() # On 8 of the shares, corrupt the beginning of the share data. # The signature check during the servermap update won't catch this. d.addCallback(lambda ignored: corrupt(None, self._storage, "share_data", list(range(8)))) # On 2 of the shares, corrupt the end of the share data. # The signature check during the servermap update won't catch # this either, and the retrieval process will have to process # all of the segments before it notices. d.addCallback(lambda ignored: # the block hash tree comes right after the share data, so if we # corrupt a little before the block hash tree, we'll corrupt in the # last block of each share. corrupt(None, self._storage, "block_hash_tree", [8, 9], -5)) d.addCallback(lambda ignored: self._fn.check(Monitor(), verify=True)) # The verifier should flag the file as unhealthy, and should # list all 10 shares as bad. d.addCallback(self.check_bad, "test_verify_mdmf_all_bad_sharedata") def _check_num_bad(r): self.failIf(r.is_recoverable()) smap = r.get_servermap() self.failUnlessEqual(len(smap.get_bad_shares()), 10) d.addCallback(_check_num_bad) return d def test_check_all_bad_blocks(self): d = corrupt(None, self._storage, "share_data", [9]) # bad blocks # the Checker won't notice this.. 
it doesn't look at actual data d.addCallback(lambda ignored: self._fn.check(Monitor())) d.addCallback(self.check_good, "test_check_all_bad_blocks") return d def test_check_mdmf_all_bad_blocks(self): d = self.publish_mdmf() d.addCallback(lambda ignored: corrupt(None, self._storage, "share_data")) d.addCallback(lambda ignored: self._fn.check(Monitor())) d.addCallback(self.check_good, "test_check_mdmf_all_bad_blocks") return d def test_verify_good(self): d = self._fn.check(Monitor(), verify=True) d.addCallback(self.check_good, "test_verify_good") return d def test_verify_all_bad_sig(self): d = corrupt(None, self._storage, 1) # bad sig d.addCallback(lambda ignored: self._fn.check(Monitor(), verify=True)) d.addCallback(self.check_bad, "test_verify_all_bad_sig") return d def test_verify_one_bad_sig(self): d = corrupt(None, self._storage, 1, [9]) # bad sig d.addCallback(lambda ignored: self._fn.check(Monitor(), verify=True)) d.addCallback(self.check_bad, "test_verify_one_bad_sig") return d def test_verify_one_bad_block(self): d = corrupt(None, self._storage, "share_data", [9]) # bad blocks # the Verifier *will* notice this, since it examines every byte d.addCallback(lambda ignored: self._fn.check(Monitor(), verify=True)) d.addCallback(self.check_bad, "test_verify_one_bad_block") d.addCallback(self.check_expected_failure, CorruptShareError, "block hash tree failure", "test_verify_one_bad_block") return d def test_verify_one_bad_sharehash(self): d = corrupt(None, self._storage, "share_hash_chain", [9], 5) d.addCallback(lambda ignored: self._fn.check(Monitor(), verify=True)) d.addCallback(self.check_bad, "test_verify_one_bad_sharehash") d.addCallback(self.check_expected_failure, CorruptShareError, "corrupt hashes", "test_verify_one_bad_sharehash") return d def test_verify_one_bad_encprivkey(self): d = corrupt(None, self._storage, "enc_privkey", [9]) # bad privkey d.addCallback(lambda ignored: self._fn.check(Monitor(), verify=True)) d.addCallback(self.check_bad, "test_verify_one_bad_encprivkey") d.addCallback(self.check_expected_failure, CorruptShareError, "invalid privkey", "test_verify_one_bad_encprivkey") return d def test_verify_one_bad_encprivkey_uncheckable(self): d = corrupt(None, self._storage, "enc_privkey", [9]) # bad privkey readonly_fn = self._fn.get_readonly() # a read-only node has no way to validate the privkey d.addCallback(lambda ignored: readonly_fn.check(Monitor(), verify=True)) d.addCallback(self.check_good, "test_verify_one_bad_encprivkey_uncheckable") return d def test_verify_mdmf_good(self): d = self.publish_mdmf() d.addCallback(lambda ignored: self._fn.check(Monitor(), verify=True)) d.addCallback(self.check_good, "test_verify_mdmf_good") return d def test_verify_mdmf_one_bad_block(self): d = self.publish_mdmf() d.addCallback(lambda ignored: corrupt(None, self._storage, "share_data", [1])) d.addCallback(lambda ignored: self._fn.check(Monitor(), verify=True)) # We should find one bad block here d.addCallback(self.check_bad, "test_verify_mdmf_one_bad_block") d.addCallback(self.check_expected_failure, CorruptShareError, "block hash tree failure", "test_verify_mdmf_one_bad_block") return d def test_verify_mdmf_bad_encprivkey(self): d = self.publish_mdmf() d.addCallback(lambda ignored: corrupt(None, self._storage, "enc_privkey", [0])) d.addCallback(lambda ignored: self._fn.check(Monitor(), verify=True)) d.addCallback(self.check_bad, "test_verify_mdmf_bad_encprivkey") d.addCallback(self.check_expected_failure, CorruptShareError, "privkey", "test_verify_mdmf_bad_encprivkey") return d 
def test_verify_mdmf_bad_sig(self): d = self.publish_mdmf() d.addCallback(lambda ignored: corrupt(None, self._storage, 1, [1])) d.addCallback(lambda ignored: self._fn.check(Monitor(), verify=True)) d.addCallback(self.check_bad, "test_verify_mdmf_bad_sig") return d def test_verify_mdmf_bad_encprivkey_uncheckable(self): d = self.publish_mdmf() d.addCallback(lambda ignored: corrupt(None, self._storage, "enc_privkey", [1])) d.addCallback(lambda ignored: self._fn.get_readonly()) d.addCallback(lambda fn: fn.check(Monitor(), verify=True)) d.addCallback(self.check_good, "test_verify_mdmf_bad_encprivkey_uncheckable") return d def test_verify_sdmf_empty(self): d = self.publish_sdmf(b"") d.addCallback(lambda ignored: self._fn.check(Monitor(), verify=True)) d.addCallback(self.check_good, "test_verify_sdmf") d.addCallback(flushEventualQueue) return d def test_verify_mdmf_empty(self): d = self.publish_mdmf(b"") d.addCallback(lambda ignored: self._fn.check(Monitor(), verify=True)) d.addCallback(self.check_good, "test_verify_mdmf") d.addCallback(flushEventualQueue) return d tahoe_lafs-1.20.0/src/allmydata/test/mutable/test_datahandle.py0000644000000000000000000000305513615410400021467 0ustar00""" Ported to Python 3. """ from ..common import SyncTestCase from allmydata.mutable.publish import MutableData from testtools.matchers import Equals, HasLength class DataHandle(SyncTestCase): def setUp(self): super(DataHandle, self).setUp() self.test_data = b"Test Data" * 50000 self.uploadable = MutableData(self.test_data) def test_datahandle_read(self): chunk_size = 10 for i in range(0, len(self.test_data), chunk_size): data = self.uploadable.read(chunk_size) data = b"".join(data) start = i end = i + chunk_size self.assertThat(data, Equals(self.test_data[start:end])) def test_datahandle_get_size(self): actual_size = len(self.test_data) size = self.uploadable.get_size() self.assertThat(size, Equals(actual_size)) def test_datahandle_get_size_out_of_order(self): # We should be able to call get_size whenever we want without # disturbing the location of the seek pointer. chunk_size = 100 data = self.uploadable.read(chunk_size) self.assertThat(b"".join(data), Equals(self.test_data[:chunk_size])) # Now get the size. size = self.uploadable.get_size() self.assertThat(self.test_data, HasLength(size)) # Now get more data. We should be right where we left off. more_data = self.uploadable.read(chunk_size) start = chunk_size end = chunk_size * 2 self.assertThat(b"".join(more_data), Equals(self.test_data[start:end])) tahoe_lafs-1.20.0/src/allmydata/test/mutable/test_different_encoding.py0000644000000000000000000000204513615410400023214 0ustar00""" Ported to Python 3. """ from ..common import AsyncTestCase from .util import FakeStorage, make_nodemaker class DifferentEncoding(AsyncTestCase): def setUp(self): super(DifferentEncoding, self).setUp() self._storage = s = FakeStorage() self.nodemaker = make_nodemaker(s) def test_filenode(self): # create a file with 3-of-20, then modify it with a client configured # to do 3-of-10. 
#1510 tracks a failure here self.nodemaker.default_encoding_parameters["n"] = 20 d = self.nodemaker.create_mutable_file(b"old contents") def _created(n): filecap = n.get_cap().to_string() del n # we want a new object, not the cached one self.nodemaker.default_encoding_parameters["n"] = 10 n2 = self.nodemaker.create_from_cap(filecap) return n2 d.addCallback(_created) def modifier(old_contents, servermap, first_time): return b"new contents" d.addCallback(lambda n: n.modify(modifier)) return d tahoe_lafs-1.20.0/src/allmydata/test/mutable/test_exceptions.py0000644000000000000000000000101113615410400021551 0ustar00""" Ported to Python 3. """ from ..common import SyncTestCase from allmydata.mutable.common import NeedMoreDataError, UncoordinatedWriteError class Exceptions(SyncTestCase): def test_repr(self): nmde = NeedMoreDataError(100, 50, 100) self.assertTrue("NeedMoreDataError" in repr(nmde), msg=repr(nmde)) self.assertTrue("NeedMoreDataError" in repr(nmde), msg=repr(nmde)) ucwe = UncoordinatedWriteError() self.assertTrue("UncoordinatedWriteError" in repr(ucwe), msg=repr(ucwe)) tahoe_lafs-1.20.0/src/allmydata/test/mutable/test_filehandle.py0000644000000000000000000000516213615410400021476 0ustar00""" Ported to Python 3. """ import os from io import BytesIO from ..common import SyncTestCase from allmydata.mutable.publish import MutableFileHandle class FileHandle(SyncTestCase): def setUp(self): super(FileHandle, self).setUp() self.test_data = b"Test Data" * 50000 self.sio = BytesIO(self.test_data) self.uploadable = MutableFileHandle(self.sio) def test_filehandle_read(self): self.basedir = "mutable/FileHandle/test_filehandle_read" chunk_size = 10 for i in range(0, len(self.test_data), chunk_size): data = self.uploadable.read(chunk_size) data = b"".join(data) start = i end = i + chunk_size self.failUnlessEqual(data, self.test_data[start:end]) def test_filehandle_get_size(self): self.basedir = "mutable/FileHandle/test_filehandle_get_size" actual_size = len(self.test_data) size = self.uploadable.get_size() self.failUnlessEqual(size, actual_size) def test_filehandle_get_size_out_of_order(self): # We should be able to call get_size whenever we want without # disturbing the location of the seek pointer. chunk_size = 100 data = self.uploadable.read(chunk_size) self.failUnlessEqual(b"".join(data), self.test_data[:chunk_size]) # Now get the size. size = self.uploadable.get_size() self.failUnlessEqual(size, len(self.test_data)) # Now get more data. We should be right where we left off. more_data = self.uploadable.read(chunk_size) start = chunk_size end = chunk_size * 2 self.failUnlessEqual(b"".join(more_data), self.test_data[start:end]) def test_filehandle_file(self): # Make sure that the MutableFileHandle works on a file as well # as a BytesIO object, since in some cases it will be asked to # deal with files. self.basedir = self.mktemp() # necessary? What am I doing wrong here? os.mkdir(self.basedir) f_path = os.path.join(self.basedir, "test_file") f = open(f_path, "wb") f.write(self.test_data) f.close() f = open(f_path, "rb") uploadable = MutableFileHandle(f) data = uploadable.read(len(self.test_data)) self.failUnlessEqual(b"".join(data), self.test_data) size = uploadable.get_size() self.failUnlessEqual(size, len(self.test_data)) def test_close(self): # Make sure that the MutableFileHandle closes its handle when # told to do so. 
self.uploadable.close() self.failUnless(self.sio.closed) tahoe_lafs-1.20.0/src/allmydata/test/mutable/test_filenode.py0000644000000000000000000007536113615410400021200 0ustar00""" Ported to Python 3. """ from io import StringIO from twisted.internet import defer, reactor from ..common import AsyncBrokenTestCase from testtools.matchers import ( Equals, Contains, HasLength, Is, IsInstance, ) from allmydata import uri, client from allmydata.util.consumer import MemoryConsumer from allmydata.interfaces import SDMF_VERSION, MDMF_VERSION, DownloadStopped from allmydata.mutable.filenode import MutableFileNode, BackoffAgent from allmydata.mutable.common import MODE_ANYTHING, MODE_WRITE, MODE_READ, UncoordinatedWriteError from allmydata.mutable.publish import MutableData from ..test_download import PausingConsumer, PausingAndStoppingConsumer, \ StoppingConsumer, ImmediatelyStoppingConsumer from .. import common_util as testutil from ...crypto.rsa import create_signing_keypair from .util import ( FakeStorage, make_nodemaker_with_peers, make_peer, ) class Filenode(AsyncBrokenTestCase, testutil.ShouldFailMixin): # this used to be in Publish, but we removed the limit. Some of # these tests test whether the new code correctly allows files # larger than the limit. OLD_MAX_SEGMENT_SIZE = 3500000 def setUp(self): super(Filenode, self).setUp() self._storage = FakeStorage() self._peers = list( make_peer(self._storage, n) for n # 10 is the default for N. We're trying to make enough servers # here so that each only gets one share. in range(10) ) self.nodemaker = make_nodemaker_with_peers(self._peers) def test_create(self): d = self.nodemaker.create_mutable_file() def _created(n): self.assertThat(n, IsInstance(MutableFileNode)) self.assertThat(n.get_storage_index(), Equals(n._storage_index)) sb = self.nodemaker.storage_broker peer0 = sorted(sb.get_all_serverids())[0] shnums = self._storage._peers[peer0].keys() self.assertThat(shnums, HasLength(1)) d.addCallback(_created) return d async def test_create_with_keypair(self): """ An SDMF can be created using a given keypair. """ (priv, pub) = create_signing_keypair(2048) node = await self.nodemaker.create_mutable_file(keypair=(pub, priv)) self.assertThat( (node.get_privkey(), node.get_pubkey()), Equals((priv, pub)), ) def test_create_mdmf(self): d = self.nodemaker.create_mutable_file(version=MDMF_VERSION) def _created(n): self.assertThat(n, IsInstance(MutableFileNode)) self.assertThat(n.get_storage_index(), Equals(n._storage_index)) sb = self.nodemaker.storage_broker peer0 = sorted(sb.get_all_serverids())[0] shnums = self._storage._peers[peer0].keys() self.assertThat(shnums, HasLength(1)) d.addCallback(_created) return d def test_single_share(self): # Make sure that we tolerate publishing a single share. 
self.nodemaker.default_encoding_parameters['k'] = 1 self.nodemaker.default_encoding_parameters['happy'] = 1 self.nodemaker.default_encoding_parameters['n'] = 1 d = defer.succeed(None) for v in (SDMF_VERSION, MDMF_VERSION): d.addCallback(lambda ignored, v=v: self.nodemaker.create_mutable_file(version=v)) def _created(n): self.assertThat(n, IsInstance(MutableFileNode)) self._node = n return n d.addCallback(_created) d.addCallback(lambda n: n.overwrite(MutableData(b"Contents" * 50000))) d.addCallback(lambda ignored: self._node.download_best_version()) d.addCallback(lambda contents: self.assertThat(contents, Equals(b"Contents" * 50000))) return d def test_max_shares(self): self.nodemaker.default_encoding_parameters['n'] = 255 d = self.nodemaker.create_mutable_file(version=SDMF_VERSION) def _created(n): self.assertThat(n, IsInstance(MutableFileNode)) self.assertThat(n.get_storage_index(), Equals(n._storage_index)) sb = self.nodemaker.storage_broker num_shares = sum([len(self._storage._peers[x].keys()) for x \ in sb.get_all_serverids()]) self.assertThat(num_shares, Equals(255)) self._node = n return n d.addCallback(_created) # Now we upload some contents d.addCallback(lambda n: n.overwrite(MutableData(b"contents" * 50000))) # ...then download contents d.addCallback(lambda ignored: self._node.download_best_version()) # ...and check to make sure everything went okay. d.addCallback(lambda contents: self.assertThat(b"contents" * 50000, Equals(contents))) return d def test_max_shares_mdmf(self): # Test how files behave when there are 255 shares. self.nodemaker.default_encoding_parameters['n'] = 255 d = self.nodemaker.create_mutable_file(version=MDMF_VERSION) def _created(n): self.assertThat(n, IsInstance(MutableFileNode)) self.assertThat(n.get_storage_index(), Equals(n._storage_index)) sb = self.nodemaker.storage_broker num_shares = sum([len(self._storage._peers[x].keys()) for x \ in sb.get_all_serverids()]) self.assertThat(num_shares, Equals(255)) self._node = n return n d.addCallback(_created) d.addCallback(lambda n: n.overwrite(MutableData(b"contents" * 50000))) d.addCallback(lambda ignored: self._node.download_best_version()) d.addCallback(lambda contents: self.assertThat(contents, Equals(b"contents" * 50000))) return d def test_mdmf_filenode_cap(self): # Test that an MDMF filenode, once created, returns an MDMF URI. d = self.nodemaker.create_mutable_file(version=MDMF_VERSION) def _created(n): self.assertThat(n, IsInstance(MutableFileNode)) cap = n.get_cap() self.assertThat(cap, IsInstance(uri.WriteableMDMFFileURI)) rcap = n.get_readcap() self.assertThat(rcap, IsInstance(uri.ReadonlyMDMFFileURI)) vcap = n.get_verify_cap() self.assertThat(vcap, IsInstance(uri.MDMFVerifierURI)) d.addCallback(_created) return d def test_create_from_mdmf_writecap(self): # Test that the nodemaker is capable of creating an MDMF # filenode given an MDMF cap. 
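        # (Editorial sketch, hedged: the round trip exercised below is
        #
        #     cap = n.get_uri()                       # b"URI:MDMF:..." bytes
        #     n2 = self.nodemaker.create_from_cap(cap)
        #
        # after which the two nodes are expected to agree on their storage
        # index and their URI.)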
d = self.nodemaker.create_mutable_file(version=MDMF_VERSION) def _created(n): self.assertThat(n, IsInstance(MutableFileNode)) s = n.get_uri() self.assertTrue(s.startswith(b"URI:MDMF")) n2 = self.nodemaker.create_from_cap(s) self.assertThat(n2, IsInstance(MutableFileNode)) self.assertThat(n.get_storage_index(), Equals(n2.get_storage_index())) self.assertThat(n.get_uri(), Equals(n2.get_uri())) d.addCallback(_created) return d def test_create_from_mdmf_readcap(self): d = self.nodemaker.create_mutable_file(version=MDMF_VERSION) def _created(n): self.assertThat(n, IsInstance(MutableFileNode)) s = n.get_readonly_uri() n2 = self.nodemaker.create_from_cap(s) self.assertThat(n2, IsInstance(MutableFileNode)) # Check that it's a readonly node self.assertTrue(n2.is_readonly()) d.addCallback(_created) return d def test_internal_version_from_cap(self): # MutableFileNodes and MutableFileVersions have an internal # switch that tells them whether they're dealing with an SDMF or # MDMF mutable file when they start doing stuff. We want to make # sure that this is set appropriately given an MDMF cap. d = self.nodemaker.create_mutable_file(version=MDMF_VERSION) def _created(n): self.uri = n.get_uri() self.assertThat(n._protocol_version, Equals(MDMF_VERSION)) n2 = self.nodemaker.create_from_cap(self.uri) self.assertThat(n2._protocol_version, Equals(MDMF_VERSION)) d.addCallback(_created) return d def test_serialize(self): n = MutableFileNode(None, None, {"k": 3, "n": 10}, None) calls = [] def _callback(*args, **kwargs): self.assertThat(args, Equals((4,))) self.assertThat(kwargs, Equals({"foo": 5})) calls.append(1) return 6 d = n._do_serialized(_callback, 4, foo=5) def _check_callback(res): self.assertThat(res, Equals(6)) self.assertThat(calls, Equals([1])) d.addCallback(_check_callback) def _errback(): raise ValueError("heya") d.addCallback(lambda res: self.shouldFail(ValueError, "_check_errback", "heya", n._do_serialized, _errback)) return d def test_upload_and_download(self): d = self.nodemaker.create_mutable_file() def _created(n): d = defer.succeed(None) d.addCallback(lambda res: n.get_servermap(MODE_READ)) d.addCallback(lambda smap: smap.dump(StringIO())) d.addCallback(lambda sio: self.assertTrue("3-of-10" in sio.getvalue())) d.addCallback(lambda res: n.overwrite(MutableData(b"contents 1"))) d.addCallback(lambda res: self.assertThat(res, Is(None))) d.addCallback(lambda res: n.download_best_version()) d.addCallback(lambda res: self.assertThat(res, Equals(b"contents 1"))) d.addCallback(lambda res: n.get_size_of_best_version()) d.addCallback(lambda size: self.assertThat(size, Equals(len(b"contents 1")))) d.addCallback(lambda res: n.overwrite(MutableData(b"contents 2"))) d.addCallback(lambda res: n.download_best_version()) d.addCallback(lambda res: self.assertThat(res, Equals(b"contents 2"))) d.addCallback(lambda res: n.get_servermap(MODE_WRITE)) d.addCallback(lambda smap: n.upload(MutableData(b"contents 3"), smap)) d.addCallback(lambda res: n.download_best_version()) d.addCallback(lambda res: self.assertThat(res, Equals(b"contents 3"))) d.addCallback(lambda res: n.get_servermap(MODE_ANYTHING)) d.addCallback(lambda smap: n.download_version(smap, smap.best_recoverable_version())) d.addCallback(lambda res: self.assertThat(res, Equals(b"contents 3"))) # test a file that is large enough to overcome the # mapupdate-to-retrieve data caching (i.e. make the shares larger # than the default readsize, which is 2000 bytes). A 15kB file # will have 5kB shares. 
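            # (Editorial arithmetic, hedged: b"large size file" is 15 bytes,
            # so the payload below is 15 * 1000 = 15000 bytes; with the
            # 3-of-10 encoding checked earlier in this test, each share
            # carries roughly 15000 / 3 = 5000 bytes, comfortably past the
            # 2000-byte readsize mentioned above.)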
d.addCallback(lambda res: n.overwrite(MutableData(b"large size file" * 1000))) d.addCallback(lambda res: n.download_best_version()) d.addCallback(lambda res: self.assertThat(res, Equals(b"large size file" * 1000))) return d d.addCallback(_created) return d def test_upload_and_download_mdmf(self): d = self.nodemaker.create_mutable_file(version=MDMF_VERSION) def _created(n): d = defer.succeed(None) d.addCallback(lambda ignored: n.get_servermap(MODE_READ)) def _then(servermap): dumped = servermap.dump(StringIO()) self.assertThat(dumped.getvalue(), Contains("3-of-10")) d.addCallback(_then) # Now overwrite the contents with some new contents. We want # to make them big enough to force the file to be uploaded # in more than one segment. big_contents = b"contents1" * 100000 # about 900 KiB big_contents_uploadable = MutableData(big_contents) d.addCallback(lambda ignored: n.overwrite(big_contents_uploadable)) d.addCallback(lambda ignored: n.download_best_version()) d.addCallback(lambda data: self.assertThat(data, Equals(big_contents))) # Overwrite the contents again with some new contents. As # before, they need to be big enough to force multiple # segments, so that we make the downloader deal with # multiple segments. bigger_contents = b"contents2" * 1000000 # about 9MiB bigger_contents_uploadable = MutableData(bigger_contents) d.addCallback(lambda ignored: n.overwrite(bigger_contents_uploadable)) d.addCallback(lambda ignored: n.download_best_version()) d.addCallback(lambda data: self.assertThat(data, Equals(bigger_contents))) return d d.addCallback(_created) return d def test_retrieve_producer_mdmf(self): # We should make sure that the retriever is able to pause and stop # correctly. data = b"contents1" * 100000 d = self.nodemaker.create_mutable_file(MutableData(data), version=MDMF_VERSION) d.addCallback(lambda node: node.get_best_mutable_version()) d.addCallback(self._test_retrieve_producer, "MDMF", data) return d # note: SDMF has only one big segment, so we can't use the usual # after-the-first-write() trick to pause or stop the download. # Disabled until we find a better approach. def OFF_test_retrieve_producer_sdmf(self): data = b"contents1" * 100000 d = self.nodemaker.create_mutable_file(MutableData(data), version=SDMF_VERSION) d.addCallback(lambda node: node.get_best_mutable_version()) d.addCallback(self._test_retrieve_producer, "SDMF", data) return d def _test_retrieve_producer(self, version, kind, data): # Now we'll retrieve it into a pausing consumer. 
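        # (Editorial note, hedged: PausingConsumer and friends are imported
        # from ..test_download and are assumed to implement Twisted's
        # IConsumer interface, roughly
        #
        #     class PausingConsumer(object):
        #         def registerProducer(self, producer, streaming): ...
        #         def write(self, data): ...   # pauses/stops the producer
        #         def unregisterProducer(self): ...
        #
        # so version.read(consumer) drives them like any other download
        # target.)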
c = PausingConsumer() d = version.read(c) d.addCallback(lambda ign: self.assertThat(c.size, Equals(len(data)))) c2 = PausingAndStoppingConsumer() d.addCallback(lambda ign: self.shouldFail(DownloadStopped, kind+"_pause_stop", "our Consumer called stopProducing()", version.read, c2)) c3 = StoppingConsumer() d.addCallback(lambda ign: self.shouldFail(DownloadStopped, kind+"_stop", "our Consumer called stopProducing()", version.read, c3)) c4 = ImmediatelyStoppingConsumer() d.addCallback(lambda ign: self.shouldFail(DownloadStopped, kind+"_stop_imm", "our Consumer called stopProducing()", version.read, c4)) def _then(ign): c5 = MemoryConsumer() d1 = version.read(c5) c5.producer.stopProducing() return self.shouldFail(DownloadStopped, kind+"_stop_imm2", "our Consumer called stopProducing()", lambda: d1) d.addCallback(_then) return d def test_download_from_mdmf_cap(self): # We should be able to download an MDMF file given its cap d = self.nodemaker.create_mutable_file(version=MDMF_VERSION) def _created(node): self.uri = node.get_uri() # also confirm that the cap has no extension fields pieces = self.uri.split(b":") self.assertThat(pieces, HasLength(4)) return node.overwrite(MutableData(b"contents1" * 100000)) def _then(ignored): node = self.nodemaker.create_from_cap(self.uri) return node.download_best_version() def _downloaded(data): self.assertThat(data, Equals(b"contents1" * 100000)) d.addCallback(_created) d.addCallback(_then) d.addCallback(_downloaded) return d def test_mdmf_write_count(self): """ Publishing an MDMF file causes exactly one write for each share that is to be published. Otherwise, we introduce undesirable semantics that are a regression from SDMF. """ upload = MutableData(b"MDMF" * 100000) # about 400 KiB d = self.nodemaker.create_mutable_file(upload, version=MDMF_VERSION) def _check_server_write_counts(ignored): for peer in self._peers: # There were enough servers for each to only get a single # share. 
self.assertEqual(peer.storage_server.queries, 1) d.addCallback(_check_server_write_counts) return d def test_create_with_initial_contents(self): upload1 = MutableData(b"contents 1") d = self.nodemaker.create_mutable_file(upload1) def _created(n): d = n.download_best_version() d.addCallback(lambda res: self.assertThat(res, Equals(b"contents 1"))) upload2 = MutableData(b"contents 2") d.addCallback(lambda res: n.overwrite(upload2)) d.addCallback(lambda res: n.download_best_version()) d.addCallback(lambda res: self.assertThat(res, Equals(b"contents 2"))) return d d.addCallback(_created) return d def test_create_mdmf_with_initial_contents(self): initial_contents = b"foobarbaz" * 131072 # 900KiB initial_contents_uploadable = MutableData(initial_contents) d = self.nodemaker.create_mutable_file(initial_contents_uploadable, version=MDMF_VERSION) def _created(n): d = n.download_best_version() d.addCallback(lambda data: self.assertThat(data, Equals(initial_contents))) uploadable2 = MutableData(initial_contents + b"foobarbaz") d.addCallback(lambda ignored: n.overwrite(uploadable2)) d.addCallback(lambda ignored: n.download_best_version()) d.addCallback(lambda data: self.assertThat(data, Equals(initial_contents + b"foobarbaz"))) return d d.addCallback(_created) return d def test_create_with_initial_contents_function(self): data = b"initial contents" def _make_contents(n): self.assertThat(n, IsInstance(MutableFileNode)) key = n.get_writekey() self.assertTrue(isinstance(key, bytes), key) self.assertThat(key, HasLength(16)) # AES key size return MutableData(data) d = self.nodemaker.create_mutable_file(_make_contents) def _created(n): return n.download_best_version() d.addCallback(_created) d.addCallback(lambda data2: self.assertThat(data2, Equals(data))) return d def test_create_mdmf_with_initial_contents_function(self): data = b"initial contents" * 100000 def _make_contents(n): self.assertThat(n, IsInstance(MutableFileNode)) key = n.get_writekey() self.assertTrue(isinstance(key, bytes), key) self.assertThat(key, HasLength(16)) return MutableData(data) d = self.nodemaker.create_mutable_file(_make_contents, version=MDMF_VERSION) d.addCallback(lambda n: n.download_best_version()) d.addCallback(lambda data2: self.assertThat(data2, Equals(data))) return d def test_create_with_too_large_contents(self): BIG = b"a" * (self.OLD_MAX_SEGMENT_SIZE + 1) BIG_uploadable = MutableData(BIG) d = self.nodemaker.create_mutable_file(BIG_uploadable) def _created(n): other_BIG_uploadable = MutableData(BIG) d = n.overwrite(other_BIG_uploadable) return d d.addCallback(_created) return d def failUnlessCurrentSeqnumIs(self, n, expected_seqnum, which): d = n.get_servermap(MODE_READ) d.addCallback(lambda servermap: servermap.best_recoverable_version()) d.addCallback(lambda verinfo: self.assertThat(verinfo[0], Equals(expected_seqnum), which)) return d def test_modify(self): def _modifier(old_contents, servermap, first_time): new_contents = old_contents + b"line2" return new_contents def _non_modifier(old_contents, servermap, first_time): return old_contents def _none_modifier(old_contents, servermap, first_time): return None def _error_modifier(old_contents, servermap, first_time): raise ValueError("oops") def _toobig_modifier(old_contents, servermap, first_time): new_content = b"b" * (self.OLD_MAX_SEGMENT_SIZE + 1) return new_content calls = [] def _ucw_error_modifier(old_contents, servermap, first_time): # simulate an UncoordinatedWriteError once calls.append(1) if len(calls) <= 1: raise UncoordinatedWriteError("simulated") 
new_contents = old_contents + b"line3" return new_contents def _ucw_error_non_modifier(old_contents, servermap, first_time): # simulate an UncoordinatedWriteError once, and don't actually # modify the contents on subsequent invocations calls.append(1) if len(calls) <= 1: raise UncoordinatedWriteError("simulated") return old_contents initial_contents = b"line1" d = self.nodemaker.create_mutable_file(MutableData(initial_contents)) def _created(n): d = n.modify(_modifier) d.addCallback(lambda res: n.download_best_version()) d.addCallback(lambda res: self.assertThat(res, Equals(b"line1line2"))) d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "m")) d.addCallback(lambda res: n.modify(_non_modifier)) d.addCallback(lambda res: n.download_best_version()) d.addCallback(lambda res: self.assertThat(res, Equals(b"line1line2"))) d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "non")) d.addCallback(lambda res: n.modify(_none_modifier)) d.addCallback(lambda res: n.download_best_version()) d.addCallback(lambda res: self.assertThat(res, Equals(b"line1line2"))) d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "none")) d.addCallback(lambda res: self.shouldFail(ValueError, "error_modifier", None, n.modify, _error_modifier)) d.addCallback(lambda res: n.download_best_version()) d.addCallback(lambda res: self.assertThat(res, Equals(b"line1line2"))) d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "err")) d.addCallback(lambda res: n.download_best_version()) d.addCallback(lambda res: self.assertThat(res, Equals(b"line1line2"))) d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "big")) d.addCallback(lambda res: n.modify(_ucw_error_modifier)) d.addCallback(lambda res: self.assertThat(calls, HasLength(2))) d.addCallback(lambda res: n.download_best_version()) d.addCallback(lambda res: self.assertThat(res, Equals(b"line1line2line3"))) d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 3, "ucw")) def _reset_ucw_error_modifier(res): calls[:] = [] return res d.addCallback(_reset_ucw_error_modifier) # in practice, this n.modify call should publish twice: the first # one gets a UCWE, the second does not. But our test jig (in # which the modifier raises the UCWE) skips over the first one, # so in this test there will be only one publish, and the seqnum # will only be one larger than the previous test, not two (i.e. 4 # instead of 5). 
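            # (Editorial sketch, hedged: the modifier contract used
            # throughout this test is
            #
            #     def modifier(old_contents, servermap, first_time):
            #         return new_bytes        # publish new_bytes
            #         # ...or return None / old_contents to publish nothing
            #
            # and n.modify() retries the modifier after an
            # UncoordinatedWriteError, which is why `calls` is expected to
            # reach length 2 below.)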
d.addCallback(lambda res: n.modify(_ucw_error_non_modifier)) d.addCallback(lambda res: self.assertThat(calls, HasLength(2))) d.addCallback(lambda res: n.download_best_version()) d.addCallback(lambda res: self.assertThat(res, Equals(b"line1line2line3"))) d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 4, "ucw")) d.addCallback(lambda res: n.modify(_toobig_modifier)) return d d.addCallback(_created) return d def test_modify_backoffer(self): def _modifier(old_contents, servermap, first_time): return old_contents + b"line2" calls = [] def _ucw_error_modifier(old_contents, servermap, first_time): # simulate an UncoordinatedWriteError once calls.append(1) if len(calls) <= 1: raise UncoordinatedWriteError("simulated") return old_contents + b"line3" def _always_ucw_error_modifier(old_contents, servermap, first_time): raise UncoordinatedWriteError("simulated") def _backoff_stopper(node, f): return f def _backoff_pauser(node, f): d = defer.Deferred() reactor.callLater(0.5, d.callback, None) return d # the give-up-er will hit its maximum retry count quickly giveuper = BackoffAgent() giveuper._delay = 0.1 giveuper.factor = 1 d = self.nodemaker.create_mutable_file(MutableData(b"line1")) def _created(n): d = n.modify(_modifier) d.addCallback(lambda res: n.download_best_version()) d.addCallback(lambda res: self.assertThat(res, Equals(b"line1line2"))) d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "m")) d.addCallback(lambda res: self.shouldFail(UncoordinatedWriteError, "_backoff_stopper", None, n.modify, _ucw_error_modifier, _backoff_stopper)) d.addCallback(lambda res: n.download_best_version()) d.addCallback(lambda res: self.assertThat(res, Equals(b"line1line2"))) d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "stop")) def _reset_ucw_error_modifier(res): calls[:] = [] return res d.addCallback(_reset_ucw_error_modifier) d.addCallback(lambda res: n.modify(_ucw_error_modifier, _backoff_pauser)) d.addCallback(lambda res: n.download_best_version()) d.addCallback(lambda res: self.assertThat(res, Equals(b"line1line2line3"))) d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 3, "pause")) d.addCallback(lambda res: self.shouldFail(UncoordinatedWriteError, "giveuper", None, n.modify, _always_ucw_error_modifier, giveuper.delay)) d.addCallback(lambda res: n.download_best_version()) d.addCallback(lambda res: self.assertThat(res, Equals(b"line1line2line3"))) d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 3, "giveup")) return d d.addCallback(_created) return d def test_upload_and_download_full_size_keys(self): self.nodemaker.key_generator = client.KeyGenerator() d = self.nodemaker.create_mutable_file() def _created(n): d = defer.succeed(None) d.addCallback(lambda res: n.get_servermap(MODE_READ)) d.addCallback(lambda smap: smap.dump(StringIO())) d.addCallback(lambda sio: self.assertTrue("3-of-10" in sio.getvalue())) d.addCallback(lambda res: n.overwrite(MutableData(b"contents 1"))) d.addCallback(lambda res: self.assertThat(res, Is(None))) d.addCallback(lambda res: n.download_best_version()) d.addCallback(lambda res: self.assertThat(res, Equals(b"contents 1"))) d.addCallback(lambda res: n.overwrite(MutableData(b"contents 2"))) d.addCallback(lambda res: n.download_best_version()) d.addCallback(lambda res: self.assertThat(res, Equals(b"contents 2"))) d.addCallback(lambda res: n.get_servermap(MODE_WRITE)) d.addCallback(lambda smap: n.upload(MutableData(b"contents 3"), smap)) d.addCallback(lambda res: n.download_best_version()) d.addCallback(lambda res: 
self.assertThat(res, Equals(b"contents 3"))) d.addCallback(lambda res: n.get_servermap(MODE_ANYTHING)) d.addCallback(lambda smap: n.download_version(smap, smap.best_recoverable_version())) d.addCallback(lambda res: self.assertThat(res, Equals(b"contents 3"))) return d d.addCallback(_created) return d def test_size_after_servermap_update(self): # a mutable file node should have something to say about how big # it is after a servermap update is performed, since this tells # us how large the best version of that mutable file is. d = self.nodemaker.create_mutable_file() def _created(n): self.n = n return n.get_servermap(MODE_READ) d.addCallback(_created) d.addCallback(lambda ignored: self.assertThat(self.n.get_size(), Equals(0))) d.addCallback(lambda ignored: self.n.overwrite(MutableData(b"foobarbaz"))) d.addCallback(lambda ignored: self.assertThat(self.n.get_size(), Equals(9))) d.addCallback(lambda ignored: self.nodemaker.create_mutable_file(MutableData(b"foobarbaz"))) d.addCallback(_created) d.addCallback(lambda ignored: self.assertThat(self.n.get_size(), Equals(9))) return d tahoe_lafs-1.20.0/src/allmydata/test/mutable/test_interoperability.py0000644000000000000000000010607413615410400022774 0ustar00""" Ported to Python 3. """ import os, base64 from ..common import AsyncTestCase from testtools.matchers import HasLength from allmydata import uri from allmydata.storage.common import storage_index_to_dir from allmydata.util import fileutil from .. import common_util as testutil from ..no_network import GridTestMixin class Interoperability(GridTestMixin, AsyncTestCase, testutil.ShouldFailMixin): sdmf_old_shares = {} sdmf_old_shares[0] = b"VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAhgcAb/adrQFrhlrRNoRpvjDuxmFebA4F0qCyqWssm61AAQ/EX4eC/1+hGOQ/h4EiKUkqxdsfzdcPlDvd11SGWZ0VHsUclZChTzuBAU2zLTXm+cG8IFhO50ly6Ey/DB44NtMKVaVzO0nU8DE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrD
x+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA=" sdmf_old_shares[1] = b"VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAhgcAb/adrQFrhlrRNoRpvjDuxmFebA4F0qCyqWssm61AAP7FHJWQoU87gQFNsy015vnBvCBYTudJcuhMvwweODbTD8Rfh4L/X6EY5D+HgSIpSSrF2x/N1w+UO93XVIZZnRUeePDXEwhqYDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSR
JgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA=" sdmf_old_shares[2] = b"VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAd8jdiCodW233N1acXhZGnulDKR3hiNsMdEIsijRPemewASoSCFpVj4utEE+eVFM146xfgC6DX39GaQ2zT3YKsWX3GiLwKtGffwqV7IlZIcBEVqMfTXSTZsY+dZm1MxxCZH0Zd33VY0yggDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j
6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA=" sdmf_old_shares[3] = b"VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAd8jdiCodW233N1acXhZGnulDKR3hiNsMdEIsijRPemewARoi8CrRn38KleyJWSHARFajH010k2bGPnWZtTMcQmR9GhIIWlWPi60QT55UUzXjrF+ALoNff0ZpDbNPdgqxZfcSNSplrHqtsDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmw
TJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA=" sdmf_old_shares[4] = b"VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAoIM8M4XulprmLd4gGMobS2Bv9CmwB5LpK/ySHE1QWjdwAUMA7/aVz7Mb1em0eks+biC8ZuVUhuAEkTVOAF4YulIjE8JlfW0dS1XKk62u0586QxiN38NTsluUDx8EAPTL66yRsfb1f3rRIDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm
4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA=" sdmf_old_shares[5] = b"VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAoIM8M4XulprmLd4gGMobS2Bv9CmwB5LpK/ySHE1QWjdwATPCZX1tHUtVypOtrtOfOkMYjd/DU7JblA8fBAD0y+uskwDv9pXPsxvV6bR6Sz5uILxm5VSG4ASRNU4AXhi6UiMUKZHBmcmEgDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw
6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA=" sdmf_old_shares[6] = b"VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAlyHZU7RfTJjbHu1gjabWZsTu+7nAeRVG6/ZSd4iMQ1ZgAWDSFSPvKzcFzRcuRlVgKUf0HBce1MCF8SwpUbPPEyfVJty4xLZ7DvNU/Eh/R6BarsVAagVXdp+GtEu0+fok7nilT4LchmHo8DE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7
Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA=" sdmf_old_shares[7] = b"VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAlyHZU7RfTJjbHu1gjabWZsTu+7nAeRVG6/ZSd4iMQ1ZgAVbcuMS2ew7zVPxIf0egWq7FQGoFV3afhrRLtPn6JO54oNIVI+8rNwXNFy5GVWApR/QcFx7UwIXxLClRs88TJ9UtLnNF4/mM0DE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1J
VCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA=" sdmf_old_shares[8] = b"VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgABUSzNKiMx0E91q51/WH6ASL0fDEOLef9oxuyBX5F5cpoABojmWkDX3k3FKfgNHIeptE3lxB8HHzxDfSD250psyfNCAAwGsKbMxbmI2NpdTozZ3SICrySwgGkatA1gsDOJmOnTzgAYmqKY7A9vQChuYa17fYSyKerIb3682jxiIneQvCMWCK5WcuI4PMeIsUAj8yxdxHvV+a9vtSCEsDVvymrrooDKX1GK98t37yoDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH
9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA=" sdmf_old_shares[9] = b"VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgABUSzNKiMx0E91q51/WH6ASL0fDEOLef9oxuyBX5F5cpoABojmWkDX3k3FKfgNHIeptE3lxB8HHzxDfSD250psyfNCAAwGsKbMxbmI2NpdTozZ3SICrySwgGkatA1gsDOJmOnTzgAXVnLiODzHiLFAI/MsXcR71fmvb7UghLA1b8pq66KAyl+aopjsD29AKG5hrXt9hLIp6shvfrzaPGIid5C8IxYIrjgBj1YohGgDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA=" sdmf_old_cap = 
b"URI:SSK:gmjgofw6gan57gwpsow6gtrz3e:5adm6fayxmu3e4lkmfvt6lkkfix34ai2wop2ioqr4bgvvhiol3kq" sdmf_old_contents = b"This is a test file.\n" def copy_sdmf_shares(self): # We'll basically be short-circuiting the upload process. servernums = list(self.g.servers_by_number.keys()) assert len(servernums) == 10 assignments = list(zip(self.sdmf_old_shares.keys(), servernums)) # Get the storage index. cap = uri.from_string(self.sdmf_old_cap) si = cap.get_storage_index() # Now execute each assignment by writing the storage. for (share, servernum) in assignments: sharedata = base64.b64decode(self.sdmf_old_shares[share]) storedir = self.get_serverdir(servernum) storage_path = os.path.join(storedir, "shares", storage_index_to_dir(si)) fileutil.make_dirs(storage_path) fileutil.write(os.path.join(storage_path, "%d" % share), sharedata) # ...and verify that the shares are there. shares = self.find_uri_shares(self.sdmf_old_cap) self.assertThat(shares, HasLength(10)) def test_new_downloader_can_read_old_shares(self): self.basedir = "mutable/Interoperability/new_downloader_can_read_old_shares" self.set_up_grid() self.copy_sdmf_shares() nm = self.g.clients[0].nodemaker n = nm.create_from_cap(self.sdmf_old_cap) d = n.download_best_version() d.addCallback(self.assertEqual, self.sdmf_old_contents) return d tahoe_lafs-1.20.0/src/allmydata/test/mutable/test_multiple_encodings.py0000644000000000000000000001452013615410400023265 0ustar00""" Ported to Python 3. """ from ..common import AsyncTestCase from testtools.matchers import Equals from allmydata.interfaces import SDMF_VERSION from allmydata.monitor import Monitor from foolscap.logging import log from allmydata.mutable.common import MODE_READ from allmydata.mutable.publish import Publish, MutableData from allmydata.mutable.servermap import ServerMap, ServermapUpdater from ..common_util import DevNullDictionary from .util import FakeStorage, make_nodemaker class MultipleEncodings(AsyncTestCase): def setUp(self): super(MultipleEncodings, self).setUp() self.CONTENTS = b"New contents go here" self.uploadable = MutableData(self.CONTENTS) self._storage = FakeStorage() self._nodemaker = make_nodemaker(self._storage, num_peers=20) self._storage_broker = self._nodemaker.storage_broker d = self._nodemaker.create_mutable_file(self.uploadable) def _created(node): self._fn = node d.addCallback(_created) return d def _encode(self, k, n, data, version=SDMF_VERSION): # encode 'data' into a peerid->shares dict. 
fn = self._fn # disable the nodecache, since for these tests we explicitly need # multiple nodes pointing at the same file self._nodemaker._node_cache = DevNullDictionary() fn2 = self._nodemaker.create_from_cap(fn.get_uri()) # then we copy over other fields that are normally fetched from the # existing shares fn2._pubkey = fn._pubkey fn2._privkey = fn._privkey fn2._encprivkey = fn._encprivkey # and set the encoding parameters to something completely different fn2._required_shares = k fn2._total_shares = n s = self._storage s._peers = {} # clear existing storage p2 = Publish(fn2, self._storage_broker, None) uploadable = MutableData(data) d = p2.publish(uploadable) def _published(res): shares = s._peers s._peers = {} return shares d.addCallback(_published) return d def make_servermap(self, mode=MODE_READ, oldmap=None): if oldmap is None: oldmap = ServerMap() smu = ServermapUpdater(self._fn, self._storage_broker, Monitor(), oldmap, mode) d = smu.update() return d def test_multiple_encodings(self): # we encode the same file in two different ways (3-of-10 and 4-of-9), # then mix up the shares, to make sure that download survives seeing # a variety of encodings. This is actually kind of tricky to set up. contents1 = b"Contents for encoding 1 (3-of-10) go here"*1000 contents2 = b"Contents for encoding 2 (4-of-9) go here"*1000 contents3 = b"Contents for encoding 3 (4-of-7) go here"*1000 # we make a retrieval object that doesn't know what encoding # parameters to use fn3 = self._nodemaker.create_from_cap(self._fn.get_uri()) # now we upload a file through fn1, and grab its shares d = self._encode(3, 10, contents1) def _encoded_1(shares): self._shares1 = shares d.addCallback(_encoded_1) d.addCallback(lambda res: self._encode(4, 9, contents2)) def _encoded_2(shares): self._shares2 = shares d.addCallback(_encoded_2) d.addCallback(lambda res: self._encode(4, 7, contents3)) def _encoded_3(shares): self._shares3 = shares d.addCallback(_encoded_3) def _merge(res): log.msg("merging sharelists") # we merge the shares from the two sets, leaving each shnum in # its original location, but using a share from set1 or set2 # according to the following sequence: # # 4-of-9 a s2 # 4-of-9 b s2 # 4-of-7 c s3 # 4-of-9 d s2 # 3-of-9 e s1 # 3-of-9 f s1 # 3-of-9 g s1 # 4-of-9 h s2 # # so that neither form can be recovered until fetch [f], at which # point version-s1 (the 3-of-10 form) should be recoverable. If # the implementation latches on to the first version it sees, # then s2 will be recoverable at fetch [g]. 
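# A standalone sketch (illustrative only) of the bookkeeping behind the
# placement table above: given the `places` sequence used below and each
# encoding's k (s1 is 3-of-10, s2 is 4-of-9, s3 is 4-of-7), compute the
# 0-based fetch step at which each version first has k shares available.
def first_recoverable_fetch(places, k_by_version):
    seen = dict((v, 0) for v in k_by_version)
    result = {}
    for fetch_index, version in enumerate(places):
        seen[version] += 1
        if version not in result and seen[version] >= k_by_version[version]:
            result[version] = fetch_index
    return result

# versions that never accumulate k shares (here, the 4-of-7 set) are simply
# absent from the result
print(first_recoverable_fetch([2, 2, 3, 2, 1, 1, 1, 2], {1: 3, 2: 4, 3: 4}))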
# Later, when we implement code that handles multiple versions, # we can use this framework to assert that all recoverable # versions are retrieved, and test that 'epsilon' does its job places = [2, 2, 3, 2, 1, 1, 1, 2] sharemap = {} sb = self._storage_broker for peerid in sorted(sb.get_all_serverids()): for shnum in self._shares1.get(peerid, {}): if shnum < len(places): which = places[shnum] else: which = "x" self._storage._peers[peerid] = peers = {} in_1 = shnum in self._shares1[peerid] in_2 = shnum in self._shares2.get(peerid, {}) in_3 = shnum in self._shares3.get(peerid, {}) if which == 1: if in_1: peers[shnum] = self._shares1[peerid][shnum] sharemap[shnum] = peerid elif which == 2: if in_2: peers[shnum] = self._shares2[peerid][shnum] sharemap[shnum] = peerid elif which == 3: if in_3: peers[shnum] = self._shares3[peerid][shnum] sharemap[shnum] = peerid # we don't bother placing any other shares # now sort the sequence so that share 0 is returned first new_sequence = [sharemap[shnum] for shnum in sorted(sharemap.keys())] self._storage._sequence = new_sequence log.msg("merge done") d.addCallback(_merge) d.addCallback(lambda res: fn3.download_best_version()) def _retrieved(new_contents): # the current specified behavior is "first version recoverable" self.assertThat(new_contents, Equals(contents1)) d.addCallback(_retrieved) return d tahoe_lafs-1.20.0/src/allmydata/test/mutable/test_multiple_versions.py0000644000000000000000000001044713615410400023170 0ustar00""" Ported to Python 3. """ from ..common import AsyncTestCase from testtools.matchers import Equals, HasLength from allmydata.monitor import Monitor from allmydata.mutable.common import MODE_CHECK, MODE_READ from .util import PublishMixin, CheckerMixin class MultipleVersions(AsyncTestCase, PublishMixin, CheckerMixin): def setUp(self): super(MultipleVersions, self).setUp() return self.publish_multiple() def test_multiple_versions(self): # if we see a mix of versions in the grid, download_best_version # should get the latest one self._set_versions(dict([(i,2) for i in (0,2,4,6,8)])) d = self._fn.download_best_version() d.addCallback(lambda res: self.assertThat(res, Equals(self.CONTENTS[4]))) # and the checker should report problems d.addCallback(lambda res: self._fn.check(Monitor())) d.addCallback(self.check_bad, "test_multiple_versions") # but if everything is at version 2, that's what we should download d.addCallback(lambda res: self._set_versions(dict([(i,2) for i in range(10)]))) d.addCallback(lambda res: self._fn.download_best_version()) d.addCallback(lambda res: self.assertThat(res, Equals(self.CONTENTS[2]))) # if exactly one share is at version 3, we should still get v2 d.addCallback(lambda res: self._set_versions({0:3})) d.addCallback(lambda res: self._fn.download_best_version()) d.addCallback(lambda res: self.assertThat(res, Equals(self.CONTENTS[2]))) # but the servermap should see the unrecoverable version. This # depends upon the single newer share being queried early. 
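# Standalone sketch (not part of the tests) of the rule the assertions above
# exercise: a version counts as recoverable only if at least k of its shares
# are present, and download_best_version() is expected to return the
# recoverable version with the highest sequence number.  The real logic lives
# in the servermap/retrieve code; this helper just restates the arithmetic.
def best_recoverable_seqnum(seqnum_by_shnum, k):
    counts = {}
    for seqnum in seqnum_by_shnum.values():
        counts[seqnum] = counts.get(seqnum, 0) + 1
    recoverable = [seqnum for (seqnum, n) in counts.items() if n >= k]
    return max(recoverable) if recoverable else None

# ten shares at seqnum 2, except share 0 bumped to seqnum 3; with k=3 the
# newer version is unrecoverable, so the best recoverable version is still 2
shares = dict((i, 2) for i in range(10))
shares[0] = 3
assert best_recoverable_seqnum(shares, k=3) == 2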
d.addCallback(lambda res: self._fn.get_servermap(MODE_READ)) def _check_smap(smap): self.assertThat(smap.unrecoverable_versions(), HasLength(1)) newer = smap.unrecoverable_newer_versions() self.assertThat(newer, HasLength(1)) verinfo, health = list(newer.items())[0] self.assertThat(verinfo[0], Equals(4)) self.assertThat(health, Equals((1,3))) self.assertThat(smap.needs_merge(), Equals(False)) d.addCallback(_check_smap) # if we have a mix of two parallel versions (s4a and s4b), we could # recover either d.addCallback(lambda res: self._set_versions({0:3,2:3,4:3,6:3,8:3, 1:4,3:4,5:4,7:4,9:4})) d.addCallback(lambda res: self._fn.get_servermap(MODE_READ)) def _check_smap_mixed(smap): self.assertThat(smap.unrecoverable_versions(), HasLength(0)) newer = smap.unrecoverable_newer_versions() self.assertThat(newer, HasLength(0)) self.assertTrue(smap.needs_merge()) d.addCallback(_check_smap_mixed) d.addCallback(lambda res: self._fn.download_best_version()) d.addCallback(lambda res: self.assertTrue(res == self.CONTENTS[3] or res == self.CONTENTS[4])) return d def test_replace(self): # if we see a mix of versions in the grid, we should be able to # replace them all with a newer version # if exactly one share is at version 3, we should download (and # replace) v2, and the result should be v4. Note that the index we # give to _set_versions is different than the sequence number. target = dict([(i,2) for i in range(10)]) # seqnum3 target[0] = 3 # seqnum4 self._set_versions(target) def _modify(oldversion, servermap, first_time): return oldversion + b" modified" d = self._fn.modify(_modify) d.addCallback(lambda res: self._fn.download_best_version()) expected = self.CONTENTS[2] + b" modified" d.addCallback(lambda res: self.assertThat(res, Equals(expected))) # and the servermap should indicate that the outlier was replaced too d.addCallback(lambda res: self._fn.get_servermap(MODE_CHECK)) def _check_smap(smap): self.assertThat(smap.highest_seqnum(), Equals(5)) self.assertThat(smap.unrecoverable_versions(), HasLength(0)) self.assertThat(smap.recoverable_versions(), HasLength(1)) d.addCallback(_check_smap) return d tahoe_lafs-1.20.0/src/allmydata/test/mutable/test_problems.py0000644000000000000000000006645313615410400021240 0ustar00""" Ported to Python 3. """ import os, base64 from ..common import AsyncTestCase from testtools.matchers import HasLength from twisted.internet import defer from foolscap.logging import log from allmydata import uri from allmydata.crypto import rsa from allmydata.interfaces import NotEnoughSharesError, SDMF_VERSION, MDMF_VERSION from allmydata.util import fileutil from allmydata.util.hashutil import ssk_writekey_hash, ssk_pubkey_fingerprint_hash from allmydata.mutable.common import \ MODE_CHECK, MODE_WRITE, MODE_READ, \ UncoordinatedWriteError, \ NotEnoughServersError from allmydata.mutable.publish import MutableData from allmydata.storage.common import storage_index_to_dir from ..no_network import GridTestMixin from .. 
import common_util as testutil from ..common_util import DevNullDictionary class SameKeyGenerator(object): def __init__(self, pubkey, privkey): self.pubkey = pubkey self.privkey = privkey def generate(self, keysize=None): return defer.succeed( (self.pubkey, self.privkey) ) class FirstServerGetsKilled(object): done = False def notify(self, retval, wrapper, methname): if not self.done: wrapper.broken = True self.done = True return retval class FirstServerGetsDeleted(object): def __init__(self): self.done = False self.silenced = None def notify(self, retval, wrapper, methname): if not self.done: # this query will work, but later queries should think the share # has been deleted self.done = True self.silenced = wrapper return retval if wrapper == self.silenced: assert methname == "slot_testv_and_readv_and_writev" return (True, {}) return retval class Problems(GridTestMixin, AsyncTestCase, testutil.ShouldFailMixin): def do_publish_surprise(self, version): self.basedir = "mutable/Problems/test_publish_surprise_%s" % version self.set_up_grid() nm = self.g.clients[0].nodemaker d = nm.create_mutable_file(MutableData(b"contents 1"), version=version) def _created(n): d = defer.succeed(None) d.addCallback(lambda res: n.get_servermap(MODE_WRITE)) def _got_smap1(smap): # stash the old state of the file self.old_map = smap d.addCallback(_got_smap1) # then modify the file, leaving the old map untouched d.addCallback(lambda res: log.msg("starting winning write")) d.addCallback(lambda res: n.overwrite(MutableData(b"contents 2"))) # now attempt to modify the file with the old servermap. This # will look just like an uncoordinated write, in which every # single share got updated between our mapupdate and our publish d.addCallback(lambda res: log.msg("starting doomed write")) d.addCallback(lambda res: self.shouldFail(UncoordinatedWriteError, "test_publish_surprise", None, n.upload, MutableData(b"contents 2a"), self.old_map)) return d d.addCallback(_created) return d def test_publish_surprise_sdmf(self): return self.do_publish_surprise(SDMF_VERSION) def test_publish_surprise_mdmf(self): return self.do_publish_surprise(MDMF_VERSION) def test_retrieve_surprise(self): self.basedir = "mutable/Problems/test_retrieve_surprise" self.set_up_grid() nm = self.g.clients[0].nodemaker d = nm.create_mutable_file(MutableData(b"contents 1"*4000)) def _created(n): d = defer.succeed(None) d.addCallback(lambda res: n.get_servermap(MODE_READ)) def _got_smap1(smap): # stash the old state of the file self.old_map = smap d.addCallback(_got_smap1) # then modify the file, leaving the old map untouched d.addCallback(lambda res: log.msg("starting winning write")) d.addCallback(lambda res: n.overwrite(MutableData(b"contents 2"))) # now attempt to retrieve the old version with the old servermap. # This will look like someone has changed the file since we # updated the servermap. d.addCallback(lambda res: log.msg("starting doomed read")) d.addCallback(lambda res: self.shouldFail(NotEnoughSharesError, "test_retrieve_surprise", "ran out of servers: have 0 of 1", n.download_version, self.old_map, self.old_map.best_recoverable_version(), )) return d d.addCallback(_created) return d def test_unexpected_shares(self): # upload the file, take a servermap, shut down one of the servers, # upload it again (causing shares to appear on a new server), then # upload using the old servermap. The last upload should fail with an # UncoordinatedWriteError, because of the shares that didn't appear # in the servermap. 
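# Standalone sketch (illustrative, not part of the suite) of the fault-injection
# pattern behind FirstServerGetsKilled above: a notifier is attached to a server
# wrapper, and after the first observed call it flips the wrapper into a broken
# state so later calls fail.  FakeWrapper is a stand-in for the no-network
# server wrapper; only the `broken` flag and the notify() hook mirror what the
# tests rely on.
class FakeWrapper(object):
    broken = False

class BreakAfterFirstCall(object):
    done = False
    def notify(self, retval, wrapper, methname):
        if not self.done:
            wrapper.broken = True
            self.done = True
        return retval

wrapper = FakeWrapper()
killer = BreakAfterFirstCall()
killer.notify("first result", wrapper, "slot_readv")   # first call: break the wrapper
assert wrapper.broken and killer.done
killer.notify("second result", wrapper, "slot_readv")  # later calls: notifier is a no-op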
self.basedir = "mutable/Problems/test_unexpected_shares" self.set_up_grid() nm = self.g.clients[0].nodemaker d = nm.create_mutable_file(MutableData(b"contents 1")) def _created(n): d = defer.succeed(None) d.addCallback(lambda res: n.get_servermap(MODE_WRITE)) def _got_smap1(smap): # stash the old state of the file self.old_map = smap # now shut down one of the servers peer0 = list(smap.make_sharemap()[0])[0].get_serverid() self.g.remove_server(peer0) # then modify the file, leaving the old map untouched log.msg("starting winning write") return n.overwrite(MutableData(b"contents 2")) d.addCallback(_got_smap1) # now attempt to modify the file with the old servermap. This # will look just like an uncoordinated write, in which every # single share got updated between our mapupdate and our publish d.addCallback(lambda res: log.msg("starting doomed write")) d.addCallback(lambda res: self.shouldFail(UncoordinatedWriteError, "test_surprise", None, n.upload, MutableData(b"contents 2a"), self.old_map)) return d d.addCallback(_created) return d def test_multiply_placed_shares(self): self.basedir = "mutable/Problems/test_multiply_placed_shares" self.set_up_grid() nm = self.g.clients[0].nodemaker d = nm.create_mutable_file(MutableData(b"contents 1")) # remove one of the servers and reupload the file. def _created(n): self._node = n servers = self.g.get_all_serverids() self.ss = self.g.remove_server(servers[len(servers)-1]) new_server = self.g.make_server(len(servers)-1) self.g.add_server(len(servers)-1, new_server) return self._node.download_best_version() d.addCallback(_created) d.addCallback(lambda data: MutableData(data)) d.addCallback(lambda data: self._node.overwrite(data)) # restore the server we removed earlier, then download+upload # the file again def _overwritten(ign): self.g.add_server(len(self.g.servers_by_number), self.ss) return self._node.download_best_version() d.addCallback(_overwritten) d.addCallback(lambda data: MutableData(data)) d.addCallback(lambda data: self._node.overwrite(data)) d.addCallback(lambda ignored: self._node.get_servermap(MODE_CHECK)) def _overwritten_again(smap): # Make sure that all shares were updated by making sure that # there aren't any other versions in the sharemap. self.assertThat(smap.recoverable_versions(), HasLength(1)) self.assertThat(smap.unrecoverable_versions(), HasLength(0)) d.addCallback(_overwritten_again) return d def test_bad_server(self): # Break one server, then create the file: the initial publish should # complete with an alternate server. Breaking a second server should # not prevent an update from succeeding either. self.basedir = "mutable/Problems/test_bad_server" self.set_up_grid() nm = self.g.clients[0].nodemaker # to make sure that one of the initial peers is broken, we have to # get creative. We create an RSA key and compute its storage-index. # Then we make a KeyGenerator that always returns that one key, and # use it to create the mutable file. This will get easier when we can # use #467 static-server-selection to disable permutation and force # the choice of server for share[0]. 
d = nm.key_generator.generate() def _got_key(keypair): (pubkey, privkey) = keypair nm.key_generator = SameKeyGenerator(pubkey, privkey) pubkey_s = rsa.der_string_from_verifying_key(pubkey) privkey_s = rsa.der_string_from_signing_key(privkey) u = uri.WriteableSSKFileURI(ssk_writekey_hash(privkey_s), ssk_pubkey_fingerprint_hash(pubkey_s)) self._storage_index = u.get_storage_index() d.addCallback(_got_key) def _break_peer0(res): si = self._storage_index servers = nm.storage_broker.get_servers_for_psi(si) self.g.break_server(servers[0].get_serverid()) self.server1 = servers[1] d.addCallback(_break_peer0) # now "create" the file, using the pre-established key, and let the # initial publish finally happen d.addCallback(lambda res: nm.create_mutable_file(MutableData(b"contents 1"))) # that ought to work def _got_node(n): d = n.download_best_version() d.addCallback(lambda res: self.assertEquals(res, b"contents 1")) # now break the second peer def _break_peer1(res): self.g.break_server(self.server1.get_serverid()) d.addCallback(_break_peer1) d.addCallback(lambda res: n.overwrite(MutableData(b"contents 2"))) # that ought to work too d.addCallback(lambda res: n.download_best_version()) d.addCallback(lambda res: self.assertEquals(res, b"contents 2")) def _explain_error(f): print(f) if f.check(NotEnoughServersError): print("first_error:", f.value.first_error) return f d.addErrback(_explain_error) return d d.addCallback(_got_node) return d def test_bad_server_overlap(self): # like test_bad_server, but with no extra unused servers to fall back # upon. This means that we must re-use a server which we've already # used. If we don't remember the fact that we sent them one share # already, we'll mistakenly think we're experiencing an # UncoordinatedWriteError. # Break one server, then create the file: the initial publish should # complete with an alternate server. Breaking a second server should # not prevent an update from succeeding either. 
self.basedir = "mutable/Problems/test_bad_server_overlap" self.set_up_grid() nm = self.g.clients[0].nodemaker sb = nm.storage_broker peerids = [s.get_serverid() for s in sb.get_connected_servers()] self.g.break_server(peerids[0]) d = nm.create_mutable_file(MutableData(b"contents 1")) def _created(n): d = n.download_best_version() d.addCallback(lambda res: self.assertEquals(res, b"contents 1")) # now break one of the remaining servers def _break_second_server(res): self.g.break_server(peerids[1]) d.addCallback(_break_second_server) d.addCallback(lambda res: n.overwrite(MutableData(b"contents 2"))) # that ought to work too d.addCallback(lambda res: n.download_best_version()) d.addCallback(lambda res: self.assertEquals(res, b"contents 2")) return d d.addCallback(_created) return d def test_publish_all_servers_bad(self): # Break all servers: the publish should fail self.basedir = "mutable/Problems/test_publish_all_servers_bad" self.set_up_grid() nm = self.g.clients[0].nodemaker for s in nm.storage_broker.get_connected_servers(): s.get_rref().broken = True d = self.shouldFail(NotEnoughServersError, "test_publish_all_servers_bad", "ran out of good servers", nm.create_mutable_file, MutableData(b"contents")) return d def test_publish_no_servers(self): # no servers at all: the publish should fail self.basedir = "mutable/Problems/test_publish_no_servers" self.set_up_grid(num_servers=0) nm = self.g.clients[0].nodemaker d = self.shouldFail(NotEnoughServersError, "test_publish_no_servers", "Ran out of non-bad servers", nm.create_mutable_file, MutableData(b"contents")) return d def test_privkey_query_error(self): # when a servermap is updated with MODE_WRITE, it tries to get the # privkey. Something might go wrong during this query attempt. # Exercise the code in _privkey_query_failed which tries to handle # such an error. self.basedir = "mutable/Problems/test_privkey_query_error" self.set_up_grid(num_servers=20) nm = self.g.clients[0].nodemaker nm._node_cache = DevNullDictionary() # disable the nodecache # we need some contents that are large enough to push the privkey out # of the early part of the file LARGE = b"These are Larger contents" * 2000 # about 50KB LARGE_uploadable = MutableData(LARGE) d = nm.create_mutable_file(LARGE_uploadable) def _created(n): self.uri = n.get_uri() self.n2 = nm.create_from_cap(self.uri) # When a mapupdate is performed on a node that doesn't yet know # the privkey, a short read is sent to a batch of servers, to get # the verinfo and (hopefully, if the file is short enough) the # encprivkey. Our file is too large to let this first read # contain the encprivkey. Each non-encprivkey-bearing response # that arrives (until the node gets the encprivkey) will trigger # a second read to specifically read the encprivkey. # # So, to exercise this case: # 1. notice which server gets a read() call first # 2. tell that server to start throwing errors killer = FirstServerGetsKilled() for s in nm.storage_broker.get_connected_servers(): s.get_rref().post_call_notifier = killer.notify d.addCallback(_created) # now we update a servermap from a new node (which doesn't have the # privkey yet, forcing it to use a separate privkey query). Note that # the map-update will succeed, since we'll just get a copy from one # of the other shares. d.addCallback(lambda res: self.n2.get_servermap(MODE_WRITE)) return d def test_privkey_query_missing(self): # like test_privkey_query_error, but the shares are deleted by the # second query, instead of raising an exception. 
self.basedir = "mutable/Problems/test_privkey_query_missing" self.set_up_grid(num_servers=20) nm = self.g.clients[0].nodemaker LARGE = b"These are Larger contents" * 2000 # about 50KiB LARGE_uploadable = MutableData(LARGE) nm._node_cache = DevNullDictionary() # disable the nodecache d = nm.create_mutable_file(LARGE_uploadable) def _created(n): self.uri = n.get_uri() self.n2 = nm.create_from_cap(self.uri) deleter = FirstServerGetsDeleted() for s in nm.storage_broker.get_connected_servers(): s.get_rref().post_call_notifier = deleter.notify d.addCallback(_created) d.addCallback(lambda res: self.n2.get_servermap(MODE_WRITE)) return d def test_block_and_hash_query_error(self): # This tests for what happens when a query to a remote server # fails in either the hash validation step or the block getting # step (because of batching, this is the same actual query). # We need to have the storage server persist up until the point # that its prefix is validated, then suddenly die. This # exercises some exception handling code in Retrieve. self.basedir = "mutable/Problems/test_block_and_hash_query_error" self.set_up_grid(num_servers=20) nm = self.g.clients[0].nodemaker CONTENTS = b"contents" * 2000 CONTENTS_uploadable = MutableData(CONTENTS) d = nm.create_mutable_file(CONTENTS_uploadable) def _created(node): self._node = node d.addCallback(_created) d.addCallback(lambda ignored: self._node.get_servermap(MODE_READ)) def _then(servermap): # we have our servermap. Now we set up the servers like the # tests above -- the first one that gets a read call should # start throwing errors, but only after returning its prefix # for validation. Since we'll download without fetching the # private key, the next query to the remote server will be # for either a block and salt or for hashes, either of which # will exercise the error handling code. killer = FirstServerGetsKilled() for s in nm.storage_broker.get_connected_servers(): s.get_rref().post_call_notifier = killer.notify ver = servermap.best_recoverable_version() assert ver return self._node.download_version(servermap, ver) d.addCallback(_then) d.addCallback(lambda data: self.assertEquals(data, CONTENTS)) return d def test_1654(self): # test that the Retrieve object unconditionally verifies the block # hash tree root for mutable shares. The failure mode is that # carefully crafted shares can cause undetected corruption (the # retrieve appears to finish successfully, but the result is # corrupted). When fixed, these shares always cause a # CorruptShareError, which results in NotEnoughSharesError in this # 2-of-2 file. self.basedir = "mutable/Problems/test_1654" self.set_up_grid(num_servers=2) cap = uri.from_string(TEST_1654_CAP) si = cap.get_storage_index() for share, shnum in [(TEST_1654_SH0, 0), (TEST_1654_SH1, 1)]: sharedata = base64.b64decode(share) storedir = self.get_serverdir(shnum) storage_path = os.path.join(storedir, "shares", storage_index_to_dir(si)) fileutil.make_dirs(storage_path) fileutil.write(os.path.join(storage_path, "%d" % shnum), sharedata) nm = self.g.clients[0].nodemaker n = nm.create_from_cap(TEST_1654_CAP) # to exercise the problem correctly, we must ensure that sh0 is # processed first, and sh1 second. NoNetworkGrid has facilities to # stall the first request from a single server, but it's not # currently easy to extend that to stall the second request (mutable # retrievals will see two: first the mapupdate, then the fetch). 
# However, repeated executions of this run without the #1654 fix # suggests that we're failing reliably even without explicit stalls, # probably because the servers are queried in a fixed order. So I'm # ok with relying upon that. d = self.shouldFail(NotEnoughSharesError, "test #1654 share corruption", "ran out of servers", n.download_best_version) return d TEST_1654_CAP = b"URI:SSK:6jthysgozssjnagqlcxjq7recm:yxawei54fmf2ijkrvs2shs6iey4kpdp6joi7brj2vrva6sp5nf3a" TEST_1654_SH0 = b"""\ VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA46m9s5j6lnzsOHytBTs2JOo AkWe8058hyrDa8igfBSqZMKO3aDOrFuRVt0ySYZ6oihFqPJRAAAAAAAAB8YAAAAA AAAJmgAAAAFPNgDkK8brSCzKz6n8HFqzbnAlALvnaB0Qpa1Bjo9jiZdmeMyneHR+ UoJcDb1Ls+lVLeUqP2JitBEXdCzcF/X2YMDlmKb2zmPqWfOw4fK0FOzYk6gCRZ7z AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABCDwr uIlhFlv21pDqyMeA9X1wHp98a1CKY4qfC7gn5exyODAcnhZKHCV18XBerbZLAgIA AAAAAAAAJgAAAAAAAAAmAAABjwAAAo8AAALTAAAC8wAAAAAAAAMGAAAAAAAAB8Yw ggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQCXKMor062nfxHVutMbqNcj vVC92wXTcQulenNWEX+0huK54igTAG60p0lZ6FpBJ9A+dlStT386bn5I6qe50ky5 CFodQSsQX+1yByMFlzqPDo4rclk/6oVySLypxnt/iBs3FPZ4zruhYXcITc6zaYYU Xqaw/C86g6M06MWQKsGev7PS3tH7q+dtovWzDgU13Q8PG2whGvGNfxPOmEX4j0wL FCBavpFnLpo3bJrj27V33HXxpPz3NP+fkaG0pKH03ANd/yYHfGf74dC+eD5dvWBM DU6fZQN4k/T+cth+qzjS52FPPTY9IHXIb4y+1HryVvxcx6JDifKoOzpFc3SDbBAP AgERKDjOFxVClH81DF/QkqpP0glOh6uTsFNx8Nes02q0d7iip2WqfG9m2+LmiWy8 Pg7RlQQy2M45gert1EDsH4OI69uxteviZP1Mo0wD6HjmWUbGIQRmsT3DmYEZCCMA /KjhNmlov2+OhVxIaHwE7aN840IfkGdJ/JssB6Z/Ym3+ou4+jAYKhifPQGrpBVjd 73oH6w9StnoGYIrEEQw8LFc4jnAFYciKlPuo6E6E3zDseE7gwkcOpCtVVksZu6Ii GQgIV8vjFbNz9M//RMXOBTwKFDiG08IAPh7fv2uKzFis0TFrR7sQcMQ/kZZCLPPi ECIX95NRoFRlxK/1kZ1+FuuDQgABz9+5yd/pjkVybmvc7Jr70bOVpxvRoI2ZEgh/ +QdxfcwAAm5iDnzPtsVdcbuNkKprfI8N4n+QmUOSMbAJ7M8r1cp4z9+5yd/pjkVy bmvc7Jr70bOVpxvRoI2ZEgh/+QdxfcxGzRV0shAW86irr5bDQOyyknYk0p2xw2Wn z6QccyXyobXPOFLO3ZBPnKaE58aaN7x3srQZYUKafet5ZMDX8fsQf2mbxnaeG5NF eO6wG++WBUo9leddnzKBnRcMGRAtJEjwfKMVPE8SmuTlL6kRc7n8wvY2ygClWlRm d7o95tZfoO+mexB/DLEpWLtlAiqh8yJ8cWaC5rYz4ZC2+z7QkeKXCHWAN3i4C++u dfZoD7qWnyAldYTydADwL885dVY7WN6NX9YtQrG3JGrp3wZvFrX5x9Jv7hls0A6l 2xI4NlcSSrgWIjzrGdwQEjIUDyfc7DWroEpJEfIaSnjkeTT0D8WV5NqzWH8UwWoF wjwDltaQ3Y8O/wJPGBqBAJEob+p6QxvP5T2W1jnOvbgsMZLNDuY6FF1XcuR7yvNF sXKP6aXMV8BKSlrehFlpBMTu4HvJ1rZlKuxgR1A9njiaKD2U0NitCKMIpIXQxT6L eZn9M8Ky68m0Zjdw/WCsKz22GTljSM5Nfme32BrW+4G+R55ECwZ1oh08nrnWjXmw PlSHj2lwpnsuOG2fwJkyMnIIoIUII31VLATeLERD9HfMK8/+uZqJ2PftT2fhHL/u CDCIdEWSUBBHpA7p8BbgiZKCpYzf+pbS2/EJGL8gQAvSH1atGv/o0BiAd10MzTXC Xn5xDB1Yh+FtYPYloBGAwmxKieDMnsjy6wp5ovdmOc2y6KBr27DzgEGchLyOxHV4 Q7u0Hkm7Om33ir1TUgK6bdPFL8rGNDOZq/SR4yn4qSsQTPD6Y/HQSK5GzkU4dGLw tU6GNpu142QE36NfWkoUWHKf1YgIYrlAGJWlj93et54ZGUZGVN7pAspZ+mvoMnDU Jh46nrQsEJiQz8AqgREck4Fi4S7Rmjh/AhXmzFWFca3YD0BmuYU6fxGTRPZ70eys LV5qPTmTGpX+bpvufAp0vznkiOdqTn1flnxdslM2AukiD6OwkX1dBH8AvzObhbz0 ABhx3c+cAhAnYhJmsYaAwbpWpp8CM5opmsRgwgaz8f8lxiRfXbrWD8vdd4dm2B9J jaiGCR8/UXHFBGZhCgLB2S+BNXKynIeP+POGQtMIIERUtwOIKt1KfZ9jZwf/ulJK fv/VmBPmGu+CHvFIlHAzlxwJeUz8wSltUeeHjADZ9Wag5ESN3R6hsmJL+KL4av5v DFobNPiNWbc+4H+3wg1R0oK/uTQb8u1S7uWIGVmi5fJ4rVVZ/VKKtHGVwm/8OGKF tcrJFJcJADFVkgpsqN8UINsMJLxfJRoBgABEWih5DTRwNXK76Ma2LjDBrEvxhw8M 7SLKhi5vH7/Cs7jfLZFgh2T6flDV4VM/EA7CYEHgEb8MFmioFGOmhUpqifkA3SdX 
jGi2KuZZ5+O+sHFWXsUjiFPEzUJF+syPEzH1aF5R+F8pkhifeYh0KP6OHd6Sgn8s TStXB+q0MndBXw5ADp/Jac1DVaSWruVAdjemQ+si1olk8xH+uTMXU7PgV9WkpIiy 4BhnFU9IbCr/m7806c13xfeelaffP2pr7EDdgwz5K89VWCa3k9OSDnMtj2CQXlC7 bQHi/oRGA1aHSn84SIt+HpAfRoVdr4N90bYWmYQNqfKoyWCbEr+dge/GSD1nddAJ 72mXGlqyLyWYuAAAAAA=""" TEST_1654_SH1 = b"""\ VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA45R4Y4kuV458rSTGDVTqdzz 9Fig3NQ3LermyD+0XLeqbC7KNgvv6cNzMZ9psQQ3FseYsIR1AAAAAAAAB8YAAAAA AAAJmgAAAAFPNgDkd/Y9Z+cuKctZk9gjwF8thT+fkmNCsulILsJw5StGHAA1f7uL MG73c5WBcesHB2epwazfbD3/0UZTlxXWXotywVHhjiS5XjnytJMYNVOp3PP0WKDc AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABCDwr uIlhFlv21pDqyMeA9X1wHp98a1CKY4qfC7gn5exyODAcnhZKHCV18XBerbZLAgIA AAAAAAAAJgAAAAAAAAAmAAABjwAAAo8AAALTAAAC8wAAAAAAAAMGAAAAAAAAB8Yw ggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQCXKMor062nfxHVutMbqNcj vVC92wXTcQulenNWEX+0huK54igTAG60p0lZ6FpBJ9A+dlStT386bn5I6qe50ky5 CFodQSsQX+1yByMFlzqPDo4rclk/6oVySLypxnt/iBs3FPZ4zruhYXcITc6zaYYU Xqaw/C86g6M06MWQKsGev7PS3tH7q+dtovWzDgU13Q8PG2whGvGNfxPOmEX4j0wL FCBavpFnLpo3bJrj27V33HXxpPz3NP+fkaG0pKH03ANd/yYHfGf74dC+eD5dvWBM DU6fZQN4k/T+cth+qzjS52FPPTY9IHXIb4y+1HryVvxcx6JDifKoOzpFc3SDbBAP AgERKDjOFxVClH81DF/QkqpP0glOh6uTsFNx8Nes02q0d7iip2WqfG9m2+LmiWy8 Pg7RlQQy2M45gert1EDsH4OI69uxteviZP1Mo0wD6HjmWUbGIQRmsT3DmYEZCCMA /KjhNmlov2+OhVxIaHwE7aN840IfkGdJ/JssB6Z/Ym3+ou4+jAYKhifPQGrpBVjd 73oH6w9StnoGYIrEEQw8LFc4jnAFYciKlPuo6E6E3zDseE7gwkcOpCtVVksZu6Ii GQgIV8vjFbNz9M//RMXOBTwKFDiG08IAPh7fv2uKzFis0TFrR7sQcMQ/kZZCLPPi ECIX95NRoFRlxK/1kZ1+FuuDQgABz9+5yd/pjkVybmvc7Jr70bOVpxvRoI2ZEgh/ +QdxfcwAAm5iDnzPtsVdcbuNkKprfI8N4n+QmUOSMbAJ7M8r1cp40cTBnAw+rMKC 98P4pURrotx116Kd0i3XmMZu81ew57H3Zb73r+syQCXZNOP0xhMDclIt0p2xw2Wn z6QccyXyobXPOFLO3ZBPnKaE58aaN7x3srQZYUKafet5ZMDX8fsQf2mbxnaeG5NF eO6wG++WBUo9leddnzKBnRcMGRAtJEjwfKMVPE8SmuTlL6kRc7n8wvY2ygClWlRm d7o95tZfoO+mexB/DLEpWLtlAiqh8yJ8cWaC5rYz4ZC2+z7QkeKXCHWAN3i4C++u dfZoD7qWnyAldYTydADwL885dVY7WN6NX9YtQrG3JGrp3wZvFrX5x9Jv7hls0A6l 2xI4NlcSSrgWIjzrGdwQEjIUDyfc7DWroEpJEfIaSnjkeTT0D8WV5NqzWH8UwWoF wjwDltaQ3Y8O/wJPGBqBAJEob+p6QxvP5T2W1jnOvbgsMZLNDuY6FF1XcuR7yvNF sXKP6aXMV8BKSlrehFlpBMTu4HvJ1rZlKuxgR1A9njiaKD2U0NitCKMIpIXQxT6L eZn9M8Ky68m0Zjdw/WCsKz22GTljSM5Nfme32BrW+4G+R55ECwZ1oh08nrnWjXmw PlSHj2lwpnsuOG2fwJkyMnIIoIUII31VLATeLERD9HfMK8/+uZqJ2PftT2fhHL/u CDCIdEWSUBBHpA7p8BbgiZKCpYzf+pbS2/EJGL8gQAvSH1atGv/o0BiAd10MzTXC Xn5xDB1Yh+FtYPYloBGAwmxKieDMnsjy6wp5ovdmOc2y6KBr27DzgEGchLyOxHV4 Q7u0Hkm7Om33ir1TUgK6bdPFL8rGNDOZq/SR4yn4qSsQTPD6Y/HQSK5GzkU4dGLw tU6GNpu142QE36NfWkoUWHKf1YgIYrlAGJWlj93et54ZGUZGVN7pAspZ+mvoMnDU Jh46nrQsEJiQz8AqgREck4Fi4S7Rmjh/AhXmzFWFca3YD0BmuYU6fxGTRPZ70eys LV5qPTmTGpX+bpvufAp0vznkiOdqTn1flnxdslM2AukiD6OwkX1dBH8AvzObhbz0 ABhx3c+cAhAnYhJmsYaAwbpWpp8CM5opmsRgwgaz8f8lxiRfXbrWD8vdd4dm2B9J jaiGCR8/UXHFBGZhCgLB2S+BNXKynIeP+POGQtMIIERUtwOIKt1KfZ9jZwf/ulJK fv/VmBPmGu+CHvFIlHAzlxwJeUz8wSltUeeHjADZ9Wag5ESN3R6hsmJL+KL4av5v DFobNPiNWbc+4H+3wg1R0oK/uTQb8u1S7uWIGVmi5fJ4rVVZ/VKKtHGVwm/8OGKF tcrJFJcJADFVkgpsqN8UINsMJLxfJRoBgABEWih5DTRwNXK76Ma2LjDBrEvxhw8M 7SLKhi5vH7/Cs7jfLZFgh2T6flDV4VM/EA7CYEHgEb8MFmioFGOmhUpqifkA3SdX jGi2KuZZ5+O+sHFWXsUjiFPEzUJF+syPEzH1aF5R+F8pkhifeYh0KP6OHd6Sgn8s TStXB+q0MndBXw5ADp/Jac1DVaSWruVAdjemQ+si1olk8xH+uTMXU7PgV9WkpIiy 
4BhnFU9IbCr/m7806c13xfeelaffP2pr7EDdgwz5K89VWCa3k9OSDnMtj2CQXlC7 bQHi/oRGA1aHSn84SIt+HpAfRoVdr4N90bYWmYQNqfKoyWCbEr+dge/GSD1nddAJ 72mXGlqyLyWYuAAAAAA=""" tahoe_lafs-1.20.0/src/allmydata/test/mutable/test_repair.py0000644000000000000000000003010513615410400020660 0ustar00""" Ported to Python 3. """ from ..common import AsyncTestCase from testtools.matchers import Equals, HasLength from allmydata.interfaces import IRepairResults, ICheckAndRepairResults from allmydata.monitor import Monitor from allmydata.mutable.common import MODE_CHECK from allmydata.mutable.layout import unpack_header from allmydata.mutable.repairer import MustForceRepairError from ..common import ShouldFailMixin from .util import PublishMixin class Repair(AsyncTestCase, PublishMixin, ShouldFailMixin): def get_shares(self, s): all_shares = {} # maps (peerid, shnum) to share data for peerid in s._peers: shares = s._peers[peerid] for shnum in shares: data = shares[shnum] all_shares[ (peerid, shnum) ] = data return all_shares def copy_shares(self, ignored=None): self.old_shares.append(self.get_shares(self._storage)) def test_repair_nop(self): self.old_shares = [] d = self.publish_one() d.addCallback(self.copy_shares) d.addCallback(lambda res: self._fn.check(Monitor())) d.addCallback(lambda check_results: self._fn.repair(check_results)) def _check_results(rres): self.assertThat(IRepairResults.providedBy(rres), Equals(True)) self.assertThat(rres.get_successful(), Equals(True)) # TODO: examine results self.copy_shares() initial_shares = self.old_shares[0] new_shares = self.old_shares[1] # TODO: this really shouldn't change anything. When we implement # a "minimal-bandwidth" repairer", change this test to assert: #self.assertThat(new_shares, Equals(initial_shares)) # all shares should be in the same place as before self.assertThat(set(initial_shares.keys()), Equals(set(new_shares.keys()))) # but they should all be at a newer seqnum. The IV will be # different, so the roothash will be too. 
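# Standalone sketch (illustrative only) of the invariant checked just below
# with unpack_header(): after a no-op repair, every share should carry the
# sequence number bumped by exactly one while version, k, N, segsize and
# datalen stay the same; the IV, and therefore the root hash, are allowed to
# change.  Plain dicts stand in for the unpacked header tuples.
def repair_kept_invariants(before, after):
    unchanged = ("version", "k", "N", "segsize", "datalen")
    return (after["seqnum"] == before["seqnum"] + 1 and
            all(after[field] == before[field] for field in unchanged))

header0 = {"version": 0, "seqnum": 1, "k": 3, "N": 10, "segsize": 1000, "datalen": 1000}
header1 = dict(header0, seqnum=2)
assert repair_kept_invariants(header0, header1)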
for key in initial_shares: (version0, seqnum0, root_hash0, IV0, k0, N0, segsize0, datalen0, o0) = unpack_header(initial_shares[key]) (version1, seqnum1, root_hash1, IV1, k1, N1, segsize1, datalen1, o1) = unpack_header(new_shares[key]) self.assertThat(version0, Equals(version1)) self.assertThat(seqnum0+1, Equals(seqnum1)) self.assertThat(k0, Equals(k1)) self.assertThat(N0, Equals(N1)) self.assertThat(segsize0, Equals(segsize1)) self.assertThat(datalen0, Equals(datalen1)) d.addCallback(_check_results) return d def failIfSharesChanged(self, ignored=None): old_shares = self.old_shares[-2] current_shares = self.old_shares[-1] self.assertThat(old_shares, Equals(current_shares)) def _test_whether_repairable(self, publisher, nshares, expected_result): d = publisher() def _delete_some_shares(ign): shares = self._storage._peers for peerid in shares: for shnum in list(shares[peerid]): if shnum >= nshares: del shares[peerid][shnum] d.addCallback(_delete_some_shares) d.addCallback(lambda ign: self._fn.check(Monitor())) def _check(cr): self.assertThat(cr.is_healthy(), Equals(False)) self.assertThat(cr.is_recoverable(), Equals(expected_result)) return cr d.addCallback(_check) d.addCallback(lambda check_results: self._fn.repair(check_results)) d.addCallback(lambda crr: self.assertThat(crr.get_successful(), Equals(expected_result))) return d def test_unrepairable_0shares(self): return self._test_whether_repairable(self.publish_one, 0, False) def test_mdmf_unrepairable_0shares(self): return self._test_whether_repairable(self.publish_mdmf, 0, False) def test_unrepairable_1share(self): return self._test_whether_repairable(self.publish_one, 1, False) def test_mdmf_unrepairable_1share(self): return self._test_whether_repairable(self.publish_mdmf, 1, False) def test_repairable_5shares(self): return self._test_whether_repairable(self.publish_one, 5, True) def test_mdmf_repairable_5shares(self): return self._test_whether_repairable(self.publish_mdmf, 5, True) def _test_whether_checkandrepairable(self, publisher, nshares, expected_result): """ Like the _test_whether_repairable tests, but invoking check_and_repair instead of invoking check and then invoking repair. 
""" d = publisher() def _delete_some_shares(ign): shares = self._storage._peers for peerid in shares: for shnum in list(shares[peerid]): if shnum >= nshares: del shares[peerid][shnum] d.addCallback(_delete_some_shares) d.addCallback(lambda ign: self._fn.check_and_repair(Monitor())) d.addCallback(lambda crr: self.assertThat(crr.get_repair_successful(), Equals(expected_result))) return d def test_unrepairable_0shares_checkandrepair(self): return self._test_whether_checkandrepairable(self.publish_one, 0, False) def test_mdmf_unrepairable_0shares_checkandrepair(self): return self._test_whether_checkandrepairable(self.publish_mdmf, 0, False) def test_unrepairable_1share_checkandrepair(self): return self._test_whether_checkandrepairable(self.publish_one, 1, False) def test_mdmf_unrepairable_1share_checkandrepair(self): return self._test_whether_checkandrepairable(self.publish_mdmf, 1, False) def test_repairable_5shares_checkandrepair(self): return self._test_whether_checkandrepairable(self.publish_one, 5, True) def test_mdmf_repairable_5shares_checkandrepair(self): return self._test_whether_checkandrepairable(self.publish_mdmf, 5, True) def test_merge(self): self.old_shares = [] d = self.publish_multiple() # repair will refuse to merge multiple highest seqnums unless you # pass force=True d.addCallback(lambda res: self._set_versions({0:3,2:3,4:3,6:3,8:3, 1:4,3:4,5:4,7:4,9:4})) d.addCallback(self.copy_shares) d.addCallback(lambda res: self._fn.check(Monitor())) def _try_repair(check_results): ex = "There were multiple recoverable versions with identical seqnums, so force=True must be passed to the repair() operation" d2 = self.shouldFail(MustForceRepairError, "test_merge", ex, self._fn.repair, check_results) d2.addCallback(self.copy_shares) d2.addCallback(self.failIfSharesChanged) d2.addCallback(lambda res: check_results) return d2 d.addCallback(_try_repair) d.addCallback(lambda check_results: self._fn.repair(check_results, force=True)) # this should give us 10 shares of the highest roothash def _check_repair_results(rres): self.assertThat(rres.get_successful(), Equals(True)) pass # TODO d.addCallback(_check_repair_results) d.addCallback(lambda res: self._fn.get_servermap(MODE_CHECK)) def _check_smap(smap): self.assertThat(smap.recoverable_versions(), HasLength(1)) self.assertThat(smap.unrecoverable_versions(), HasLength(0)) # now, which should have won? roothash_s4a = self.get_roothash_for(3) roothash_s4b = self.get_roothash_for(4) if roothash_s4b > roothash_s4a: expected_contents = self.CONTENTS[4] else: expected_contents = self.CONTENTS[3] new_versionid = smap.best_recoverable_version() self.assertThat(new_versionid[0], Equals(5)) # seqnum 5 d2 = self._fn.download_version(smap, new_versionid) d2.addCallback(self.assertEqual, expected_contents) return d2 d.addCallback(_check_smap) return d def test_non_merge(self): self.old_shares = [] d = self.publish_multiple() # repair should not refuse a repair that doesn't need to merge. In # this case, we combine v2 with v3. The repair should ignore v2 and # copy v3 into a new v5. 
d.addCallback(lambda res: self._set_versions({0:2,2:2,4:2,6:2,8:2, 1:3,3:3,5:3,7:3,9:3})) d.addCallback(lambda res: self._fn.check(Monitor())) d.addCallback(lambda check_results: self._fn.repair(check_results)) # this should give us 10 shares of v3 def _check_repair_results(rres): self.assertThat(rres.get_successful(), Equals(True)) pass # TODO d.addCallback(_check_repair_results) d.addCallback(lambda res: self._fn.get_servermap(MODE_CHECK)) def _check_smap(smap): self.assertThat(smap.recoverable_versions(), HasLength(1)) self.assertThat(smap.unrecoverable_versions(), HasLength(0)) # now, which should have won? expected_contents = self.CONTENTS[3] new_versionid = smap.best_recoverable_version() self.assertThat(new_versionid[0], Equals(5)) # seqnum 5 d2 = self._fn.download_version(smap, new_versionid) d2.addCallback(self.assertEquals, expected_contents) return d2 d.addCallback(_check_smap) return d def get_roothash_for(self, index): # return the roothash for the first share we see in the saved set shares = self._copied_shares[index] for peerid in shares: for shnum in shares[peerid]: share = shares[peerid][shnum] (version, seqnum, root_hash, IV, k, N, segsize, datalen, o) = \ unpack_header(share) return root_hash def test_check_and_repair_readcap(self): # we can't currently repair from a mutable readcap: #625 self.old_shares = [] d = self.publish_one() d.addCallback(self.copy_shares) def _get_readcap(res): self._fn3 = self._fn.get_readonly() # also delete some shares for peerid,shares in list(self._storage._peers.items()): shares.pop(0, None) d.addCallback(_get_readcap) d.addCallback(lambda res: self._fn3.check_and_repair(Monitor())) def _check_results(crr): self.assertThat(ICheckAndRepairResults.providedBy(crr), Equals(True)) # we should detect the unhealthy, but skip over mutable-readcap # repairs until #625 is fixed self.assertThat(crr.get_pre_repair_results().is_healthy(), Equals(False)) self.assertThat(crr.get_repair_attempted(), Equals(False)) self.assertThat(crr.get_post_repair_results().is_healthy(), Equals(False)) d.addCallback(_check_results) return d def test_repair_empty(self): # bug 1689: delete one share of an empty mutable file, then repair. # In the buggy version, the check that precedes the retrieve+publish # cycle uses MODE_READ, instead of MODE_REPAIR, and fails to get the # privkey that repair needs. d = self.publish_sdmf(b"") def _delete_one_share(ign): shares = self._storage._peers for peerid in shares: for shnum in list(shares[peerid]): if shnum == 0: del shares[peerid][shnum] d.addCallback(_delete_one_share) d.addCallback(lambda ign: self._fn2.check(Monitor())) d.addCallback(lambda check_results: self._fn2.repair(check_results)) def _check(crr): self.assertThat(crr.get_successful(), Equals(True)) d.addCallback(_check) return d tahoe_lafs-1.20.0/src/allmydata/test/mutable/test_roundtrip.py0000644000000000000000000004266713615410400021444 0ustar00""" Ported to Python 3. """ from io import StringIO from ..common import AsyncTestCase from testtools.matchers import Equals, HasLength, Contains from twisted.internet import defer from allmydata.util import base32, consumer from allmydata.interfaces import NotEnoughSharesError from allmydata.monitor import Monitor from allmydata.mutable.common import MODE_READ, UnrecoverableFileError from allmydata.mutable.servermap import ServerMap, ServermapUpdater from allmydata.mutable.retrieve import Retrieve from .util import PublishMixin, make_storagebroker, corrupt from .. 
import common_util as testutil class Roundtrip(AsyncTestCase, testutil.ShouldFailMixin, PublishMixin): def setUp(self): super(Roundtrip, self).setUp() return self.publish_one() def make_servermap(self, mode=MODE_READ, oldmap=None, sb=None): if oldmap is None: oldmap = ServerMap() if sb is None: sb = self._storage_broker smu = ServermapUpdater(self._fn, sb, Monitor(), oldmap, mode) d = smu.update() return d def abbrev_verinfo(self, verinfo): if verinfo is None: return None (seqnum, root_hash, IV, segsize, datalength, k, N, prefix, offsets_tuple) = verinfo return "%d-%s" % (seqnum, base32.b2a(root_hash)[:4]) def abbrev_verinfo_dict(self, verinfo_d): output = {} for verinfo,value in list(verinfo_d.items()): (seqnum, root_hash, IV, segsize, datalength, k, N, prefix, offsets_tuple) = verinfo output["%d-%s" % (seqnum, base32.b2a(root_hash)[:4])] = value return output def dump_servermap(self, servermap): print("SERVERMAP", servermap) print("RECOVERABLE", [self.abbrev_verinfo(v) for v in servermap.recoverable_versions()]) print("BEST", self.abbrev_verinfo(servermap.best_recoverable_version())) print("available", self.abbrev_verinfo_dict(servermap.shares_available())) def do_download(self, servermap, version=None): if version is None: version = servermap.best_recoverable_version() r = Retrieve(self._fn, self._storage_broker, servermap, version) c = consumer.MemoryConsumer() d = r.download(consumer=c) d.addCallback(lambda mc: b"".join(mc.chunks)) return d def test_basic(self): d = self.make_servermap() def _do_retrieve(servermap): self._smap = servermap #self.dump_servermap(servermap) self.assertThat(servermap.recoverable_versions(), HasLength(1)) return self.do_download(servermap) d.addCallback(_do_retrieve) def _retrieved(new_contents): self.assertThat(new_contents, Equals(self.CONTENTS)) d.addCallback(_retrieved) # we should be able to re-use the same servermap, both with and # without updating it. 
d.addCallback(lambda res: self.do_download(self._smap)) d.addCallback(_retrieved) d.addCallback(lambda res: self.make_servermap(oldmap=self._smap)) d.addCallback(lambda res: self.do_download(self._smap)) d.addCallback(_retrieved) # clobbering the pubkey should make the servermap updater re-fetch it def _clobber_pubkey(res): self._fn._pubkey = None d.addCallback(_clobber_pubkey) d.addCallback(lambda res: self.make_servermap(oldmap=self._smap)) d.addCallback(lambda res: self.do_download(self._smap)) d.addCallback(_retrieved) return d def test_all_shares_vanished(self): d = self.make_servermap() def _remove_shares(servermap): for shares in list(self._storage._peers.values()): shares.clear() d1 = self.shouldFail(NotEnoughSharesError, "test_all_shares_vanished", "ran out of servers", self.do_download, servermap) return d1 d.addCallback(_remove_shares) return d def test_all_but_two_shares_vanished_updated_servermap(self): # tests error reporting for ticket #1742 d = self.make_servermap() def _remove_shares(servermap): self._version = servermap.best_recoverable_version() for shares in list(self._storage._peers.values())[2:]: shares.clear() return self.make_servermap(servermap) d.addCallback(_remove_shares) def _check(updated_servermap): d1 = self.shouldFail(NotEnoughSharesError, "test_all_but_two_shares_vanished_updated_servermap", "ran out of servers", self.do_download, updated_servermap, version=self._version) return d1 d.addCallback(_check) return d def test_no_servers(self): sb2 = make_storagebroker(num_peers=0) # if there are no servers, then a MODE_READ servermap should come # back empty d = self.make_servermap(sb=sb2) def _check_servermap(servermap): self.assertThat(servermap.best_recoverable_version(), Equals(None)) self.assertFalse(servermap.recoverable_versions()) self.assertFalse(servermap.unrecoverable_versions()) self.assertFalse(servermap.all_servers()) d.addCallback(_check_servermap) return d def test_no_servers_download(self): sb2 = make_storagebroker(num_peers=0) self._fn._storage_broker = sb2 d = self.shouldFail(UnrecoverableFileError, "test_no_servers_download", "no recoverable versions", self._fn.download_best_version) def _restore(res): # a failed download that occurs while we aren't connected to # anybody should not prevent a subsequent download from working. # This isn't quite the webapi-driven test that #463 wants, but it # should be close enough. self._fn._storage_broker = self._storage_broker return self._fn.download_best_version() def _retrieved(new_contents): self.assertThat(new_contents, Equals(self.CONTENTS)) d.addCallback(_restore) d.addCallback(_retrieved) return d def _test_corrupt_all(self, offset, substring, should_succeed=False, corrupt_early=True, failure_checker=None, fetch_privkey=False): d = defer.succeed(None) if corrupt_early: d.addCallback(corrupt, self._storage, offset) d.addCallback(lambda res: self.make_servermap()) if not corrupt_early: d.addCallback(corrupt, self._storage, offset) def _do_retrieve(servermap): ver = servermap.best_recoverable_version() if ver is None and not should_succeed: # no recoverable versions == not succeeding. The problem # should be noted in the servermap's list of problems. 
if substring: allproblems = [str(f) for f in servermap.get_problems()] self.assertThat("".join(allproblems), Contains(substring)) return servermap if should_succeed: d1 = self._fn.download_version(servermap, ver, fetch_privkey) d1.addCallback(lambda new_contents: self.assertThat(new_contents, Equals(self.CONTENTS))) else: d1 = self.shouldFail(NotEnoughSharesError, "_corrupt_all(offset=%s)" % (offset,), substring, self._fn.download_version, servermap, ver, fetch_privkey) if failure_checker: d1.addCallback(failure_checker) d1.addCallback(lambda res: servermap) return d1 d.addCallback(_do_retrieve) return d def test_corrupt_all_verbyte(self): # when the version byte is not 0 or 1, we hit an UnknownVersionError # error in unpack_share(). d = self._test_corrupt_all(0, "UnknownVersionError") def _check_servermap(servermap): # and the dump should mention the problems s = StringIO() dump = servermap.dump(s).getvalue() self.assertTrue("30 PROBLEMS" in dump, msg=dump) d.addCallback(_check_servermap) return d def test_corrupt_all_seqnum(self): # a corrupt sequence number will trigger a bad signature return self._test_corrupt_all(1, "signature is invalid") def test_corrupt_all_R(self): # a corrupt root hash will trigger a bad signature return self._test_corrupt_all(9, "signature is invalid") def test_corrupt_all_IV(self): # a corrupt salt/IV will trigger a bad signature return self._test_corrupt_all(41, "signature is invalid") def test_corrupt_all_k(self): # a corrupt 'k' will trigger a bad signature return self._test_corrupt_all(57, "signature is invalid") def test_corrupt_all_N(self): # a corrupt 'N' will trigger a bad signature return self._test_corrupt_all(58, "signature is invalid") def test_corrupt_all_segsize(self): # a corrupt segsize will trigger a bad signature return self._test_corrupt_all(59, "signature is invalid") def test_corrupt_all_datalen(self): # a corrupt data length will trigger a bad signature return self._test_corrupt_all(67, "signature is invalid") def test_corrupt_all_pubkey(self): # a corrupt pubkey won't match the URI's fingerprint. We need to # remove the pubkey from the filenode, or else it won't bother trying # to update it. self._fn._pubkey = None return self._test_corrupt_all("pubkey", "pubkey doesn't match fingerprint") def test_corrupt_all_sig(self): # a corrupt signature is a bad one # the signature runs from about [543:799], depending upon the length # of the pubkey return self._test_corrupt_all("signature", "signature is invalid") def test_corrupt_all_share_hash_chain_number(self): # a corrupt share hash chain entry will show up as a bad hash. If we # mangle the first byte, that will look like a bad hash number, # causing an IndexError return self._test_corrupt_all("share_hash_chain", "corrupt hashes") def test_corrupt_all_share_hash_chain_hash(self): # a corrupt share hash chain entry will show up as a bad hash. If we # mangle a few bytes in, that will look like a bad hash. return self._test_corrupt_all(("share_hash_chain",4), "corrupt hashes") def test_corrupt_all_block_hash_tree(self): return self._test_corrupt_all("block_hash_tree", "block hash tree failure") def test_corrupt_all_block(self): return self._test_corrupt_all("share_data", "block hash tree failure") def test_corrupt_all_encprivkey(self): # a corrupted privkey won't even be noticed by the reader, only by a # writer. 
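# A small reference sketch of the share-prefix offsets the corruption tests
# above poke at.  The field widths are inferred from the gaps between those
# offsets (and datalen's width is assumed to match segsize's), so treat this
# as a summary of the test parameters rather than an authoritative layout.
SDMF_PREFIX_FIELDS = [
    ("version",    0,  1),   # test_corrupt_all_verbyte
    ("seqnum",     1,  8),   # test_corrupt_all_seqnum
    ("root_hash",  9, 32),   # test_corrupt_all_R
    ("IV",        41, 16),   # test_corrupt_all_IV
    ("k",         57,  1),   # test_corrupt_all_k
    ("N",         58,  1),   # test_corrupt_all_N
    ("segsize",   59,  8),   # test_corrupt_all_segsize
    ("datalen",   67,  8),   # test_corrupt_all_datalen
]
for name, offset, width in SDMF_PREFIX_FIELDS:
    print("%-9s occupies bytes [%d:%d)" % (name, offset, offset + width))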
return self._test_corrupt_all("enc_privkey", None, should_succeed=True) def test_corrupt_all_encprivkey_late(self): # this should work for the same reason as above, but we corrupt # after the servermap update to exercise the error handling # code. # We need to remove the privkey from the node, or the retrieve # process won't know to update it. self._fn._privkey = None return self._test_corrupt_all("enc_privkey", None, # this shouldn't fail should_succeed=True, corrupt_early=False, fetch_privkey=True) # disabled until retrieve tests checkstring on each blockfetch. I didn't # just use a .todo because the failing-but-ignored test emits about 30kB # of noise. def OFF_test_corrupt_all_seqnum_late(self): # corrupting the seqnum between mapupdate and retrieve should result # in NotEnoughSharesError, since each share will look invalid def _check(res): f = res[0] self.assertThat(f.check(NotEnoughSharesError), HasLength(1)) self.assertThat("uncoordinated write" in str(f), Equals(True)) return self._test_corrupt_all(1, "ran out of servers", corrupt_early=False, failure_checker=_check) def test_corrupt_all_block_late(self): def _check(res): f = res[0] self.assertTrue(f.check(NotEnoughSharesError)) return self._test_corrupt_all("share_data", "block hash tree failure", corrupt_early=False, failure_checker=_check) def test_basic_pubkey_at_end(self): # we corrupt the pubkey in all but the last 'k' shares, allowing the # download to succeed but forcing a bunch of retries first. Note that # this is rather pessimistic: our Retrieve process will throw away # the whole share if the pubkey is bad, even though the rest of the # share might be good. self._fn._pubkey = None k = self._fn.get_required_shares() N = self._fn.get_total_shares() d = defer.succeed(None) d.addCallback(corrupt, self._storage, "pubkey", shnums_to_corrupt=list(range(0, N-k))) d.addCallback(lambda res: self.make_servermap()) def _do_retrieve(servermap): self.assertTrue(servermap.get_problems()) self.assertThat("pubkey doesn't match fingerprint" in str(servermap.get_problems()[0]), Equals(True)) ver = servermap.best_recoverable_version() r = Retrieve(self._fn, self._storage_broker, servermap, ver) c = consumer.MemoryConsumer() return r.download(c) d.addCallback(_do_retrieve) d.addCallback(lambda mc: b"".join(mc.chunks)) d.addCallback(lambda new_contents: self.assertThat(new_contents, Equals(self.CONTENTS))) return d def _test_corrupt_some(self, offset, mdmf=False): if mdmf: d = self.publish_mdmf() else: d = defer.succeed(None) d.addCallback(lambda ignored: corrupt(None, self._storage, offset, list(range(5)))) d.addCallback(lambda ignored: self.make_servermap()) def _do_retrieve(servermap): ver = servermap.best_recoverable_version() self.assertTrue(ver) return self._fn.download_best_version() d.addCallback(_do_retrieve) d.addCallback(lambda new_contents: self.assertThat(new_contents, Equals(self.CONTENTS))) return d def test_corrupt_some(self): # corrupt the data of first five shares (so the servermap thinks # they're good but retrieve marks them as bad), so that the # MODE_READ set of 6 will be insufficient, forcing node.download to # retry with more servers. 
return self._test_corrupt_some("share_data") def test_download_fails(self): d = corrupt(None, self._storage, "signature") d.addCallback(lambda ignored: self.shouldFail(UnrecoverableFileError, "test_download_anyway", "no recoverable versions", self._fn.download_best_version)) return d def test_corrupt_mdmf_block_hash_tree(self): d = self.publish_mdmf() d.addCallback(lambda ignored: self._test_corrupt_all(("block_hash_tree", 12 * 32), "block hash tree failure", corrupt_early=True, should_succeed=False)) return d def test_corrupt_mdmf_block_hash_tree_late(self): # Note - there is no SDMF counterpart to this test, as the SDMF # files are guaranteed to have exactly one block, and therefore # the block hash tree fits within the initial read (#1240). d = self.publish_mdmf() d.addCallback(lambda ignored: self._test_corrupt_all(("block_hash_tree", 12 * 32), "block hash tree failure", corrupt_early=False, should_succeed=False)) return d def test_corrupt_mdmf_share_data(self): d = self.publish_mdmf() d.addCallback(lambda ignored: # TODO: Find out what the block size is and corrupt a # specific block, rather than just guessing. self._test_corrupt_all(("share_data", 12 * 40), "block hash tree failure", corrupt_early=True, should_succeed=False)) return d def test_corrupt_some_mdmf(self): return self._test_corrupt_some(("share_data", 12 * 40), mdmf=True) tahoe_lafs-1.20.0/src/allmydata/test/mutable/test_servermap.py0000644000000000000000000002373513615410400021415 0ustar00""" Ported to Python 3. """ from ..common import AsyncTestCase from testtools.matchers import Equals, NotEquals, HasLength from twisted.internet import defer from allmydata.monitor import Monitor from allmydata.mutable.common import \ MODE_CHECK, MODE_ANYTHING, MODE_WRITE, MODE_READ from allmydata.mutable.publish import MutableData from allmydata.mutable.servermap import ServerMap, ServermapUpdater from .util import PublishMixin class Servermap(AsyncTestCase, PublishMixin): def setUp(self): super(Servermap, self).setUp() return self.publish_one() def make_servermap(self, mode=MODE_CHECK, fn=None, sb=None, update_range=None): if fn is None: fn = self._fn if sb is None: sb = self._storage_broker smu = ServermapUpdater(fn, sb, Monitor(), ServerMap(), mode, update_range=update_range) d = smu.update() return d def update_servermap(self, oldmap, mode=MODE_CHECK): smu = ServermapUpdater(self._fn, self._storage_broker, Monitor(), oldmap, mode) d = smu.update() return d def failUnlessOneRecoverable(self, sm, num_shares): self.assertThat(sm.recoverable_versions(), HasLength(1)) self.assertThat(sm.unrecoverable_versions(), HasLength(0)) best = sm.best_recoverable_version() self.assertThat(best, NotEquals(None)) self.assertThat(sm.recoverable_versions(), Equals(set([best]))) self.assertThat(sm.shares_available(), HasLength(1)) self.assertThat(sm.shares_available()[best], Equals((num_shares, 3, 10))) shnum, servers = list(sm.make_sharemap().items())[0] server = list(servers)[0] self.assertThat(sm.version_on_server(server, shnum), Equals(best)) self.assertThat(sm.version_on_server(server, 666), Equals(None)) return sm def test_basic(self): d = defer.succeed(None) ms = self.make_servermap us = self.update_servermap d.addCallback(lambda res: ms(mode=MODE_CHECK)) d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10)) d.addCallback(lambda res: ms(mode=MODE_WRITE)) d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10)) d.addCallback(lambda res: ms(mode=MODE_READ)) # this mode stops at k+epsilon, and epsilon=k, so 6 shares 
d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 6)) d.addCallback(lambda res: ms(mode=MODE_ANYTHING)) # this mode stops at 'k' shares d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 3)) # and can we re-use the same servermap? Note that these are sorted in # increasing order of number of servers queried, since once a server # gets into the servermap, we'll always ask it for an update. d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 3)) d.addCallback(lambda sm: us(sm, mode=MODE_READ)) d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 6)) d.addCallback(lambda sm: us(sm, mode=MODE_WRITE)) d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10)) d.addCallback(lambda sm: us(sm, mode=MODE_CHECK)) d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10)) d.addCallback(lambda sm: us(sm, mode=MODE_ANYTHING)) d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10)) return d def test_fetch_privkey(self): d = defer.succeed(None) # use the sibling filenode (which hasn't been used yet), and make # sure it can fetch the privkey. The file is small, so the privkey # will be fetched on the first (query) pass. d.addCallback(lambda res: self.make_servermap(MODE_WRITE, self._fn2)) d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10)) # create a new file, which is large enough to knock the privkey out # of the early part of the file LARGE = b"These are Larger contents" * 200 # about 5KB LARGE_uploadable = MutableData(LARGE) d.addCallback(lambda res: self._nodemaker.create_mutable_file(LARGE_uploadable)) def _created(large_fn): large_fn2 = self._nodemaker.create_from_cap(large_fn.get_uri()) return self.make_servermap(MODE_WRITE, large_fn2) d.addCallback(_created) d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10)) return d def test_mark_bad(self): d = defer.succeed(None) ms = self.make_servermap d.addCallback(lambda res: ms(mode=MODE_READ)) d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 6)) def _made_map(sm): v = sm.best_recoverable_version() vm = sm.make_versionmap() shares = list(vm[v]) self.assertThat(shares, HasLength(6)) self._corrupted = set() # mark the first 5 shares as corrupt, then update the servermap. # The map should not have the marked shares it in any more, and # new shares should be found to replace the missing ones. 
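        # The corrupted (server, shnum) pairs are remembered in
        # self._corrupted so that _check_map() below can verify they are no
        # longer reported by debug_shares_on_server() after the MODE_WRITE
        # update re-queries the grid.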
for (shnum, server, timestamp) in shares: if shnum < 5: self._corrupted.add( (server, shnum) ) sm.mark_bad_share(server, shnum, b"") return self.update_servermap(sm, MODE_WRITE) d.addCallback(_made_map) def _check_map(sm): # this should find all 5 shares that weren't marked bad v = sm.best_recoverable_version() vm = sm.make_versionmap() shares = list(vm[v]) for (server, shnum) in self._corrupted: server_shares = sm.debug_shares_on_server(server) self.assertFalse(shnum in server_shares, "%d was in %s" % (shnum, server_shares)) self.assertThat(shares, HasLength(5)) d.addCallback(_check_map) return d def failUnlessNoneRecoverable(self, sm): self.assertThat(sm.recoverable_versions(), HasLength(0)) self.assertThat(sm.unrecoverable_versions(), HasLength(0)) best = sm.best_recoverable_version() self.assertThat(best, Equals(None)) self.assertThat(sm.shares_available(), HasLength(0)) def test_no_shares(self): self._storage._peers = {} # delete all shares ms = self.make_servermap d = defer.succeed(None) # d.addCallback(lambda res: ms(mode=MODE_CHECK)) d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm)) d.addCallback(lambda res: ms(mode=MODE_ANYTHING)) d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm)) d.addCallback(lambda res: ms(mode=MODE_WRITE)) d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm)) d.addCallback(lambda res: ms(mode=MODE_READ)) d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm)) return d def failUnlessNotQuiteEnough(self, sm): self.assertThat(sm.recoverable_versions(), HasLength(0)) self.assertThat(sm.unrecoverable_versions(), HasLength(1)) best = sm.best_recoverable_version() self.assertThat(best, Equals(None)) self.assertThat(sm.shares_available(), HasLength(1)) self.assertThat(list(sm.shares_available().values())[0], Equals((2,3,10))) return sm def test_not_quite_enough_shares(self): s = self._storage ms = self.make_servermap num_shares = len(s._peers) for peerid in s._peers: s._peers[peerid] = {} num_shares -= 1 if num_shares == 2: break # now there ought to be only two shares left assert len([peerid for peerid in s._peers if s._peers[peerid]]) == 2 d = defer.succeed(None) d.addCallback(lambda res: ms(mode=MODE_CHECK)) d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm)) d.addCallback(lambda sm: self.assertThat(sm.make_sharemap(), HasLength(2))) d.addCallback(lambda res: ms(mode=MODE_ANYTHING)) d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm)) d.addCallback(lambda res: ms(mode=MODE_WRITE)) d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm)) d.addCallback(lambda res: ms(mode=MODE_READ)) d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm)) return d def test_servermapupdater_finds_mdmf_files(self): # setUp already published an MDMF file for us. We just need to # make sure that when we run the ServermapUpdater, the file is # reported to have one recoverable version. d = defer.succeed(None) d.addCallback(lambda ignored: self.publish_mdmf()) d.addCallback(lambda ignored: self.make_servermap(mode=MODE_CHECK)) # Calling make_servermap also updates the servermap in the mode # that we specify, so we just need to see what it says. 
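        # publish_mdmf() (from PublishMixin) replaces self._fn and the fake
        # storage with a freshly published MDMF file, so the single
        # recoverable version checked below is the MDMF one.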
def _check_servermap(sm): self.assertThat(sm.recoverable_versions(), HasLength(1)) d.addCallback(_check_servermap) return d def test_fetch_update(self): d = defer.succeed(None) d.addCallback(lambda ignored: self.publish_mdmf()) d.addCallback(lambda ignored: self.make_servermap(mode=MODE_WRITE, update_range=(1, 2))) def _check_servermap(sm): # 10 shares self.assertThat(sm.update_data, HasLength(10)) # one version for data in sm.update_data.values(): self.assertThat(data, HasLength(1)) d.addCallback(_check_servermap) return d def test_servermapupdater_finds_sdmf_files(self): d = defer.succeed(None) d.addCallback(lambda ignored: self.publish_sdmf()) d.addCallback(lambda ignored: self.make_servermap(mode=MODE_CHECK)) d.addCallback(lambda servermap: self.assertThat(servermap.recoverable_versions(), HasLength(1))) return d tahoe_lafs-1.20.0/src/allmydata/test/mutable/test_update.py0000644000000000000000000002416713615410400020673 0ustar00""" Ported to Python 3. """ import re from ..common import AsyncTestCase from testtools.matchers import ( Equals, IsInstance, GreaterThan, ) from twisted.internet import defer from allmydata.interfaces import MDMF_VERSION from allmydata.mutable.filenode import MutableFileNode from allmydata.mutable.publish import MutableData, DEFAULT_MUTABLE_MAX_SEGMENT_SIZE from ..no_network import GridTestMixin from .. import common_util as testutil # We should really force a smaller segsize for the duration of the tests, to # let them run faster, but Many of them tests depend upon a specific segment # size. Factor out this expectation here, to start the process of cleaning # this up. SEGSIZE = 128*1024 class Update(GridTestMixin, AsyncTestCase, testutil.ShouldFailMixin): def setUp(self): GridTestMixin.setUp(self) self.basedir = self.mktemp() self.set_up_grid(num_servers=13) self.c = self.g.clients[0] self.nm = self.c.nodemaker # self.data should be at least three segments long. td = b"testdata " self.data = td*(int(3*SEGSIZE//len(td))+10) # currently about 400kB self.assertThat(len(self.data), GreaterThan(3*SEGSIZE)) self.small_data = b"test data" * 10 # 90 B; SDMF def do_upload_sdmf(self): d = self.nm.create_mutable_file(MutableData(self.small_data)) def _then(n): self.assertThat(n, IsInstance(MutableFileNode)) self.sdmf_node = n d.addCallback(_then) return d def do_upload_mdmf(self): d = self.nm.create_mutable_file(MutableData(self.data), version=MDMF_VERSION) def _then(n): self.assertThat(n, IsInstance(MutableFileNode)) self.mdmf_node = n d.addCallback(_then) return d def _test_replace(self, offset, new_data): expected = self.data[:offset]+new_data+self.data[offset+len(new_data):] d0 = self.do_upload_mdmf() def _run(ign): d = defer.succeed(None) d.addCallback(lambda ign: self.mdmf_node.get_best_mutable_version()) d.addCallback(lambda mv: mv.update(MutableData(new_data), offset)) d.addCallback(lambda ign: self.mdmf_node.download_best_version()) def _check(results): if results != expected: print() print("got: %s ... %s" % (results[:20], results[-20:])) print("exp: %s ... %s" % (expected[:20], expected[-20:])) self.fail("results != expected") d.addCallback(_check) return d d0.addCallback(_run) return d0 def test_append(self): # We should be able to append data to a mutable file and get # what we expect. return self._test_replace(len(self.data), b"appended") def test_replace_middle(self): # We should be able to replace data in the middle of a mutable # file and get what we expect back. 
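        # _test_replace() computes the expected contents itself:
        #   expected = self.data[:100] + b"replaced" + self.data[100+len(b"replaced"):]
        # and compares that against download_best_version().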
return self._test_replace(100, b"replaced") def test_replace_beginning(self): # We should be able to replace data at the beginning of the file # without truncating the file return self._test_replace(0, b"beginning") def test_replace_segstart1(self): return self._test_replace(128*1024+1, b"NNNN") def test_replace_zero_length_beginning(self): return self._test_replace(0, b"") def test_replace_zero_length_middle(self): return self._test_replace(50, b"") def test_replace_zero_length_segstart1(self): return self._test_replace(128*1024+1, b"") def test_replace_and_extend(self): # We should be able to replace data in the middle of a mutable # file and extend that mutable file and get what we expect. return self._test_replace(100, b"modified " * 100000) def _check_differences(self, got, expected): # displaying arbitrary file corruption is tricky for a # 1MB file of repeating data,, so look for likely places # with problems and display them separately gotmods = [mo.span() for mo in re.finditer(b'([A-Z]+)', got)] expmods = [mo.span() for mo in re.finditer(b'([A-Z]+)', expected)] gotspans = ["%d:%d=%r" % (start,end,got[start:end]) for (start,end) in gotmods] expspans = ["%d:%d=%r" % (start,end,expected[start:end]) for (start,end) in expmods] #print("expecting: %s" % expspans) if got != expected: print("differences:") for segnum in range(len(expected)//SEGSIZE): start = segnum * SEGSIZE end = (segnum+1) * SEGSIZE got_ends = "%s .. %s" % (got[start:start+20], got[end-20:end]) exp_ends = "%s .. %s" % (expected[start:start+20], expected[end-20:end]) if got_ends != exp_ends: print("expected[%d]: %s" % (start, exp_ends)) print("got [%d]: %s" % (start, got_ends)) if expspans != gotspans: print("expected: %s" % expspans) print("got : %s" % gotspans) open("EXPECTED","wb").write(expected) open("GOT","wb").write(got) print("wrote data to EXPECTED and GOT") self.fail("didn't get expected data") def test_replace_locations(self): # exercise fencepost conditions suspects = list(range(SEGSIZE-3, SEGSIZE+1)) + list( range(2*SEGSIZE-3, 2*SEGSIZE+1)) letters = iter("ABCDEFGHIJKLMNOPQRSTUVWXYZ") d0 = self.do_upload_mdmf() def _run(ign): expected = self.data d = defer.succeed(None) for offset in suspects: new_data = next(letters).encode("ascii") * 2 # "AA", then "BB", etc expected = expected[:offset]+new_data+expected[offset+2:] d.addCallback(lambda ign: self.mdmf_node.get_best_mutable_version()) def _modify(mv, offset=offset, new_data=new_data): # close over 'offset','new_data' md = MutableData(new_data) return mv.update(md, offset) d.addCallback(_modify) d.addCallback(lambda ignored: self.mdmf_node.download_best_version()) d.addCallback(self._check_differences, expected) return d d0.addCallback(_run) return d0 def test_append_power_of_two(self): # If we attempt to extend a mutable file so that its segment # count crosses a power-of-two boundary, the update operation # should know how to reencode the file. # Note that the data populating self.mdmf_node is about 900 KiB # long -- this is 7 segments in the default segment size. So we # need to add 2 segments worth of data to push it over a # power-of-two boundary. 
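        # The update below appends two full segments' worth of b"a" at offset
        # len(self.data), then checks that download_best_version() returns
        # self.data plus those two segments.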
segment = b"a" * DEFAULT_MUTABLE_MAX_SEGMENT_SIZE new_data = self.data + (segment * 2) d0 = self.do_upload_mdmf() def _run(ign): d = defer.succeed(None) d.addCallback(lambda ign: self.mdmf_node.get_best_mutable_version()) d.addCallback(lambda mv: mv.update(MutableData(segment * 2), len(self.data))) d.addCallback(lambda ign: self.mdmf_node.download_best_version()) d.addCallback(lambda results: self.assertThat(results, Equals(new_data))) return d d0.addCallback(_run) return d0 def test_update_sdmf(self): # Running update on a single-segment file should still work. new_data = self.small_data + b"appended" d0 = self.do_upload_sdmf() def _run(ign): d = defer.succeed(None) d.addCallback(lambda ign: self.sdmf_node.get_best_mutable_version()) d.addCallback(lambda mv: mv.update(MutableData(b"appended"), len(self.small_data))) d.addCallback(lambda ign: self.sdmf_node.download_best_version()) d.addCallback(lambda results: self.assertThat(results, Equals(new_data))) return d d0.addCallback(_run) return d0 def test_replace_in_last_segment(self): # The wrapper should know how to handle the tail segment # appropriately. replace_offset = len(self.data) - 100 new_data = self.data[:replace_offset] + b"replaced" rest_offset = replace_offset + len(b"replaced") new_data += self.data[rest_offset:] d0 = self.do_upload_mdmf() def _run(ign): d = defer.succeed(None) d.addCallback(lambda ign: self.mdmf_node.get_best_mutable_version()) d.addCallback(lambda mv: mv.update(MutableData(b"replaced"), replace_offset)) d.addCallback(lambda ign: self.mdmf_node.download_best_version()) d.addCallback(lambda results: self.assertThat(results, Equals(new_data))) return d d0.addCallback(_run) return d0 def test_multiple_segment_replace(self): replace_offset = 2 * DEFAULT_MUTABLE_MAX_SEGMENT_SIZE new_data = self.data[:replace_offset] new_segment = b"a" * DEFAULT_MUTABLE_MAX_SEGMENT_SIZE new_data += 2 * new_segment new_data += b"replaced" rest_offset = len(new_data) new_data += self.data[rest_offset:] d0 = self.do_upload_mdmf() def _run(ign): d = defer.succeed(None) d.addCallback(lambda ign: self.mdmf_node.get_best_mutable_version()) d.addCallback(lambda mv: mv.update(MutableData((2 * new_segment) + b"replaced"), replace_offset)) d.addCallback(lambda ignored: self.mdmf_node.download_best_version()) d.addCallback(lambda results: self.assertThat(results, Equals(new_data))) return d d0.addCallback(_run) return d0 tahoe_lafs-1.20.0/src/allmydata/test/mutable/test_version.py0000644000000000000000000004223513615410400021072 0ustar00""" Tests related to the way ``allmydata.mutable`` handles different versions of data for an object. """ from io import StringIO import os from typing import Optional from ..common import AsyncTestCase from testtools.matchers import ( Equals, IsInstance, HasLength, Contains, ) from allmydata import uri from allmydata.interfaces import SDMF_VERSION, MDMF_VERSION from allmydata.util import base32, consumer, mathutil from allmydata.util.fileutil import abspath_expanduser_unicode from allmydata.util.deferredutil import gatherResults from allmydata.mutable.filenode import MutableFileNode from allmydata.mutable.common import MODE_WRITE, MODE_READ, UnrecoverableFileError from allmydata.mutable.publish import MutableData from allmydata.scripts import debug from ..no_network import GridTestMixin from .util import PublishMixin from .. 
import common_util as testutil class Version(GridTestMixin, AsyncTestCase, testutil.ShouldFailMixin, \ PublishMixin): def setUp(self): GridTestMixin.setUp(self) self.basedir = self.mktemp() self.set_up_grid() self.c = self.g.clients[0] self.nm = self.c.nodemaker self.data = b"test data" * 100000 # about 900 KiB; MDMF self.small_data = b"test data" * 10 # 90 B; SDMF async def do_upload_mdmf(self, data: Optional[bytes] = None) -> MutableFileNode: if data is None: data = self.data n = await self.nm.create_mutable_file(MutableData(data), version=MDMF_VERSION) self.assertThat(n, IsInstance(MutableFileNode)) self.assertThat(n._protocol_version, Equals(MDMF_VERSION)) self.mdmf_node = n return n async def do_upload_sdmf(self, data: Optional[bytes] = None) -> MutableFileNode: if data is None: data = self.small_data n = await self.nm.create_mutable_file(MutableData(data)) self.assertThat(n, IsInstance(MutableFileNode)) self.assertThat(n._protocol_version, Equals(SDMF_VERSION)) self.sdmf_node = n return n async def do_upload_empty_sdmf(self) -> MutableFileNode: n = await self.nm.create_mutable_file(MutableData(b"")) self.assertThat(n, IsInstance(MutableFileNode)) self.sdmf_zero_length_node = n self.assertThat(n._protocol_version, Equals(SDMF_VERSION)) return n async def do_upload(self) -> MutableFileNode: await self.do_upload_mdmf() return await self.do_upload_sdmf() async def test_debug(self) -> None: n = await self.do_upload_mdmf() fso = debug.FindSharesOptions() storage_index = base32.b2a(n.get_storage_index()) fso.si_s = str(storage_index, "utf-8") # command-line options are unicode on Python 3 fso.nodedirs = [os.path.dirname(abspath_expanduser_unicode(str(storedir))) for (i,ss,storedir) in self.iterate_servers()] # This attribute isn't defined on FindSharesOptions but `find_shares()` # definitely expects it... fso.stdout = StringIO() # type: ignore[attr-defined] debug.find_shares(fso) sharefiles = fso.stdout.getvalue().splitlines() # type: ignore[attr-defined] expected = self.nm.default_encoding_parameters["n"] self.assertThat(sharefiles, HasLength(expected)) # This attribute isn't defined on DebugOptions but `dump_share()` # definitely expects it... do = debug.DumpOptions() do["filename"] = sharefiles[0] do.stdout = StringIO() # type: ignore[attr-defined] debug.dump_share(do) output = do.stdout.getvalue() # type: ignore[attr-defined] lines = set(output.splitlines()) self.assertTrue("Mutable slot found:" in lines, output) self.assertTrue(" share_type: MDMF" in lines, output) self.assertTrue(" num_extra_leases: 0" in lines, output) self.assertTrue(" MDMF contents:" in lines, output) self.assertTrue(" seqnum: 1" in lines, output) self.assertTrue(" required_shares: 3" in lines, output) self.assertTrue(" total_shares: 10" in lines, output) self.assertTrue(" segsize: 131073" in lines, output) self.assertTrue(" datalen: %d" % len(self.data) in lines, output) vcap = str(n.get_verify_cap().to_string(), "utf-8") self.assertTrue(" verify-cap: %s" % vcap in lines, output) cso = debug.CatalogSharesOptions() cso.nodedirs = fso.nodedirs # Definitely not options on CatalogSharesOptions, but the code does use # stdout and stderr... 
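        # catalog_shares() emits one line per share; the assertions below
        # rely on the MDMF line layout described by the comments that follow:
        #   MDMF <storage_index> <k>/<N> <datalen> #<seqnum>:<roothash> <lease-time-left> <quoted share path>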
cso.stdout = StringIO() # type: ignore[attr-defined] cso.stderr = StringIO() # type: ignore[attr-defined] debug.catalog_shares(cso) shares = cso.stdout.getvalue().splitlines() # type: ignore[attr-defined] oneshare = shares[0] # all shares should be MDMF self.failIf(oneshare.startswith("UNKNOWN"), oneshare) self.assertTrue(oneshare.startswith("MDMF"), oneshare) fields = oneshare.split() self.assertThat(fields[0], Equals("MDMF")) self.assertThat(fields[1].encode("ascii"), Equals(storage_index)) self.assertThat(fields[2], Equals("3/10")) self.assertThat(fields[3], Equals("%d" % len(self.data))) self.assertTrue(fields[4].startswith("#1:"), fields[3]) # the rest of fields[4] is the roothash, which depends upon # encryption salts and is not constant. fields[5] is the # remaining time on the longest lease, which is timing dependent. # The rest of the line is the quoted pathname to the share. async def test_get_sequence_number(self) -> None: await self.do_upload() bv = await self.mdmf_node.get_best_readable_version() self.assertThat(bv.get_sequence_number(), Equals(1)) bv = await self.sdmf_node.get_best_readable_version() self.assertThat(bv.get_sequence_number(), Equals(1)) # Now update. The sequence number in both cases should be 1 in # both cases. new_data = MutableData(b"foo bar baz" * 100000) new_small_data = MutableData(b"foo bar baz" * 10) d1 = self.mdmf_node.overwrite(new_data) d2 = self.sdmf_node.overwrite(new_small_data) await gatherResults([d1, d2]) bv = await self.mdmf_node.get_best_readable_version() self.assertThat(bv.get_sequence_number(), Equals(2)) bv = await self.sdmf_node.get_best_readable_version() self.assertThat(bv.get_sequence_number(), Equals(2)) async def test_cap_after_upload(self) -> None: # If we create a new mutable file and upload things to it, and # it's an MDMF file, we should get an MDMF cap back from that # file and should be able to use that. # That's essentially what MDMF node is, so just check that. await self.do_upload_mdmf() mdmf_uri = self.mdmf_node.get_uri() cap = uri.from_string(mdmf_uri) self.assertTrue(isinstance(cap, uri.WriteableMDMFFileURI)) readonly_mdmf_uri = self.mdmf_node.get_readonly_uri() cap = uri.from_string(readonly_mdmf_uri) self.assertTrue(isinstance(cap, uri.ReadonlyMDMFFileURI)) async def test_mutable_version(self) -> None: # assert that getting parameters from the IMutableVersion object # gives us the same data as getting them from the filenode itself await self.do_upload() bv = await self.mdmf_node.get_best_mutable_version() n = self.mdmf_node self.assertThat(bv.get_writekey(), Equals(n.get_writekey())) self.assertThat(bv.get_storage_index(), Equals(n.get_storage_index())) self.assertFalse(bv.is_readonly()) bv = await self.sdmf_node.get_best_mutable_version() n = self.sdmf_node self.assertThat(bv.get_writekey(), Equals(n.get_writekey())) self.assertThat(bv.get_storage_index(), Equals(n.get_storage_index())) self.assertFalse(bv.is_readonly()) async def test_get_readonly_version(self) -> None: await self.do_upload() bv = await self.mdmf_node.get_best_readable_version() self.assertTrue(bv.is_readonly()) # Attempting to get a mutable version of a mutable file from a # filenode initialized with a readcap should return a readonly # version of that same node. 
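        # get_readonly() gives us the readcap view of the same file; even
        # asking that view for its "best mutable version" must yield an
        # object that reports is_readonly() == True.  The SDMF node gets the
        # same treatment just below.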
ro = self.mdmf_node.get_readonly() v = await ro.get_best_mutable_version() self.assertTrue(v.is_readonly()) bv = await self.sdmf_node.get_best_readable_version() self.assertTrue(bv.is_readonly()) ro = self.sdmf_node.get_readonly() v = await ro.get_best_mutable_version() self.assertTrue(v.is_readonly()) async def test_toplevel_overwrite(self) -> None: new_data = MutableData(b"foo bar baz" * 100000) new_small_data = MutableData(b"foo bar baz" * 10) await self.do_upload() await self.mdmf_node.overwrite(new_data) data = await self.mdmf_node.download_best_version() self.assertThat(data, Equals(b"foo bar baz" * 100000)) await self.sdmf_node.overwrite(new_small_data) data = await self.sdmf_node.download_best_version() self.assertThat(data, Equals(b"foo bar baz" * 10)) async def test_toplevel_modify(self) -> None: await self.do_upload() def modifier(old_contents, servermap, first_time): return old_contents + b"modified" await self.mdmf_node.modify(modifier) data = await self.mdmf_node.download_best_version() self.assertThat(data, Contains(b"modified")) await self.sdmf_node.modify(modifier) data = await self.sdmf_node.download_best_version() self.assertThat(data, Contains(b"modified")) async def test_version_modify(self) -> None: # TODO: When we can publish multiple versions, alter this test # to modify a version other than the best usable version, then # test to see that the best recoverable version is that. await self.do_upload() def modifier(old_contents, servermap, first_time): return old_contents + b"modified" await self.mdmf_node.modify(modifier) data = await self.mdmf_node.download_best_version() self.assertThat(data, Contains(b"modified")) await self.sdmf_node.modify(modifier) data = await self.sdmf_node.download_best_version() self.assertThat(data, Contains(b"modified")) async def test_download_version(self) -> None: await self.publish_multiple() # We want to have two recoverable versions on the grid. self._set_versions({0:0,2:0,4:0,6:0,8:0, 1:1,3:1,5:1,7:1,9:1}) # Now try to download each version. We should get the plaintext # associated with that version. smap = await self._fn.get_servermap(mode=MODE_READ) versions = smap.recoverable_versions() assert len(versions) == 2 self.servermap = smap self.version1, self.version2 = versions assert self.version1 != self.version2 self.version1_seqnum = self.version1[0] self.version2_seqnum = self.version2[0] self.version1_index = self.version1_seqnum - 1 self.version2_index = self.version2_seqnum - 1 results = await self._fn.download_version(self.servermap, self.version1) self.assertThat(self.CONTENTS[self.version1_index], Equals(results)) results = await self._fn.download_version(self.servermap, self.version2) self.assertThat(self.CONTENTS[self.version2_index], Equals(results)) async def test_download_nonexistent_version(self) -> None: await self.do_upload_mdmf() servermap = await self.mdmf_node.get_servermap(mode=MODE_WRITE) await self.shouldFail(UnrecoverableFileError, "nonexistent version", None, self.mdmf_node.download_version, servermap, "not a version") async def _test_partial_read(self, node, expected, modes, step) -> None: version = await node.get_best_readable_version() for (name, offset, length) in modes: await self._do_partial_read(version, name, expected, offset, length) # then read the whole thing, but only a few bytes at a time, and see # that the results are what we expect. 
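        # Read `step` bytes at a time (offsets 0, step, 2*step, ...) into a
        # single MemoryConsumer and compare the joined chunks against the
        # full expected plaintext.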
c = consumer.MemoryConsumer() for i in range(0, len(expected), step): await version.read(c, i, step) self.assertThat(expected, Equals(b"".join(c.chunks))) async def _do_partial_read(self, version, name, expected, offset, length) -> None: c = consumer.MemoryConsumer() await version.read(c, offset, length) if length is None: expected_range = expected[offset:] else: expected_range = expected[offset:offset+length] results = b"".join(c.chunks) if results != expected_range: print("read([%d]+%s) got %d bytes, not %d" % \ (offset, length, len(results), len(expected_range))) print("got: %r ... %r" % (results[:20], results[-20:])) print("exp: %r ... %r" % (expected_range[:20], expected_range[-20:])) self.fail("results[%s] != expected_range" % name) async def test_partial_read_mdmf_0(self) -> None: data = b"" result = await self.do_upload_mdmf(data=data) modes = [("all1", 0,0), ("all2", 0,None), ] await self._test_partial_read(result, data, modes, 1) async def test_partial_read_mdmf_large(self) -> None: segment_boundary = mathutil.next_multiple(128 * 1024, 3) modes = [("start_on_segment_boundary", segment_boundary, 50), ("ending_one_byte_after_segment_boundary", segment_boundary-50, 51), ("zero_length_at_start", 0, 0), ("zero_length_in_middle", 50, 0), ("zero_length_at_segment_boundary", segment_boundary, 0), ("complete_file1", 0, len(self.data)), ("complete_file2", 0, None), ] result = await self.do_upload_mdmf() await self._test_partial_read(result, self.data, modes, 10000) async def test_partial_read_sdmf_0(self) -> None: data = b"" modes = [("all1", 0,0), ("all2", 0,None), ] result = await self.do_upload_sdmf(data=data) await self._test_partial_read(result, data, modes, 1) async def test_partial_read_sdmf_2(self) -> None: data = b"hi" modes = [("one_byte", 0, 1), ("last_byte", 1, 1), ("last_byte2", 1, None), ("complete_file", 0, 2), ("complete_file2", 0, None), ] result = await self.do_upload_sdmf(data=data) await self._test_partial_read(result, data, modes, 1) async def test_partial_read_sdmf_90(self) -> None: modes = [("start_at_middle", 50, 40), ("start_at_middle2", 50, None), ("zero_length_at_start", 0, 0), ("zero_length_in_middle", 50, 0), ("zero_length_at_end", 90, 0), ("complete_file1", 0, None), ("complete_file2", 0, 90), ] result = await self.do_upload_sdmf() await self._test_partial_read(result, self.small_data, modes, 10) async def test_partial_read_sdmf_100(self) -> None: data = b"test data "*10 modes = [("start_at_middle", 50, 50), ("start_at_middle2", 50, None), ("zero_length_at_start", 0, 0), ("zero_length_in_middle", 50, 0), ("complete_file1", 0, 100), ("complete_file2", 0, None), ] result = await self.do_upload_sdmf(data=data) await self._test_partial_read(result, data, modes, 10) async def _test_read_and_download(self, node, expected) -> None: version = await node.get_best_readable_version() c = consumer.MemoryConsumer() await version.read(c) self.assertThat(expected, Equals(b"".join(c.chunks))) c2 = consumer.MemoryConsumer() await version.read(c2, offset=0, size=len(expected)) self.assertThat(expected, Equals(b"".join(c2.chunks))) data = await node.download_best_version() self.assertThat(expected, Equals(data)) async def test_read_and_download_mdmf(self) -> None: result = await self.do_upload_mdmf() await self._test_read_and_download(result, self.data) async def test_read_and_download_sdmf(self) -> None: result = await self.do_upload_sdmf() await self._test_read_and_download(result, self.small_data) async def test_read_and_download_sdmf_zero_length(self) -> None: result = await 
self.do_upload_empty_sdmf() await self._test_read_and_download(result, b"") tahoe_lafs-1.20.0/src/allmydata/test/mutable/util.py0000644000000000000000000004051413615410400017321 0ustar00""" Ported to Python 3. """ from future.utils import bchr from io import BytesIO import attr from twisted.internet import defer, reactor from foolscap.api import eventually, fireEventually from allmydata import client from allmydata.nodemaker import NodeMaker from allmydata.interfaces import SDMF_VERSION, MDMF_VERSION from allmydata.util import base32 from allmydata.util.hashutil import tagged_hash from allmydata.storage_client import StorageFarmBroker from allmydata.mutable.layout import MDMFSlotReadProxy from allmydata.mutable.publish import MutableData from ..common import ( EMPTY_CLIENT_CONFIG, ) def eventuaaaaaly(res=None): d = fireEventually(res) d.addCallback(fireEventually) d.addCallback(fireEventually) return d # this "FakeStorage" exists to put the share data in RAM and avoid using real # network connections, both to speed up the tests and to reduce the amount of # non-mutable.py code being exercised. class FakeStorage(object): # this class replaces the collection of storage servers, allowing the # tests to examine and manipulate the published shares. It also lets us # control the order in which read queries are answered, to exercise more # of the error-handling code in Retrieve . # # Note that we ignore the storage index: this FakeStorage instance can # only be used for a single storage index. def __init__(self): self._peers = {} # _sequence is used to cause the responses to occur in a specific # order. If it is in use, then we will defer queries instead of # answering them right away, accumulating the Deferreds in a dict. We # don't know exactly how many queries we'll get, so exactly one # second after the first query arrives, we will release them all (in # order). self._sequence = None self._pending = {} self._pending_timer = None def read(self, peerid, storage_index): shares = self._peers.get(peerid, {}) if self._sequence is None: return eventuaaaaaly(shares) d = defer.Deferred() if not self._pending: self._pending_timer = reactor.callLater(1.0, self._fire_readers) if peerid not in self._pending: self._pending[peerid] = [] self._pending[peerid].append( (d, shares) ) return d def _fire_readers(self): self._pending_timer = None pending = self._pending self._pending = {} for peerid in self._sequence: if peerid in pending: for (d, shares) in pending.pop(peerid): eventually(d.callback, shares) for peerid in pending: for (d, shares) in pending[peerid]: eventually(d.callback, shares) def write(self, peerid, storage_index, shnum, offset, data): if peerid not in self._peers: self._peers[peerid] = {} shares = self._peers[peerid] f = BytesIO() f.write(shares.get(shnum, b"")) f.seek(offset) f.write(data) shares[shnum] = f.getvalue() # This doesn't actually implement the whole interface, but adding a commented # interface implementation annotation for grepping purposes. #@implementer(RIStorageServer) class FakeStorageServer(object): """ A fake Foolscap remote object, implemented by overriding callRemote() to call local methods. 
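    Only the remote methods the mutable-file code actually exercises are
    provided (slot_readv, slot_testv_and_readv_and_writev,
    advise_corrupt_share); the share data itself lives in the FakeStorage
    passed to __init__.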
""" def __init__(self, peerid, storage): self.peerid = peerid self.storage = storage self.queries = 0 def callRemote(self, methname, *args, **kwargs): self.queries += 1 def _call(): meth = getattr(self, methname) return meth(*args, **kwargs) d = fireEventually() d.addCallback(lambda res: _call()) return d def callRemoteOnly(self, methname, *args, **kwargs): self.queries += 1 d = self.callRemote(methname, *args, **kwargs) d.addBoth(lambda ignore: None) pass def advise_corrupt_share(self, share_type, storage_index, shnum, reason): pass def slot_readv(self, storage_index, shnums, readv): d = self.storage.read(self.peerid, storage_index) def _read(shares): response = {} for shnum in shares: if shnums and shnum not in shnums: continue vector = response[shnum] = [] for (offset, length) in readv: assert isinstance(offset, int), offset assert isinstance(length, int), length vector.append(shares[shnum][offset:offset+length]) return response d.addCallback(_read) return d def slot_testv_and_readv_and_writev(self, storage_index, secrets, tw_vectors, read_vector): # always-pass: parrot the test vectors back to them. readv = {} for shnum, (testv, writev, new_length) in list(tw_vectors.items()): for (offset, length, op, specimen) in testv: assert op == b"eq" # TODO: this isn't right, the read is controlled by read_vector, # not by testv readv[shnum] = [ specimen for (offset, length, op, specimen) in testv ] for (offset, data) in writev: self.storage.write(self.peerid, storage_index, shnum, offset, data) answer = (True, readv) return fireEventually(answer) def flip_bit(original, byte_offset): return (original[:byte_offset] + bchr(ord(original[byte_offset:byte_offset+1]) ^ 0x01) + original[byte_offset+1:]) def add_two(original, byte_offset): # It isn't enough to simply flip the bit for the version number, # because 1 is a valid version number. So we add two instead. return (original[:byte_offset] + bchr(ord(original[byte_offset:byte_offset+1]) ^ 0x02) + original[byte_offset+1:]) def corrupt(res, s, offset, shnums_to_corrupt=None, offset_offset=0): # if shnums_to_corrupt is None, corrupt all shares. Otherwise it is a # list of shnums to corrupt. ds = [] for peerid in s._peers: shares = s._peers[peerid] for shnum in shares: if (shnums_to_corrupt is not None and shnum not in shnums_to_corrupt): continue data = shares[shnum] # We're feeding the reader all of the share data, so it # won't need to use the rref that we didn't provide, nor the # storage index that we didn't provide. We do this because # the reader will work for both MDMF and SDMF. reader = MDMFSlotReadProxy(None, None, shnum, data) # We need to get the offsets for the next part. 
d = reader.get_verinfo() def _do_corruption(verinfo, data, shnum, shares): (seqnum, root_hash, IV, segsize, datalen, k, n, prefix, o) = verinfo if isinstance(offset, tuple): offset1, offset2 = offset else: offset1 = offset offset2 = 0 if offset1 == "pubkey" and IV: real_offset = 107 elif offset1 in o: real_offset = o[offset1] else: real_offset = offset1 real_offset = int(real_offset) + offset2 + offset_offset assert isinstance(real_offset, int), offset if offset1 == 0: # verbyte f = add_two else: f = flip_bit shares[shnum] = f(data, real_offset) d.addCallback(_do_corruption, data, shnum, shares) ds.append(d) dl = defer.DeferredList(ds) dl.addCallback(lambda ignored: res) return dl @attr.s class Peer(object): peerid = attr.ib() storage_server = attr.ib() announcement = attr.ib() def make_peer(s, i): """ Create a "peer" suitable for use with ``make_storagebroker_with_peers`` or ``make_nodemaker_with_peers``. :param IServer s: The server with which to associate the peers. :param int i: A unique identifier for this peer within the whole group of peers to be used. For example, a sequence number. This is used to generate a unique peer id. :rtype: ``Peer`` """ peerid = base32.b2a(tagged_hash(b"peerid", b"%d" % i)[:20]) fss = FakeStorageServer(peerid, s) ann = { "anonymous-storage-FURL": "pb://%s@nowhere/fake" % (str(peerid, "utf-8"),), "permutation-seed-base32": peerid, } return Peer(peerid=peerid, storage_server=fss, announcement=ann) def make_storagebroker(s=None, num_peers=10): """ Make a ``StorageFarmBroker`` connected to some number of fake storage servers. :param IServer s: The server with which to associate the fake storage servers. :param int num_peers: The number of fake storage servers to associate with the broker. """ if not s: s = FakeStorage() peers = [] for peer_num in range(num_peers): peers.append(make_peer(s, peer_num)) return make_storagebroker_with_peers(peers) def make_storagebroker_with_peers(peers): """ Make a ``StorageFarmBroker`` connected to the given storage servers. :param list peers: The storage servers to associate with the storage broker. """ storage_broker = StorageFarmBroker(True, None, EMPTY_CLIENT_CONFIG) for peer in peers: storage_broker.test_add_rref( peer.peerid, peer.storage_server, peer.announcement, ) return storage_broker def make_nodemaker(s=None, num_peers=10): """ Make a ``NodeMaker`` connected to some number of fake storage servers. :param IServer s: The server with which to associate the fake storage servers. :param int num_peers: The number of fake storage servers to associate with the node maker. """ storage_broker = make_storagebroker(s, num_peers) return make_nodemaker_with_storage_broker(storage_broker) def make_nodemaker_with_peers(peers): """ Make a ``NodeMaker`` connected to the given storage servers. :param list peers: The storage servers to associate with the node maker. """ storage_broker = make_storagebroker_with_peers(peers) return make_nodemaker_with_storage_broker(storage_broker) def make_nodemaker_with_storage_broker(storage_broker): """ Make a ``NodeMaker`` using the given storage broker. :param StorageFarmBroker peers: The storage broker to use. """ sh = client.SecretHolder(b"lease secret", b"convergence secret") keygen = client.KeyGenerator() nodemaker = NodeMaker(storage_broker, sh, None, None, None, {"k": 3, "n": 10}, SDMF_VERSION, keygen) return nodemaker class PublishMixin(object): def publish_one(self): # publish a file and create shares, which can then be manipulated # later. 
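        # On success this leaves self._fn (a node made from the writecap),
        # self._fn2 (a second node made from the same cap), self._storage
        # (the FakeStorage holding every share) and self._storage_broker
        # around for the tests to inspect and corrupt.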
self.CONTENTS = b"New contents go here" * 1000 self.uploadable = MutableData(self.CONTENTS) self._storage = FakeStorage() self._nodemaker = make_nodemaker(self._storage) self._storage_broker = self._nodemaker.storage_broker d = self._nodemaker.create_mutable_file(self.uploadable) def _created(node): self._fn = node self._fn2 = self._nodemaker.create_from_cap(node.get_uri()) d.addCallback(_created) return d def publish_mdmf(self, data=None): # like publish_one, except that the result is guaranteed to be # an MDMF file. # self.CONTENTS should have more than one segment. if data is None: data = b"This is an MDMF file" * 100000 self.CONTENTS = data self.uploadable = MutableData(self.CONTENTS) self._storage = FakeStorage() self._nodemaker = make_nodemaker(self._storage) self._storage_broker = self._nodemaker.storage_broker d = self._nodemaker.create_mutable_file(self.uploadable, version=MDMF_VERSION) def _created(node): self._fn = node self._fn2 = self._nodemaker.create_from_cap(node.get_uri()) d.addCallback(_created) return d def publish_sdmf(self, data=None): # like publish_one, except that the result is guaranteed to be # an SDMF file if data is None: data = b"This is an SDMF file" * 1000 self.CONTENTS = data self.uploadable = MutableData(self.CONTENTS) self._storage = FakeStorage() self._nodemaker = make_nodemaker(self._storage) self._storage_broker = self._nodemaker.storage_broker d = self._nodemaker.create_mutable_file(self.uploadable, version=SDMF_VERSION) def _created(node): self._fn = node self._fn2 = self._nodemaker.create_from_cap(node.get_uri()) d.addCallback(_created) return d def publish_multiple(self, version=0): self.CONTENTS = [b"Contents 0", b"Contents 1", b"Contents 2", b"Contents 3a", b"Contents 3b"] self.uploadables = [MutableData(d) for d in self.CONTENTS] self._copied_shares = {} self._storage = FakeStorage() self._nodemaker = make_nodemaker(self._storage) d = self._nodemaker.create_mutable_file(self.uploadables[0], version=version) # seqnum=1 def _created(node): self._fn = node # now create multiple versions of the same file, and accumulate # their shares, so we can mix and match them later. d = defer.succeed(None) d.addCallback(self._copy_shares, 0) d.addCallback(lambda res: node.overwrite(self.uploadables[1])) #s2 d.addCallback(self._copy_shares, 1) d.addCallback(lambda res: node.overwrite(self.uploadables[2])) #s3 d.addCallback(self._copy_shares, 2) d.addCallback(lambda res: node.overwrite(self.uploadables[3])) #s4a d.addCallback(self._copy_shares, 3) # now we replace all the shares with version s3, and upload a new # version to get s4b. rollback = dict([(i,2) for i in range(10)]) d.addCallback(lambda res: self._set_versions(rollback)) d.addCallback(lambda res: node.overwrite(self.uploadables[4])) #s4b d.addCallback(self._copy_shares, 4) # we leave the storage in state 4 return d d.addCallback(_created) return d def _copy_shares(self, ignored, index): shares = self._storage._peers # we need a deep copy new_shares = {} for peerid in shares: new_shares[peerid] = {} for shnum in shares[peerid]: new_shares[peerid][shnum] = shares[peerid][shnum] self._copied_shares[index] = new_shares def _set_versions(self, versionmap): # versionmap maps shnums to which version (0,1,2,3,4) we want the # share to be at. Any shnum which is left out of the map will stay at # its current version. 
shares = self._storage._peers oldshares = self._copied_shares for peerid in shares: for shnum in shares[peerid]: if shnum in versionmap: index = versionmap[shnum] shares[peerid][shnum] = oldshares[index][peerid][shnum] class CheckerMixin(object): def check_good(self, r, where): self.failUnless(r.is_healthy(), where) return r def check_bad(self, r, where): self.failIf(r.is_healthy(), where) return r def check_expected_failure(self, r, expected_exception, substring, where): for (peerid, storage_index, shnum, f) in r.get_share_problems(): if f.check(expected_exception): self.failUnless(substring in str(f), "%s: substring '%s' not in '%s'" % (where, substring, str(f))) return self.fail("%s: didn't see expected exception %s in problems %s" % (where, expected_exception, r.get_share_problems())) tahoe_lafs-1.20.0/src/allmydata/test/plugins/tahoe_lafs_dropin.py0000644000000000000000000000043013615410400022045 0ustar00from allmydata.test.common import ( AdoptedServerPort, ) from allmydata.test.storage_plugin import ( DummyStorage, ) adoptedEndpointParser = AdoptedServerPort() dummyStoragev1 = DummyStorage(u"tahoe-lafs-dummy-v1") dummyStoragev2 = DummyStorage(u"tahoe-lafs-dummy-v2") tahoe_lafs-1.20.0/src/allmydata/test/web/__init__.py0000644000000000000000000000000013615410400017211 0ustar00tahoe_lafs-1.20.0/src/allmydata/test/web/common.py0000644000000000000000000000564613615410400016767 0ustar00""" Ported to Python 3. """ import re unknown_rwcap = u"lafs://from_the_future_rw_\u263A".encode('utf-8') unknown_rocap = u"ro.lafs://readonly_from_the_future_ro_\u263A".encode('utf-8') unknown_immcap = u"imm.lafs://immutable_from_the_future_imm_\u263A".encode('utf-8') def assert_soup_has_favicon(testcase, soup): """ Using a ``TestCase`` object ``testcase``, assert that the passed in ``BeautifulSoup`` object ``soup`` contains the tahoe favicon link. """ links = soup.find_all(u'link', rel=u'shortcut icon') testcase.assertTrue( any(t[u'href'] == u'/icon.png' for t in links), soup) def assert_soup_has_tag_with_attributes(testcase, soup, tag_name, attrs): """ Using a ``TestCase`` object ``testcase``, assert that the passed in ``BeatufulSoup`` object ``soup`` contains a tag ``tag_name`` (unicode) which has all the attributes in ``attrs`` (dict). """ tags = soup.find_all(tag_name) for tag in tags: if all(v in tag.attrs.get(k, []) for k, v in attrs.items()): # we found every attr in this tag; done return tag testcase.fail( u"No <{}> tags contain attributes: {}".format(tag_name, attrs) ) def assert_soup_has_tag_with_attributes_and_content(testcase, soup, tag_name, content, attrs): """ Using a ``TestCase`` object ``testcase``, assert that the passed in ``BeatufulSoup`` object ``soup`` contains a tag ``tag_name`` (unicode) which has all the attributes in ``attrs`` (dict) and contains the string ``content`` (unicode). """ assert_soup_has_tag_with_attributes(testcase, soup, tag_name, attrs) assert_soup_has_tag_with_content(testcase, soup, tag_name, content) def _normalized_contents(tag): """ :returns: all the text contents of the tag with whitespace normalized: all newlines removed and at most one space between words. """ return u" ".join(tag.text.split()) def assert_soup_has_tag_with_content(testcase, soup, tag_name, content): """ Using a ``TestCase`` object ``testcase``, assert that the passed in ``BeatufulSoup`` object ``soup`` contains a tag ``tag_name`` (unicode) which contains the string ``content`` (unicode). 
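    The match is deliberately loose: the content may appear as a direct
    child of the tag, inside one of its children, or in a
    whitespace-normalized rendering of the tag's text.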
""" tags = soup.find_all(tag_name) for tag in tags: if content in tag.contents: return # make these "fuzzy" options? for c in tag.contents: if content in c: return if content in _normalized_contents(tag): return testcase.fail( u"No <{}> tag contains the text '{}'".format(tag_name, content) ) def assert_soup_has_text(testcase, soup, text): """ Using a ``TestCase`` object ``testcase``, assert that the passed in ``BeautifulSoup`` object ``soup`` contains the passed in ``text`` anywhere as a text node. """ testcase.assertTrue( soup.find_all(string=re.compile(re.escape(text))), soup) tahoe_lafs-1.20.0/src/allmydata/test/web/matchers.py0000644000000000000000000000140513615410400017272 0ustar00""" Ported to Python 3. """ import attr from testtools.matchers import Mismatch @attr.s class _HasResponseCode(object): match_expected_code = attr.ib() def match(self, response): actual_code = response.code mismatch = self.match_expected_code.match(actual_code) if mismatch is None: return None return Mismatch( u"Response {} code: {}".format( response, mismatch.describe(), ), mismatch.get_details(), ) def has_response_code(match_expected_code): """ Match a Treq response with the given code. :param int expected_code: The HTTP response code expected of the response. """ return _HasResponseCode(match_expected_code) tahoe_lafs-1.20.0/src/allmydata/test/web/test_common.py0000644000000000000000000001560013615410400020015 0ustar00""" Tests for ``allmydata.web.common``. Ported to Python 3. """ import gc from bs4 import ( BeautifulSoup, ) from hyperlink import ( DecodedURL, ) from testtools.matchers import ( Equals, Contains, MatchesPredicate, AfterPreprocessing, ) from testtools.twistedsupport import ( failed, succeeded, has_no_result, ) from twisted.python.failure import ( Failure, ) from twisted.internet.error import ( ConnectionDone, ) from twisted.internet.defer import ( Deferred, fail, ) from twisted.web.server import ( NOT_DONE_YET, ) from twisted.web.resource import ( Resource, ) from ...web.common import ( render_exception, ) from ..common import ( SyncTestCase, ) from ..common_web import ( render, ) from .common import ( assert_soup_has_tag_with_attributes, ) class StaticResource(Resource, object): """ ``StaticResource`` is a resource that returns whatever Python object it is given from its render method. This is useful for testing ``render_exception``\\ 's handling of different render results. """ def __init__(self, response): Resource.__init__(self) self._response = response self._request = None @render_exception def render(self, request): self._request = request return self._response class RenderExceptionTests(SyncTestCase): """ Tests for ``render_exception`` (including the private helper ``_finish``). """ def test_exception(self): """ If the decorated method raises an exception then the exception is rendered into the response. """ class R(Resource): @render_exception def render(self, request): raise Exception("synthetic exception") self.assertThat( render(R(), {}), succeeded( Contains(b"synthetic exception"), ), ) def test_failure(self): """ If the decorated method returns a ``Deferred`` that fires with a ``Failure`` then the exception the ``Failure`` wraps is rendered into the response. """ resource = StaticResource(fail(Exception("synthetic exception"))) self.assertThat( render(resource, {}), succeeded( Contains(b"synthetic exception"), ), ) def test_resource(self): """ If the decorated method returns an ``IResource`` provider then that resource is used to render the response. 
""" resource = StaticResource(StaticResource(b"static result")) self.assertThat( render(resource, {}), succeeded( Equals(b"static result"), ), ) def test_unicode(self): """ If the decorated method returns a ``unicode`` string then that string is UTF-8 encoded and rendered into the response. """ text = u"\N{SNOWMAN}" resource = StaticResource(text) self.assertThat( render(resource, {}), succeeded( Equals(text.encode("utf-8")), ), ) def test_bytes(self): """ If the decorated method returns a ``bytes`` string then that string is rendered into the response. """ data = b"hello world" resource = StaticResource(data) self.assertThat( render(resource, {}), succeeded( Equals(data), ), ) def test_decodedurl(self): """ If the decorated method returns a ``DecodedURL`` then a redirect to that location is rendered into the response. """ loc = u"http://example.invalid/foo?bar=baz" resource = StaticResource(DecodedURL.from_text(loc)) self.assertThat( render(resource, {}), succeeded( MatchesPredicate( lambda value: assert_soup_has_tag_with_attributes( self, BeautifulSoup(value, 'html5lib'), "meta", {"http-equiv": "refresh", "content": "0;URL={}".format(loc), }, ) # The assertion will raise if it has a problem, otherwise # return None. Turn the None into something # MatchesPredicate recognizes as success. or True, "did not find meta refresh tag in %r", ), ), ) def test_none(self): """ If the decorated method returns ``None`` then the response is finished with no additional content. """ self.assertThat( render(StaticResource(None), {}), succeeded( Equals(b""), ), ) def test_not_done_yet(self): """ If the decorated method returns ``NOT_DONE_YET`` then the resource is responsible for finishing the request itself. """ the_request = [] class R(Resource): @render_exception def render(self, request): the_request.append(request) return NOT_DONE_YET d = render(R(), {}) self.assertThat( d, has_no_result(), ) the_request[0].write(b"some content") the_request[0].finish() self.assertThat( d, succeeded( Equals(b"some content"), ), ) def test_unknown(self): """ If the decorated method returns something which is not explicitly supported, an internal server error is rendered into the response. """ self.assertThat( render(StaticResource(object()), {}), succeeded( Equals(b"Internal Server Error"), ), ) def test_disconnected(self): """ If the transport is disconnected before the response is available, no ``RuntimeError`` is logged for finishing a disconnected request. """ result = Deferred() resource = StaticResource(result) d = render(resource, {}) resource._request.connectionLost(Failure(ConnectionDone())) result.callback(b"Some result") self.assertThat( d, failed( AfterPreprocessing( lambda reason: reason.type, Equals(ConnectionDone), ), ), ) # Since we're not a trial TestCase we don't have flushLoggedErrors. # The next best thing is to make sure any dangling Deferreds have been # garbage collected and then let the generic trial logic for failing # tests with logged errors kick in. gc.collect() tahoe_lafs-1.20.0/src/allmydata/test/web/test_grid.py0000644000000000000000000020215513615410400017455 0ustar00""" Ported to Python 3. 
""" import os.path, re from urllib.parse import quote as url_quote import json from io import StringIO from bs4 import BeautifulSoup from twisted.web import resource from allmydata import uri, dirnode from allmydata.util import base32 from allmydata.util.encodingutil import to_bytes from allmydata.util.consumer import download_to_data from allmydata.util.netstring import split_netstring from allmydata.unknown import UnknownNode from allmydata.storage.shares import get_share_file from allmydata.scripts.debug import CorruptShareOptions, corrupt_share from allmydata.immutable import upload from allmydata.mutable import publish from ...web.common import ( render_exception, ) from .. import common_util as testutil from ..common import WebErrorMixin, ShouldFailMixin from ..no_network import GridTestMixin from .common import ( assert_soup_has_favicon, unknown_immcap, unknown_rocap, unknown_rwcap, ) from ..common import ( AsyncTestCase, ) from testtools.matchers import ( Equals, Contains, Not, HasLength, EndsWith, ) from testtools.twistedsupport import flush_logged_errors DIR_HTML_TAG = '' class CompletelyUnhandledError(Exception): pass class ErrorBoom(resource.Resource, object): @render_exception def render(self, req): raise CompletelyUnhandledError("whoops") class Grid(GridTestMixin, WebErrorMixin, ShouldFailMixin, testutil.ReallyEqualMixin, AsyncTestCase): def CHECK(self, ign, which, args, clientnum=0): fileurl = self.fileurls[which] url = fileurl + "?" + args return self.GET_unicode(url, method="POST", clientnum=clientnum) def GET_unicode(self, *args, **kwargs): """Send an HTTP request, but convert result to Unicode string.""" d = GridTestMixin.GET(self, *args, **kwargs) d.addCallback(str, "utf-8") return d def test_filecheck(self): self.basedir = "web/Grid/filecheck" self.set_up_grid() c0 = self.g.clients[0] self.uris = {} DATA = b"data" * 100 d = c0.upload(upload.Data(DATA, convergence=b"")) def _stash_uri(ur, which): self.uris[which] = ur.get_uri() d.addCallback(_stash_uri, "good") d.addCallback(lambda ign: c0.upload(upload.Data(DATA+b"1", convergence=b""))) d.addCallback(_stash_uri, "sick") d.addCallback(lambda ign: c0.upload(upload.Data(DATA+b"2", convergence=b""))) d.addCallback(_stash_uri, "dead") def _stash_mutable_uri(n, which): self.uris[which] = n.get_uri() assert isinstance(self.uris[which], bytes) d.addCallback(lambda ign: c0.create_mutable_file(publish.MutableData(DATA+b"3"))) d.addCallback(_stash_mutable_uri, "corrupt") d.addCallback(lambda ign: c0.upload(upload.Data(b"literal", convergence=b""))) d.addCallback(_stash_uri, "small") d.addCallback(lambda ign: c0.create_immutable_dirnode({})) d.addCallback(_stash_mutable_uri, "smalldir") def _compute_fileurls(ignored): self.fileurls = {} for which in self.uris: self.fileurls[which] = "uri/" + url_quote(self.uris[which]) d.addCallback(_compute_fileurls) def _clobber_shares(ignored): good_shares = self.find_uri_shares(self.uris["good"]) self.failUnlessReallyEqual(len(good_shares), 10) sick_shares = self.find_uri_shares(self.uris["sick"]) os.unlink(sick_shares[0][2]) dead_shares = self.find_uri_shares(self.uris["dead"]) for i in range(1, 10): os.unlink(dead_shares[i][2]) c_shares = self.find_uri_shares(self.uris["corrupt"]) cso = CorruptShareOptions() cso.stdout = StringIO() cso.parseOptions([c_shares[0][2]]) corrupt_share(cso) d.addCallback(_clobber_shares) d.addCallback(self.CHECK, "good", "t=check") def _got_html_good(res): self.assertThat(res, Contains("Healthy")) self.assertThat(res, Not(Contains("Not Healthy", ))) soup = 
BeautifulSoup(res, 'html5lib') assert_soup_has_favicon(self, soup) d.addCallback(_got_html_good) d.addCallback(self.CHECK, "good", "t=check&return_to=somewhere") def _got_html_good_return_to(res): self.assertThat(res, Contains("Healthy")) self.assertThat(res, Not(Contains("Not Healthy"))) self.assertThat(res, Contains('Return to file')) d.addCallback(_got_html_good_return_to) d.addCallback(self.CHECK, "good", "t=check&output=json") def _got_json_good(res): r = json.loads(res) self.failUnlessEqual(r["summary"], "Healthy") self.failUnless(r["results"]["healthy"]) self.assertThat(r["results"], Not(Contains("needs-rebalancing",))) self.failUnless(r["results"]["recoverable"]) d.addCallback(_got_json_good) d.addCallback(self.CHECK, "small", "t=check") def _got_html_small(res): self.assertThat(res, Contains("Literal files are always healthy")) self.assertThat(res, Not(Contains("Not Healthy"))) d.addCallback(_got_html_small) d.addCallback(self.CHECK, "small", "t=check&return_to=somewhere") def _got_html_small_return_to(res): self.assertThat(res, Contains("Literal files are always healthy")) self.assertThat(res, Not(Contains("Not Healthy"))) self.assertThat(res, Contains('Return to file')) d.addCallback(_got_html_small_return_to) d.addCallback(self.CHECK, "small", "t=check&output=json") def _got_json_small(res): r = json.loads(res) self.failUnlessEqual(r["storage-index"], "") self.failUnless(r["results"]["healthy"]) d.addCallback(_got_json_small) d.addCallback(self.CHECK, "smalldir", "t=check") def _got_html_smalldir(res): self.assertThat(res, Contains("Literal files are always healthy")) self.assertThat(res, Not(Contains("Not Healthy"))) d.addCallback(_got_html_smalldir) d.addCallback(self.CHECK, "smalldir", "t=check&output=json") def _got_json_smalldir(res): r = json.loads(res) self.failUnlessEqual(r["storage-index"], "") self.failUnless(r["results"]["healthy"]) d.addCallback(_got_json_smalldir) d.addCallback(self.CHECK, "sick", "t=check") def _got_html_sick(res): self.assertThat(res, Contains("Not Healthy")) d.addCallback(_got_html_sick) d.addCallback(self.CHECK, "sick", "t=check&output=json") def _got_json_sick(res): r = json.loads(res) self.failUnlessEqual(r["summary"], "Not Healthy: 9 shares (enc 3-of-10)") self.assertThat(r["results"]["healthy"], Equals(False)) self.failUnless(r["results"]["recoverable"]) self.assertThat(r["results"], Not(Contains("needs-rebalancing"))) d.addCallback(_got_json_sick) d.addCallback(self.CHECK, "dead", "t=check") def _got_html_dead(res): self.assertThat(res, Contains("Not Healthy")) d.addCallback(_got_html_dead) d.addCallback(self.CHECK, "dead", "t=check&output=json") def _got_json_dead(res): r = json.loads(res) self.failUnlessEqual(r["summary"], "Not Healthy: 1 shares (enc 3-of-10)") self.assertThat(r["results"]["healthy"], Equals(False)) self.assertThat(r["results"]["recoverable"], Equals(False)) self.assertThat(r["results"], Not(Contains("needs-rebalancing"))) d.addCallback(_got_json_dead) d.addCallback(self.CHECK, "corrupt", "t=check&verify=true") def _got_html_corrupt(res): self.assertThat(res, Contains("Not Healthy! 
: Unhealthy")) d.addCallback(_got_html_corrupt) d.addCallback(self.CHECK, "corrupt", "t=check&verify=true&output=json") def _got_json_corrupt(res): r = json.loads(res) self.assertThat(r["summary"], Contains("Unhealthy: 9 shares (enc 3-of-10)")) self.assertThat(r["results"]["healthy"], Equals(False)) self.failUnless(r["results"]["recoverable"]) self.assertThat(r["results"], Not(Contains("needs-rebalancing"))) self.failUnlessReallyEqual(r["results"]["count-happiness"], 9) self.failUnlessReallyEqual(r["results"]["count-shares-good"], 9) self.failUnlessReallyEqual(r["results"]["count-corrupt-shares"], 1) d.addCallback(_got_json_corrupt) d.addErrback(self.explain_web_error) return d def test_repair_html(self): self.basedir = "web/Grid/repair_html" self.set_up_grid() c0 = self.g.clients[0] self.uris = {} DATA = b"data" * 100 d = c0.upload(upload.Data(DATA, convergence=b"")) def _stash_uri(ur, which): self.uris[which] = ur.get_uri() d.addCallback(_stash_uri, "good") d.addCallback(lambda ign: c0.upload(upload.Data(DATA+b"1", convergence=b""))) d.addCallback(_stash_uri, "sick") d.addCallback(lambda ign: c0.upload(upload.Data(DATA+b"2", convergence=b""))) d.addCallback(_stash_uri, "dead") def _stash_mutable_uri(n, which): self.uris[which] = n.get_uri() assert isinstance(self.uris[which], bytes) d.addCallback(lambda ign: c0.create_mutable_file(publish.MutableData(DATA+b"3"))) d.addCallback(_stash_mutable_uri, "corrupt") def _compute_fileurls(ignored): self.fileurls = {} for which in self.uris: self.fileurls[which] = "uri/" + url_quote(self.uris[which]) d.addCallback(_compute_fileurls) def _clobber_shares(ignored): good_shares = self.find_uri_shares(self.uris["good"]) self.failUnlessReallyEqual(len(good_shares), 10) sick_shares = self.find_uri_shares(self.uris["sick"]) os.unlink(sick_shares[0][2]) dead_shares = self.find_uri_shares(self.uris["dead"]) for i in range(1, 10): os.unlink(dead_shares[i][2]) c_shares = self.find_uri_shares(self.uris["corrupt"]) cso = CorruptShareOptions() cso.stdout = StringIO() cso.parseOptions([c_shares[0][2]]) corrupt_share(cso) d.addCallback(_clobber_shares) d.addCallback(self.CHECK, "good", "t=check&repair=true") def _got_html_good(res): self.assertThat(res, Contains("Healthy")) self.assertThat(res, Not(Contains("Not Healthy"))) self.assertThat(res, Contains("No repair necessary", )) soup = BeautifulSoup(res, 'html5lib') assert_soup_has_favicon(self, soup) d.addCallback(_got_html_good) d.addCallback(self.CHECK, "sick", "t=check&repair=true") def _got_html_sick(res): self.assertThat(res, Contains("Healthy : healthy")) self.assertThat(res, Not(Contains("Not Healthy"))) self.assertThat(res, Contains("Repair successful")) d.addCallback(_got_html_sick) # repair of a dead file will fail, of course, but it isn't yet # clear how this should be reported. Right now it shows up as # a "410 Gone". 
# #d.addCallback(self.CHECK, "dead", "t=check&repair=true") #def _got_html_dead(res): # print(res) # self.failUnlessIn("Healthy : healthy", res) # self.failIfIn("Not Healthy", res) # self.failUnlessIn("No repair necessary", res) #d.addCallback(_got_html_dead) d.addCallback(self.CHECK, "corrupt", "t=check&verify=true&repair=true") def _got_html_corrupt(res): self.assertThat(res, Contains("Healthy : Healthy")) self.assertThat(res, Not(Contains("Not Healthy"))) self.assertThat(res, Contains("Repair successful")) d.addCallback(_got_html_corrupt) d.addErrback(self.explain_web_error) return d def test_repair_json(self): self.basedir = "web/Grid/repair_json" self.set_up_grid() c0 = self.g.clients[0] self.uris = {} DATA = b"data" * 100 d = c0.upload(upload.Data(DATA+b"1", convergence=b"")) def _stash_uri(ur, which): self.uris[which] = ur.get_uri() d.addCallback(_stash_uri, "sick") def _compute_fileurls(ignored): self.fileurls = {} for which in self.uris: self.fileurls[which] = "uri/" + url_quote(self.uris[which]) d.addCallback(_compute_fileurls) def _clobber_shares(ignored): sick_shares = self.find_uri_shares(self.uris["sick"]) os.unlink(sick_shares[0][2]) d.addCallback(_clobber_shares) d.addCallback(self.CHECK, "sick", "t=check&repair=true&output=json") def _got_json_sick(res): r = json.loads(res) self.failUnlessReallyEqual(r["repair-attempted"], True) self.failUnlessReallyEqual(r["repair-successful"], True) self.failUnlessEqual(r["pre-repair-results"]["summary"], "Not Healthy: 9 shares (enc 3-of-10)") self.failIf(r["pre-repair-results"]["results"]["healthy"]) self.failUnlessEqual(r["post-repair-results"]["summary"], "healthy") self.failUnless(r["post-repair-results"]["results"]["healthy"]) d.addCallback(_got_json_sick) d.addErrback(self.explain_web_error) return d def test_unknown(self, immutable=False): self.basedir = "web/Grid/unknown" if immutable: self.basedir = "web/Grid/unknown-immutable" self.set_up_grid(oneshare=True) c0 = self.g.clients[0] self.uris = {} self.fileurls = {} # the future cap format may contain slashes, which must be tolerated expected_info_url = "uri/%s?t=info" % url_quote(unknown_rwcap, safe="") if immutable: name = u"future-imm" future_node = UnknownNode(None, unknown_immcap, deep_immutable=True) d = c0.create_immutable_dirnode({name: (future_node, {})}) else: name = u"future" future_node = UnknownNode(unknown_rwcap, unknown_rocap) d = c0.create_dirnode() def _stash_root_and_create_file(n): self.rootnode = n self.rooturl = "uri/" + url_quote(n.get_uri()) self.rourl = "uri/" + url_quote(n.get_readonly_uri()) if not immutable: return self.rootnode.set_node(name, future_node) d.addCallback(_stash_root_and_create_file) # make sure directory listing tolerates unknown nodes d.addCallback(lambda ign: self.GET(self.rooturl)) def _check_directory_html(res, expected_type_suffix): pattern = re.compile(br'\?%s[ \t\n\r]*' b'%s' % ( expected_type_suffix, name.encode("ascii")), re.DOTALL) self.failUnless(re.search(pattern, res), res) # find the More Info link for name, should be relative mo = re.search(br'More Info', res) info_url = mo.group(1) self.failUnlessReallyEqual(info_url, b"%s?t=info" % (name.encode("ascii"),)) if immutable: d.addCallback(_check_directory_html, b"-IMM") else: d.addCallback(_check_directory_html, b"") d.addCallback(lambda ign: self.GET(self.rooturl+"?t=json")) def _check_directory_json(res, expect_rw_uri): data = json.loads(res) self.failUnlessEqual(data[0], "dirnode") f = data[1]["children"][name] self.failUnlessEqual(f[0], "unknown") if expect_rw_uri: 
self.failUnlessReallyEqual(to_bytes(f[1]["rw_uri"]), unknown_rwcap, data) else: self.assertThat(f[1], Not(Contains("rw_uri"))) if immutable: self.failUnlessReallyEqual(to_bytes(f[1]["ro_uri"]), unknown_immcap, data) else: self.failUnlessReallyEqual(to_bytes(f[1]["ro_uri"]), unknown_rocap, data) self.assertThat(f[1], Contains("metadata")) d.addCallback(_check_directory_json, expect_rw_uri=not immutable) def _check_info(res, expect_rw_uri, expect_ro_uri): if expect_rw_uri: self.assertThat(res, Contains(unknown_rwcap)) if expect_ro_uri: if immutable: self.assertThat(res, Contains(unknown_immcap)) else: self.assertThat(res, Contains(unknown_rocap)) else: self.assertThat(res, Not(Contains(unknown_rocap))) res = str(res, "utf-8") self.assertThat(res, Contains("Object Type: unknown")) self.assertThat(res, Not(Contains("Raw data as"))) self.assertThat(res, Not(Contains("Directory writecap"))) self.assertThat(res, Not(Contains("Checker Operations"))) self.assertThat(res, Not(Contains("Mutable File Operations"))) self.assertThat(res, Not(Contains("Directory Operations"))) # FIXME: these should have expect_rw_uri=not immutable; I don't know # why they fail. Possibly related to ticket #922. d.addCallback(lambda ign: self.GET(expected_info_url)) d.addCallback(_check_info, expect_rw_uri=False, expect_ro_uri=False) d.addCallback(lambda ign: self.GET("%s/%s?t=info" % (self.rooturl, name))) d.addCallback(_check_info, expect_rw_uri=False, expect_ro_uri=True) def _check_json(res, expect_rw_uri): data = json.loads(res) self.failUnlessEqual(data[0], "unknown") if expect_rw_uri: self.failUnlessReallyEqual(to_bytes(data[1]["rw_uri"]), unknown_rwcap, data) else: self.assertThat(data[1], Not(Contains("rw_uri"))) if immutable: self.failUnlessReallyEqual(to_bytes(data[1]["ro_uri"]), unknown_immcap, data) self.failUnlessReallyEqual(data[1]["mutable"], False) elif expect_rw_uri: self.failUnlessReallyEqual(to_bytes(data[1]["ro_uri"]), unknown_rocap, data) self.failUnlessReallyEqual(data[1]["mutable"], True) else: self.failUnlessReallyEqual(to_bytes(data[1]["ro_uri"]), unknown_rocap, data) self.assertThat(data[1], Not(Contains("mutable"))) # TODO: check metadata contents self.assertThat(data[1], Contains("metadata")) d.addCallback(lambda ign: self.GET("%s/%s?t=json" % (self.rooturl, str(name)))) d.addCallback(_check_json, expect_rw_uri=not immutable) # and make sure that a read-only version of the directory can be # rendered too. This version will not have unknown_rwcap, whether # or not future_node was immutable. d.addCallback(lambda ign: self.GET(self.rourl)) if immutable: d.addCallback(_check_directory_html, b"-IMM") else: d.addCallback(_check_directory_html, b"-RO") d.addCallback(lambda ign: self.GET(self.rourl+"?t=json")) d.addCallback(_check_directory_json, expect_rw_uri=False) d.addCallback(lambda ign: self.GET("%s/%s?t=json" % (self.rourl, str(name)))) d.addCallback(_check_json, expect_rw_uri=False) # TODO: check that getting t=info from the Info link in the ro directory # works, and does not include the writecap URI. 
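        # A possible shape for that TODO, reusing the _check_info helper
        # defined above (kept commented out because the expected flags for the
        # read-only rendering are an assumption, not something verified here):
        #
        #d.addCallback(lambda ign:
        #              self.GET("%s/%s?t=info" % (self.rourl, str(name))))
        #d.addCallback(_check_info, expect_rw_uri=False, expect_ro_uri=True)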
return d def test_immutable_unknown(self): return self.test_unknown(immutable=True) def test_mutant_dirnodes_are_omitted(self): self.basedir = "web/Grid/mutant_dirnodes_are_omitted" self.set_up_grid(oneshare=True) c = self.g.clients[0] nm = c.nodemaker self.uris = {} self.fileurls = {} lonely_uri = b"URI:LIT:n5xgk" # LIT for "one" mut_write_uri = b"URI:SSK:vfvcbdfbszyrsaxchgevhmmlii:euw4iw7bbnkrrwpzuburbhppuxhc3gwxv26f6imekhz7zyw2ojnq" mut_read_uri = b"URI:SSK-RO:e3mdrzfwhoq42hy5ubcz6rp3o4:ybyibhnp3vvwuq2vaw2ckjmesgkklfs6ghxleztqidihjyofgw7q" # This method tests mainly dirnode, but we'd have to duplicate code in order to # test the dirnode and web layers separately. # 'lonely' is a valid LIT child, 'ro' is a mutant child with an SSK-RO readcap, # and 'write-in-ro' is a mutant child with an SSK writecap in the ro_uri field. # When the directory is read, the mutants should be silently disposed of, leaving # their lonely sibling. # We don't test the case of a retrieving a cap from the encrypted rw_uri field, # because immutable directories don't have a writecap and therefore that field # isn't (and can't be) decrypted. # TODO: The field still exists in the netstring. Technically we should check what # happens if something is put there (_unpack_contents should raise ValueError), # but that can wait. lonely_child = nm.create_from_cap(lonely_uri) mutant_ro_child = nm.create_from_cap(mut_read_uri) mutant_write_in_ro_child = nm.create_from_cap(mut_write_uri) def _by_hook_or_by_crook(): return True for n in [mutant_ro_child, mutant_write_in_ro_child]: n.is_allowed_in_immutable_directory = _by_hook_or_by_crook mutant_write_in_ro_child.get_write_uri = lambda: None mutant_write_in_ro_child.get_readonly_uri = lambda: mut_write_uri kids = {u"lonely": (lonely_child, {}), u"ro": (mutant_ro_child, {}), u"write-in-ro": (mutant_write_in_ro_child, {}), } d = c.create_immutable_dirnode(kids) def _created(dn): self.failUnless(isinstance(dn, dirnode.DirectoryNode)) self.assertThat(dn.is_mutable(), Equals(False)) self.failUnless(dn.is_readonly()) # This checks that if we somehow ended up calling dn._decrypt_rwcapdata, it would fail. self.assertThat(hasattr(dn._node, 'get_writekey'), Equals(False)) rep = str(dn) self.assertThat(rep, Contains("RO-IMM")) cap = dn.get_cap() self.assertThat(cap.to_string(), Contains(b"CHK")) self.cap = cap self.rootnode = dn self.rooturl = "uri/" + url_quote(dn.get_uri()) return download_to_data(dn._node) d.addCallback(_created) def _check_data(data): # Decode the netstring representation of the directory to check that all children # are present. This is a bit of an abstraction violation, but there's not really # any other way to do it given that the real DirectoryNode._unpack_contents would # strip the mutant children out (which is what we're trying to test, later). position = 0 numkids = 0 while position < len(data): entries, position = split_netstring(data, 1, position) entry = entries[0] (name_utf8, ro_uri, rwcapdata, metadata_s), subpos = split_netstring(entry, 4) name = name_utf8.decode("utf-8") self.failUnlessEqual(rwcapdata, b"") self.assertThat(kids, Contains(name)) (expected_child, ign) = kids[name] self.failUnlessReallyEqual(ro_uri, expected_child.get_readonly_uri()) numkids += 1 self.failUnlessReallyEqual(numkids, 3) return self.rootnode.list() d.addCallback(_check_data) # Now when we use the real directory listing code, the mutants should be absent. 
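        # (For reference: the raw bytes walked through in _check_data above
        # frame each child as a netstring -- "<length>:<payload>," -- whose
        # payload is itself four netstrings: name_utf8, ro_uri, rwcapdata and
        # metadata.  A toy entry payload, with made-up metadata, would look
        # like
        #
        #   6:lonely,13:URI:LIT:n5xgk,0:,2:{},
        #
        # where the empty third field is the rwcapdata that immutable
        # directories never carry.)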
def _check_kids(children): self.failUnlessReallyEqual(sorted(children.keys()), [u"lonely"]) lonely_node, lonely_metadata = children[u"lonely"] self.failUnlessReallyEqual(lonely_node.get_write_uri(), None) self.failUnlessReallyEqual(lonely_node.get_readonly_uri(), lonely_uri) d.addCallback(_check_kids) d.addCallback(lambda ign: nm.create_from_cap(self.cap.to_string())) d.addCallback(lambda n: n.list()) d.addCallback(_check_kids) # again with dirnode recreated from cap # Make sure the lonely child can be listed in HTML... d.addCallback(lambda ign: self.GET(self.rooturl)) def _check_html(res): soup = BeautifulSoup(res, 'html5lib') self.assertThat(res, Not(Contains(b"URI:SSK"))) found = False for td in soup.find_all(u"td"): if td.text != u"FILE": continue a = td.findNextSibling()(u"a")[0] self.assertThat(a[u"href"], Contains(url_quote(lonely_uri))) self.assertThat(a.text, Equals(u"lonely")) self.assertThat(a[u"rel"], Equals([u"noreferrer"])) self.assertThat(td.findNextSibling().findNextSibling().text, Equals(u"{}".format(len("one")))) found = True break self.assertThat(found, Equals(True)) infos = list( a[u"href"] for a in soup.find_all(u"a") if a.text == u"More Info" ) self.assertThat(infos, HasLength(1)) self.assertThat(infos[0], EndsWith(url_quote(lonely_uri) + "?t=info")) d.addCallback(_check_html) # ... and in JSON. d.addCallback(lambda ign: self.GET(self.rooturl+"?t=json")) def _check_json(res): data = json.loads(res) self.failUnlessEqual(data[0], "dirnode") listed_children = data[1]["children"] self.failUnlessReallyEqual(sorted(listed_children.keys()), [u"lonely"]) ll_type, ll_data = listed_children[u"lonely"] self.failUnlessEqual(ll_type, "filenode") self.assertThat(ll_data, Not(Contains("rw_uri"))) self.failUnlessReallyEqual(to_bytes(ll_data["ro_uri"]), lonely_uri) d.addCallback(_check_json) return d def test_deep_check(self): self.basedir = "web/Grid/deep_check" self.set_up_grid() c0 = self.g.clients[0] self.uris = {} self.fileurls = {} DATA = b"data" * 100 d = c0.create_dirnode() def _stash_root_and_create_file(n): self.rootnode = n self.fileurls["root"] = "uri/" + url_quote(n.get_uri()) return n.add_file(u"good", upload.Data(DATA, convergence=b"")) d.addCallback(_stash_root_and_create_file) def _stash_uri(fn, which): self.uris[which] = fn.get_uri() return fn d.addCallback(_stash_uri, "good") d.addCallback(lambda ign: self.rootnode.add_file(u"small", upload.Data(b"literal", convergence=b""))) d.addCallback(_stash_uri, "small") d.addCallback(lambda ign: self.rootnode.add_file(u"sick", upload.Data(DATA+b"1", convergence=b""))) d.addCallback(_stash_uri, "sick") # this tests that deep-check and stream-manifest will ignore # UnknownNode instances. Hopefully this will also cover deep-stats. 
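        # For orientation: t=stream-deep-check emits one JSON object per line,
        # parent-first, and ends with a "stats" unit; _done below relies on a
        # shape roughly like this (illustrative values only):
        #
        #   {"path": ["good"], "type": "file", "cap": "URI:CHK:...",
        #    "check-results": {"results": {"count-happiness": 10,
        #                                  "count-shares-good": 10}}}
        #
        # with a final line of the form {"type": "stats", "stats": {...}}.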
future_node = UnknownNode(unknown_rwcap, unknown_rocap) d.addCallback(lambda ign: self.rootnode.set_node(u"future", future_node)) def _clobber_shares(ignored): self.delete_shares_numbered(self.uris["sick"], [0,1]) d.addCallback(_clobber_shares) # root # root/good # root/small # root/sick # root/future d.addCallback(self.CHECK, "root", "t=stream-deep-check") def _done(res): try: units = [json.loads(line) for line in res.splitlines() if line] except ValueError: print("response is:", res) raise self.failUnlessReallyEqual(len(units), 5+1) # should be parent-first u0 = units[0] self.failUnlessEqual(u0["path"], []) self.failUnlessEqual(u0["type"], "directory") self.failUnlessReallyEqual(to_bytes(u0["cap"]), self.rootnode.get_uri()) u0cr = u0["check-results"] self.failUnlessReallyEqual(u0cr["results"]["count-happiness"], 10) self.failUnlessReallyEqual(u0cr["results"]["count-shares-good"], 10) ugood = [u for u in units if u["type"] == "file" and u["path"] == [u"good"]][0] self.failUnlessReallyEqual(to_bytes(ugood["cap"]), self.uris["good"]) ugoodcr = ugood["check-results"] self.failUnlessReallyEqual(ugoodcr["results"]["count-happiness"], 10) self.failUnlessReallyEqual(ugoodcr["results"]["count-shares-good"], 10) stats = units[-1] self.failUnlessEqual(stats["type"], "stats") s = stats["stats"] self.failUnlessReallyEqual(s["count-immutable-files"], 2) self.failUnlessReallyEqual(s["count-literal-files"], 1) self.failUnlessReallyEqual(s["count-directories"], 1) self.failUnlessReallyEqual(s["count-unknown"], 1) d.addCallback(_done) d.addCallback(self.CHECK, "root", "t=stream-manifest") def _check_manifest(res): self.failUnless(res.endswith("\n")) units = [json.loads(t) for t in res[:-1].split("\n")] self.failUnlessReallyEqual(len(units), 5+1) self.failUnlessEqual(units[-1]["type"], "stats") first = units[0] self.failUnlessEqual(first["path"], []) self.failUnlessEqual(to_bytes(first["cap"]), self.rootnode.get_uri()) self.failUnlessEqual(first["type"], "directory") stats = units[-1]["stats"] self.failUnlessReallyEqual(stats["count-immutable-files"], 2) self.failUnlessReallyEqual(stats["count-literal-files"], 1) self.failUnlessReallyEqual(stats["count-mutable-files"], 0) self.failUnlessReallyEqual(stats["count-immutable-files"], 2) self.failUnlessReallyEqual(stats["count-unknown"], 1) d.addCallback(_check_manifest) # now add root/subdir and root/subdir/grandchild, then make subdir # unrecoverable, then see what happens d.addCallback(lambda ign: self.rootnode.create_subdirectory(u"subdir")) d.addCallback(_stash_uri, "subdir") d.addCallback(lambda subdir_node: subdir_node.add_file(u"grandchild", upload.Data(DATA+b"2", convergence=b""))) d.addCallback(_stash_uri, "grandchild") d.addCallback(lambda ign: self.delete_shares_numbered(self.uris["subdir"], list(range(1, 10)))) # root # root/good # root/small # root/sick # root/future # root/subdir [unrecoverable] # root/subdir/grandchild # how should a streaming-JSON API indicate fatal error? 
# answer: emit ERROR: instead of a JSON string d.addCallback(self.CHECK, "root", "t=stream-manifest") def _check_broken_manifest(res): lines = res.splitlines() error_lines = [i for (i,line) in enumerate(lines) if line.startswith("ERROR:")] if not error_lines: self.fail("no ERROR: in output: %s" % (res,)) first_error = error_lines[0] error_line = lines[first_error] error_msg = lines[first_error+1:] error_msg_s = "\n".join(error_msg) + "\n" self.assertThat(error_line, Contains("ERROR: UnrecoverableFileError(no recoverable versions)")) self.failUnless(len(error_msg) > 2, error_msg_s) # some traceback units = [json.loads(line) for line in lines[:first_error]] self.failUnlessReallyEqual(len(units), 6) # includes subdir last_unit = units[-1] self.failUnlessEqual(last_unit["path"], ["subdir"]) d.addCallback(_check_broken_manifest) d.addCallback(self.CHECK, "root", "t=stream-deep-check") def _check_broken_deepcheck(res): lines = res.splitlines() error_lines = [i for (i,line) in enumerate(lines) if line.startswith("ERROR:")] if not error_lines: self.fail("no ERROR: in output: %s" % (res,)) first_error = error_lines[0] error_line = lines[first_error] error_msg = lines[first_error+1:] error_msg_s = "\n".join(error_msg) + "\n" self.assertThat(error_line, Contains("ERROR: UnrecoverableFileError(no recoverable versions)")) self.failUnless(len(error_msg) > 2, error_msg_s) # some traceback units = [json.loads(line) for line in lines[:first_error]] self.failUnlessReallyEqual(len(units), 6) # includes subdir last_unit = units[-1] self.failUnlessEqual(last_unit["path"], ["subdir"]) r = last_unit["check-results"]["results"] self.failUnlessReallyEqual(r["count-recoverable-versions"], 0) self.failUnlessReallyEqual(r["count-happiness"], 1) self.failUnlessReallyEqual(r["count-shares-good"], 1) self.failUnlessReallyEqual(r["recoverable"], False) d.addCallback(_check_broken_deepcheck) d.addErrback(self.explain_web_error) return d def test_deep_check_and_repair(self): self.basedir = "web/Grid/deep_check_and_repair" self.set_up_grid() c0 = self.g.clients[0] self.uris = {} self.fileurls = {} DATA = b"data" * 100 d = c0.create_dirnode() def _stash_root_and_create_file(n): self.rootnode = n self.fileurls["root"] = "uri/" + url_quote(n.get_uri()) return n.add_file(u"good", upload.Data(DATA, convergence=b"")) d.addCallback(_stash_root_and_create_file) def _stash_uri(fn, which): self.uris[which] = fn.get_uri() d.addCallback(_stash_uri, "good") d.addCallback(lambda ign: self.rootnode.add_file(u"small", upload.Data(b"literal", convergence=b""))) d.addCallback(_stash_uri, "small") d.addCallback(lambda ign: self.rootnode.add_file(u"sick", upload.Data(DATA+b"1", convergence=b""))) d.addCallback(_stash_uri, "sick") #d.addCallback(lambda ign: # self.rootnode.add_file(u"dead", # upload.Data(DATA+b"2", # convergence=b""))) #d.addCallback(_stash_uri, "dead") #d.addCallback(lambda ign: c0.create_mutable_file("mutable")) #d.addCallback(lambda fn: self.rootnode.set_node(u"corrupt", fn)) #d.addCallback(_stash_uri, "corrupt") def _clobber_shares(ignored): good_shares = self.find_uri_shares(self.uris["good"]) self.failUnlessReallyEqual(len(good_shares), 10) sick_shares = self.find_uri_shares(self.uris["sick"]) os.unlink(sick_shares[0][2]) #dead_shares = self.find_uri_shares(self.uris["dead"]) #for i in range(1, 10): # os.unlink(dead_shares[i][2]) #c_shares = self.find_uri_shares(self.uris["corrupt"]) #cso = CorruptShareOptions() #cso.stdout = StringIO() #cso.parseOptions([c_shares[0][2]]) #corrupt_share(cso) d.addCallback(_clobber_shares) 
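        # With repair=true each streamed unit carries a
        # "check-and-repair-results" member; _done below depends on a nesting
        # roughly like this (values illustrative):
        #
        #   {"path": ["sick"], "type": "file",
        #    "check-and-repair-results": {
        #        "repair-attempted": true,
        #        "repair-successful": true,
        #        "pre-repair-results":  {"results": {"count-shares-good": 9}},
        #        "post-repair-results": {"results": {"count-shares-good": 10}}}}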
# root # root/good CHK, 10 shares # root/small LIT # root/sick CHK, 9 shares d.addCallback(self.CHECK, "root", "t=stream-deep-check&repair=true") def _done(res): units = [json.loads(line) for line in res.splitlines() if line] self.failUnlessReallyEqual(len(units), 4+1) # should be parent-first u0 = units[0] self.failUnlessEqual(u0["path"], []) self.failUnlessEqual(u0["type"], "directory") self.failUnlessReallyEqual(to_bytes(u0["cap"]), self.rootnode.get_uri()) u0crr = u0["check-and-repair-results"] self.failUnlessReallyEqual(u0crr["repair-attempted"], False) self.failUnlessReallyEqual(u0crr["pre-repair-results"]["results"]["count-happiness"], 10) self.failUnlessReallyEqual(u0crr["pre-repair-results"]["results"]["count-shares-good"], 10) ugood = [u for u in units if u["type"] == "file" and u["path"] == [u"good"]][0] self.failUnlessEqual(to_bytes(ugood["cap"]), self.uris["good"]) ugoodcrr = ugood["check-and-repair-results"] self.failUnlessReallyEqual(ugoodcrr["repair-attempted"], False) self.failUnlessReallyEqual(ugoodcrr["pre-repair-results"]["results"]["count-happiness"], 10) self.failUnlessReallyEqual(ugoodcrr["pre-repair-results"]["results"]["count-shares-good"], 10) usick = [u for u in units if u["type"] == "file" and u["path"] == [u"sick"]][0] self.failUnlessReallyEqual(to_bytes(usick["cap"]), self.uris["sick"]) usickcrr = usick["check-and-repair-results"] self.failUnlessReallyEqual(usickcrr["repair-attempted"], True) self.failUnlessReallyEqual(usickcrr["repair-successful"], True) self.failUnlessReallyEqual(usickcrr["pre-repair-results"]["results"]["count-happiness"], 9) self.failUnlessReallyEqual(usickcrr["pre-repair-results"]["results"]["count-shares-good"], 9) self.failUnlessReallyEqual(usickcrr["post-repair-results"]["results"]["count-happiness"], 10) self.failUnlessReallyEqual(usickcrr["post-repair-results"]["results"]["count-shares-good"], 10) stats = units[-1] self.failUnlessEqual(stats["type"], "stats") s = stats["stats"] self.failUnlessReallyEqual(s["count-immutable-files"], 2) self.failUnlessReallyEqual(s["count-literal-files"], 1) self.failUnlessReallyEqual(s["count-directories"], 1) d.addCallback(_done) d.addErrback(self.explain_web_error) return d def _count_leases(self, ignored, which): u = self.uris[which] shares = self.find_uri_shares(u) lease_counts = [] for shnum, serverid, fn in shares: sf = get_share_file(fn) num_leases = len(list(sf.get_leases())) lease_counts.append( (fn, num_leases) ) return lease_counts def _assert_leasecount(self, lease_counts, expected): for (fn, num_leases) in lease_counts: if num_leases != expected: self.fail("expected %d leases, have %d, on %s" % (expected, num_leases, fn)) def test_add_lease(self): self.basedir = "web/Grid/add_lease" self.set_up_grid(num_clients=2, oneshare=True) c0 = self.g.clients[0] self.uris = {} DATA = b"data" * 100 d = c0.upload(upload.Data(DATA, convergence=b"")) def _stash_uri(ur, which): self.uris[which] = ur.get_uri() d.addCallback(_stash_uri, "one") d.addCallback(lambda ign: c0.upload(upload.Data(DATA+b"1", convergence=b""))) d.addCallback(_stash_uri, "two") def _stash_mutable_uri(n, which): self.uris[which] = n.get_uri() assert isinstance(self.uris[which], bytes) d.addCallback(lambda ign: c0.create_mutable_file(publish.MutableData(DATA+b"2"))) d.addCallback(_stash_mutable_uri, "mutable") def _compute_fileurls(ignored): self.fileurls = {} for which in self.uris: self.fileurls[which] = "uri/" + url_quote(self.uris[which]) d.addCallback(_compute_fileurls) d.addCallback(self._count_leases, "one") 
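        # _count_leases passes a list of (sharefile, lease-count) pairs down
        # the chain and _assert_leasecount checks every entry against the
        # expected value, so each _count_leases callback is immediately
        # followed by an _assert_leasecount, as in the chain starting just
        # above.  A freshly uploaded object begins with exactly one lease: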
d.addCallback(self._assert_leasecount, 1) d.addCallback(self._count_leases, "two") d.addCallback(self._assert_leasecount, 1) d.addCallback(self._count_leases, "mutable") d.addCallback(self._assert_leasecount, 1) d.addCallback(self.CHECK, "one", "t=check") # no add-lease def _got_html_good(res): self.assertThat(res, Contains("Healthy")) self.assertThat(res, Not(Contains("Not Healthy"))) d.addCallback(_got_html_good) d.addCallback(self._count_leases, "one") d.addCallback(self._assert_leasecount, 1) d.addCallback(self._count_leases, "two") d.addCallback(self._assert_leasecount, 1) d.addCallback(self._count_leases, "mutable") d.addCallback(self._assert_leasecount, 1) # this CHECK uses the original client, which uses the same # lease-secrets, so it will just renew the original lease d.addCallback(self.CHECK, "one", "t=check&add-lease=true") d.addCallback(_got_html_good) d.addCallback(self._count_leases, "one") d.addCallback(self._assert_leasecount, 1) d.addCallback(self._count_leases, "two") d.addCallback(self._assert_leasecount, 1) d.addCallback(self._count_leases, "mutable") d.addCallback(self._assert_leasecount, 1) # this CHECK uses an alternate client, which adds a second lease d.addCallback(self.CHECK, "one", "t=check&add-lease=true", clientnum=1) d.addCallback(_got_html_good) d.addCallback(self._count_leases, "one") d.addCallback(self._assert_leasecount, 2) d.addCallback(self._count_leases, "two") d.addCallback(self._assert_leasecount, 1) d.addCallback(self._count_leases, "mutable") d.addCallback(self._assert_leasecount, 1) d.addCallback(self.CHECK, "mutable", "t=check&add-lease=true") d.addCallback(_got_html_good) d.addCallback(self._count_leases, "one") d.addCallback(self._assert_leasecount, 2) d.addCallback(self._count_leases, "two") d.addCallback(self._assert_leasecount, 1) d.addCallback(self._count_leases, "mutable") d.addCallback(self._assert_leasecount, 1) d.addCallback(self.CHECK, "mutable", "t=check&add-lease=true", clientnum=1) d.addCallback(_got_html_good) d.addCallback(self._count_leases, "one") d.addCallback(self._assert_leasecount, 2) d.addCallback(self._count_leases, "two") d.addCallback(self._assert_leasecount, 1) d.addCallback(self._count_leases, "mutable") d.addCallback(self._assert_leasecount, 2) d.addErrback(self.explain_web_error) return d def test_deep_add_lease(self): self.basedir = "web/Grid/deep_add_lease" self.set_up_grid(num_clients=2, oneshare=True) c0 = self.g.clients[0] self.uris = {} self.fileurls = {} DATA = b"data" * 100 d = c0.create_dirnode() def _stash_root_and_create_file(n): self.rootnode = n self.uris["root"] = n.get_uri() self.fileurls["root"] = "uri/" + url_quote(n.get_uri()) return n.add_file(u"one", upload.Data(DATA, convergence=b"")) d.addCallback(_stash_root_and_create_file) def _stash_uri(fn, which): self.uris[which] = fn.get_uri() d.addCallback(_stash_uri, "one") d.addCallback(lambda ign: self.rootnode.add_file(u"small", upload.Data(b"literal", convergence=b""))) d.addCallback(_stash_uri, "small") d.addCallback(lambda ign: c0.create_mutable_file(publish.MutableData(b"mutable"))) d.addCallback(lambda fn: self.rootnode.set_node(u"mutable", fn)) d.addCallback(_stash_uri, "mutable") d.addCallback(self.CHECK, "root", "t=stream-deep-check") # no add-lease def _done(res): units = [json.loads(line) for line in res.splitlines() if line] # root, one, small, mutable, stats self.failUnlessReallyEqual(len(units), 4+1) d.addCallback(_done) d.addCallback(self._count_leases, "root") d.addCallback(self._assert_leasecount, 1) d.addCallback(self._count_leases, 
"one") d.addCallback(self._assert_leasecount, 1) d.addCallback(self._count_leases, "mutable") d.addCallback(self._assert_leasecount, 1) d.addCallback(self.CHECK, "root", "t=stream-deep-check&add-lease=true") d.addCallback(_done) d.addCallback(self._count_leases, "root") d.addCallback(self._assert_leasecount, 1) d.addCallback(self._count_leases, "one") d.addCallback(self._assert_leasecount, 1) d.addCallback(self._count_leases, "mutable") d.addCallback(self._assert_leasecount, 1) d.addCallback(self.CHECK, "root", "t=stream-deep-check&add-lease=true", clientnum=1) d.addCallback(_done) d.addCallback(self._count_leases, "root") d.addCallback(self._assert_leasecount, 2) d.addCallback(self._count_leases, "one") d.addCallback(self._assert_leasecount, 2) d.addCallback(self._count_leases, "mutable") d.addCallback(self._assert_leasecount, 2) d.addErrback(self.explain_web_error) return d def test_exceptions(self): self.basedir = "web/Grid/exceptions" self.set_up_grid(num_clients=1, num_servers=2) c0 = self.g.clients[0] c0.encoding_params['happy'] = 2 self.fileurls = {} DATA = b"data" * 100 d = c0.create_dirnode() def _stash_root(n): self.fileurls["root"] = "uri/" + url_quote(n.get_uri()) self.fileurls["imaginary"] = self.fileurls["root"] + "/imaginary" return n d.addCallback(_stash_root) d.addCallback(lambda ign: c0.upload(upload.Data(DATA, convergence=b""))) def _stash_bad(ur): self.fileurls["1share"] = "uri/" + url_quote(ur.get_uri()) self.delete_shares_numbered(ur.get_uri(), list(range(1,10))) u = uri.from_string(ur.get_uri()) u.key = testutil.flip_bit(u.key, 0) baduri = u.to_string() self.fileurls["0shares"] = "uri/" + url_quote(baduri) d.addCallback(_stash_bad) d.addCallback(lambda ign: c0.create_dirnode()) def _mangle_dirnode_1share(n): u = n.get_uri() url = self.fileurls["dir-1share"] = "uri/" + url_quote(u) self.fileurls["dir-1share-json"] = url + "?t=json" self.delete_shares_numbered(u, list(range(1,10))) d.addCallback(_mangle_dirnode_1share) d.addCallback(lambda ign: c0.create_dirnode()) def _mangle_dirnode_0share(n): u = n.get_uri() url = self.fileurls["dir-0share"] = "uri/" + url_quote(u) self.fileurls["dir-0share-json"] = url + "?t=json" self.delete_shares_numbered(u, list(range(0,10))) d.addCallback(_mangle_dirnode_0share) # NotEnoughSharesError should be reported sensibly, with a # text/plain explanation of the problem, and perhaps some # information on which shares *could* be found. d.addCallback(lambda ignored: self.shouldHTTPError("GET unrecoverable", 410, "Gone", "NoSharesError", self.GET, self.fileurls["0shares"])) def _check_zero_shares(body): body = str(body, "utf-8") self.assertThat(body, Not(Contains(""))) body = " ".join(body.strip().split()) exp = ("NoSharesError: no shares could be found. " "Zero shares usually indicates a corrupt URI, or that " "no servers were connected, but it might also indicate " "severe corruption. You should perform a filecheck on " "this object to learn more. The full error message is: " "no shares (need 3). Last failure: None") self.assertEqual(exp, body) d.addCallback(_check_zero_shares) d.addCallback(lambda ignored: self.shouldHTTPError("GET 1share", 410, "Gone", "NotEnoughSharesError", self.GET, self.fileurls["1share"])) def _check_one_share(body): body = str(body, "utf-8") self.assertThat(body, Not(Contains(""))) body = " ".join(body.strip().split()) msgbase = ("NotEnoughSharesError: This indicates that some " "servers were unavailable, or that shares have been " "lost to server departure, hard drive failure, or disk " "corruption. 
You should perform a filecheck on " "this object to learn more. The full error message is:" ) msg1 = msgbase + (" ran out of shares:" " complete=sh0" " pending=" " overdue= unused= need 3. Last failure: None") msg2 = msgbase + (" ran out of shares:" " complete=" " pending=Share(sh0-on-ysbz4st7)" " overdue= unused= need 3. Last failure: None") self.failUnless(body == msg1 or body == msg2, body) d.addCallback(_check_one_share) d.addCallback(lambda ignored: self.shouldHTTPError("GET imaginary", 404, "Not Found", None, self.GET, self.fileurls["imaginary"])) def _missing_child(body): body = str(body, "utf-8") self.assertThat(body, Contains("No such child: imaginary")) d.addCallback(_missing_child) d.addCallback(lambda ignored: self.GET_unicode(self.fileurls["dir-0share"])) def _check_0shares_dir_html(body): self.assertThat(body, Contains(DIR_HTML_TAG)) # we should see the regular page, but without the child table or # the dirops forms body = " ".join(body.strip().split()) self.assertThat(body, Contains('href="?t=info">More info on this directory')) exp = ("UnrecoverableFileError: the directory (or mutable file) " "could not be retrieved, because there were insufficient " "good shares. This might indicate that no servers were " "connected, insufficient servers were connected, the URI " "was corrupt, or that shares have been lost due to server " "departure, hard drive failure, or disk corruption. You " "should perform a filecheck on this object to learn more.") self.assertThat(body, Contains(exp)) self.assertThat(body, Contains("No upload forms: directory is unreadable")) d.addCallback(_check_0shares_dir_html) d.addCallback(lambda ignored: self.GET_unicode(self.fileurls["dir-1share"])) def _check_1shares_dir_html(body): # at some point, we'll split UnrecoverableFileError into 0-shares # and some-shares like we did for immutable files (since there # are different sorts of advice to offer in each case). For now, # they present the same way. self.assertThat(body, Contains(DIR_HTML_TAG)) body = " ".join(body.strip().split()) self.assertThat(body, Contains('href="?t=info">More info on this directory')) exp = ("UnrecoverableFileError: the directory (or mutable file) " "could not be retrieved, because there were insufficient " "good shares. This might indicate that no servers were " "connected, insufficient servers were connected, the URI " "was corrupt, or that shares have been lost due to server " "departure, hard drive failure, or disk corruption. You " "should perform a filecheck on this object to learn more.") self.assertThat(body, Contains(exp)) self.assertThat(body, Contains("No upload forms: directory is unreadable")) d.addCallback(_check_1shares_dir_html) d.addCallback(lambda ignored: self.shouldHTTPError("GET dir-0share-json", 410, "Gone", "UnrecoverableFileError", self.GET, self.fileurls["dir-0share-json"])) def _check_unrecoverable_file(body): body = str(body, "utf-8") self.assertThat(body, Not(Contains(""))) body = " ".join(body.strip().split()) exp = ("UnrecoverableFileError: the directory (or mutable file) " "could not be retrieved, because there were insufficient " "good shares. This might indicate that no servers were " "connected, insufficient servers were connected, the URI " "was corrupt, or that shares have been lost due to server " "departure, hard drive failure, or disk corruption. 
You " "should perform a filecheck on this object to learn more.") self.assertThat(body, Contains(exp)) d.addCallback(_check_unrecoverable_file) d.addCallback(lambda ignored: self.shouldHTTPError("GET dir-1share-json", 410, "Gone", "UnrecoverableFileError", self.GET, self.fileurls["dir-1share-json"])) d.addCallback(_check_unrecoverable_file) d.addCallback(lambda ignored: self.shouldHTTPError("GET imaginary", 404, "Not Found", None, self.GET, self.fileurls["imaginary"])) # attach a webapi child that throws a random error, to test how it # gets rendered. w = c0.getServiceNamed("webish") w.root.putChild(b"ERRORBOOM", ErrorBoom()) # "Accept: */*" : should get a text/html stack trace # "Accept: text/plain" : should get a text/plain stack trace # "Accept: text/plain, application/octet-stream" : text/plain (CLI) # no Accept header: should get a text/html stack trace d.addCallback(lambda ignored: self.shouldHTTPError("GET errorboom_html", 500, "Internal Server Error", None, self.GET, "ERRORBOOM", headers={"accept": "*/*"})) def _internal_error_html1(body): body = str(body, "utf-8") self.assertThat("expected HTML, not '%s'" % body, Contains("")) d.addCallback(_internal_error_html1) d.addCallback(lambda ignored: self.shouldHTTPError("GET errorboom_text", 500, "Internal Server Error", None, self.GET, "ERRORBOOM", headers={"accept": "text/plain"})) def _internal_error_text2(body): body = str(body, "utf-8") self.assertThat(body, Not(Contains(""))) self.failUnless(body.startswith("Traceback "), body) d.addCallback(_internal_error_text2) CLI_accepts = "text/plain, application/octet-stream" d.addCallback(lambda ignored: self.shouldHTTPError("GET errorboom_text", 500, "Internal Server Error", None, self.GET, "ERRORBOOM", headers={"accept": CLI_accepts})) def _internal_error_text3(body): body = str(body, "utf-8") self.assertThat(body, Not(Contains(""))) self.failUnless(body.startswith("Traceback "), body) d.addCallback(_internal_error_text3) d.addCallback(lambda ignored: self.shouldHTTPError("GET errorboom_text", 500, "Internal Server Error", None, self.GET, "ERRORBOOM")) def _internal_error_html4(body): self.assertThat(body, Contains(b"")) d.addCallback(_internal_error_html4) def _flush_errors(res): # Trial: please ignore the CompletelyUnhandledError in the logs flush_logged_errors(CompletelyUnhandledError) return res d.addBoth(_flush_errors) return d def test_blacklist(self): # download from a blacklisted URI, get an error self.basedir = "web/Grid/blacklist" self.set_up_grid(oneshare=True) c0 = self.g.clients[0] fn = c0.config.get_config_path("access.blacklist") self.uris = {} DATA = b"off-limits " * 50 d = c0.upload(upload.Data(DATA, convergence=b"")) def _stash_uri_and_create_dir(ur): self.uri = ur.get_uri() self.url = b"uri/"+self.uri u = uri.from_string_filenode(self.uri) self.si = u.get_storage_index() childnode = c0.create_node_from_uri(self.uri, None) return c0.create_dirnode({u"blacklisted.txt": (childnode,{}) }) d.addCallback(_stash_uri_and_create_dir) def _stash_dir(node): self.dir_node = node self.dir_uri = node.get_uri() self.dir_url = b"uri/"+self.dir_uri d.addCallback(_stash_dir) d.addCallback(lambda ign: self.GET_unicode(self.dir_url, followRedirect=True)) def _check_dir_html(body): self.assertThat(body, Contains(DIR_HTML_TAG)) self.assertThat(body, Contains("blacklisted.txt")) d.addCallback(_check_dir_html) d.addCallback(lambda ign: self.GET(self.url)) d.addCallback(lambda body: self.failUnlessEqual(DATA, body)) def _blacklist(ign): f = open(fn, "w") f.write(" # this is a comment\n") 
f.write(" \n") f.write("\n") # also exercise blank lines f.write("%s off-limits to you\n" % (str(base32.b2a(self.si), "ascii"),)) f.close() # clients should be checking the blacklist each time, so we don't # need to restart the client d.addCallback(_blacklist) d.addCallback(lambda ign: self.shouldHTTPError("get_from_blacklisted_uri", 403, "Forbidden", "Access Prohibited: off-limits", self.GET, self.url)) # We should still be able to list the parent directory, in HTML... d.addCallback(lambda ign: self.GET_unicode(self.dir_url, followRedirect=True)) def _check_dir_html2(body): self.assertThat(body, Contains(DIR_HTML_TAG)) self.assertThat(body, Contains("blacklisted.txt")) d.addCallback(_check_dir_html2) # ... and in JSON (used by CLI). d.addCallback(lambda ign: self.GET(self.dir_url+b"?t=json", followRedirect=True)) def _check_dir_json(res): data = json.loads(res) self.failUnless(isinstance(data, list), data) self.failUnlessEqual(data[0], "dirnode") self.failUnless(isinstance(data[1], dict), data) self.assertThat(data[1], Contains("children")) self.assertThat(data[1]["children"], Contains("blacklisted.txt")) childdata = data[1]["children"]["blacklisted.txt"] self.failUnless(isinstance(childdata, list), data) self.failUnlessEqual(childdata[0], "filenode") self.failUnless(isinstance(childdata[1], dict), data) d.addCallback(_check_dir_json) def _unblacklist(ign): open(fn, "w").close() # the Blacklist object watches mtime to tell when the file has # changed, but on windows this test will run faster than the # filesystem's mtime resolution. So we edit Blacklist.last_mtime # to force a reload. self.g.clients[0].blacklist.last_mtime -= 2.0 d.addCallback(_unblacklist) # now a read should work d.addCallback(lambda ign: self.GET(self.url)) d.addCallback(lambda body: self.failUnlessEqual(DATA, body)) # read again to exercise the blacklist-is-unchanged logic d.addCallback(lambda ign: self.GET(self.url)) d.addCallback(lambda body: self.failUnlessEqual(DATA, body)) # now add a blacklisted directory, and make sure files under it are # refused too def _add_dir(ign): childnode = c0.create_node_from_uri(self.uri, None) return c0.create_dirnode({u"child": (childnode,{}) }) d.addCallback(_add_dir) def _get_dircap(dn): self.dir_si_b32 = base32.b2a(dn.get_storage_index()) self.dir_url_base = b"uri/"+dn.get_write_uri() self.dir_url_json1 = b"uri/"+dn.get_write_uri()+b"?t=json" self.dir_url_json2 = b"uri/"+dn.get_write_uri()+b"?t=json" self.dir_url_json_ro = b"uri/"+dn.get_readonly_uri()+b"?t=json" self.child_url = b"uri/"+dn.get_readonly_uri()+b"/child" d.addCallback(_get_dircap) d.addCallback(lambda ign: self.GET(self.dir_url_base, followRedirect=True)) d.addCallback(lambda body: self.assertThat(str(body, "utf-8"), Contains(DIR_HTML_TAG))) d.addCallback(lambda ign: self.GET(self.dir_url_json1)) d.addCallback(lambda res: json.loads(res)) # just check it decodes d.addCallback(lambda ign: self.GET(self.dir_url_json2)) d.addCallback(lambda res: json.loads(res)) # just check it decodes d.addCallback(lambda ign: self.GET(self.dir_url_json_ro)) d.addCallback(lambda res: json.loads(res)) # just check it decodes d.addCallback(lambda ign: self.GET(self.child_url)) d.addCallback(lambda body: self.failUnlessEqual(DATA, body)) def _block_dir(ign): f = open(fn, "wb") f.write(b"%s %s\n" % (self.dir_si_b32, b"dir-off-limits to you")) f.close() self.g.clients[0].blacklist.last_mtime -= 2.0 d.addCallback(_block_dir) d.addCallback(lambda ign: self.shouldHTTPError("get_from_blacklisted_dir base", 403, "Forbidden", "Access 
Prohibited: dir-off-limits", self.GET, self.dir_url_base)) d.addCallback(lambda ign: self.shouldHTTPError("get_from_blacklisted_dir json1", 403, "Forbidden", "Access Prohibited: dir-off-limits", self.GET, self.dir_url_json1)) d.addCallback(lambda ign: self.shouldHTTPError("get_from_blacklisted_dir json2", 403, "Forbidden", "Access Prohibited: dir-off-limits", self.GET, self.dir_url_json2)) d.addCallback(lambda ign: self.shouldHTTPError("get_from_blacklisted_dir json_ro", 403, "Forbidden", "Access Prohibited: dir-off-limits", self.GET, self.dir_url_json_ro)) d.addCallback(lambda ign: self.shouldHTTPError("get_from_blacklisted_dir child", 403, "Forbidden", "Access Prohibited: dir-off-limits", self.GET, self.child_url)) return d tahoe_lafs-1.20.0/src/allmydata/test/web/test_introducer.py0000644000000000000000000001602513615410400020705 0ustar00""" Ported to Python 3. """ import json from os.path import join from bs4 import BeautifulSoup from twisted.internet import reactor from twisted.internet import defer from testtools.twistedsupport import succeeded from ..common import ( SyncTestCase, AsyncTestCase, ) from foolscap.api import ( fireEventually, flushEventualQueue, Tub, ) import allmydata from allmydata.introducer import ( create_introducer, ) from allmydata.introducer.server import ( _IntroducerNode, ) from allmydata.web.introweb import ( IntroducerRoot, ) from allmydata import node from .common import ( assert_soup_has_favicon, assert_soup_has_text, assert_soup_has_tag_with_attributes, ) from ..common import ( SameProcessStreamEndpointAssigner, ) from ..common_util import ( FakeCanary, ) from ..common_web import ( do_http, render, ) from testtools.matchers import ( Equals, AfterPreprocessing, ) @defer.inlineCallbacks def create_introducer_webish(reactor, port_assigner, basedir): """ Create and start an introducer node and return it and its ``WebishServer`` service. :param reactor: The reactor to use to allow the introducer node to use to listen for connections. :param SameProcessStreamEndpointAssigner port_assigner: The assigner to use to assign a listening port for the introducer node. :param bytes basedir: A non-existant path where the introducer node will be created. :return Deferred[(_IntroducerNode, WebishServer)]: A Deferred that fires with the node and its webish service. """ node.create_node_dir(basedir, "testing") main_tub_location, main_tub_endpoint = port_assigner.assign(reactor) _, web_port_endpoint = port_assigner.assign(reactor) with open(join(basedir, "tahoe.cfg"), "w") as f: f.write( "[node]\n" "tub.port = {main_tub_endpoint}\n" "tub.location = {main_tub_location}\n" "web.port = {web_port_endpoint}\n".format( main_tub_endpoint=main_tub_endpoint, main_tub_location=main_tub_location, web_port_endpoint=web_port_endpoint, ) ) intro_node = yield create_introducer(basedir) ws = intro_node.getServiceNamed("webish") yield fireEventually(None) intro_node.startService() defer.returnValue((intro_node, ws)) class IntroducerWeb(AsyncTestCase): """ Tests for web-facing functionality of an introducer node. """ def setUp(self): self.node = None self.port_assigner = SameProcessStreamEndpointAssigner() self.port_assigner.setUp() self.addCleanup(self.port_assigner.tearDown) # Anything using Foolscap leaves some timer trash in the reactor that # we have to arrange to have cleaned up. 
self.addCleanup(lambda: flushEventualQueue(None)) return super(IntroducerWeb, self).setUp() @defer.inlineCallbacks def test_welcome(self): node, ws = yield create_introducer_webish( reactor, self.port_assigner, self.mktemp(), ) self.addCleanup(node.stopService) url = "http://localhost:%d/" % (ws.getPortnum(),) res = yield do_http("get", url) soup = BeautifulSoup(res, 'html5lib') assert_soup_has_text(self, soup, u'Welcome to the Tahoe-LAFS Introducer') assert_soup_has_favicon(self, soup) assert_soup_has_text(self, soup, u'Page rendered at') assert_soup_has_text(self, soup, u'Tahoe-LAFS code imported from:') @defer.inlineCallbacks def test_basic_information(self): """ The introducer web page includes the software version and several other simple pieces of information. """ node, ws = yield create_introducer_webish( reactor, self.port_assigner, self.mktemp(), ) self.addCleanup(node.stopService) url = "http://localhost:%d/" % (ws.getPortnum(),) res = yield do_http("get", url) soup = BeautifulSoup(res, 'html5lib') assert_soup_has_text( self, soup, allmydata.__full_version__, ) assert_soup_has_text(self, soup, u"no peers!") assert_soup_has_text(self, soup, u"subscribers!") assert_soup_has_tag_with_attributes( self, soup, "link", {"href": "/tahoe.css"}, ) @defer.inlineCallbacks def test_tahoe_css(self): """ The introducer serves the css. """ node, ws = yield create_introducer_webish( reactor, self.port_assigner, self.mktemp(), ) self.addCleanup(node.stopService) url = "http://localhost:%d/tahoe.css" % (ws.getPortnum(),) # Just don't return an error. If it does, do_http will raise # something. yield do_http("get", url) @defer.inlineCallbacks def test_json_front_page(self): """ The front page can be served as json. """ node, ws = yield create_introducer_webish( reactor, self.port_assigner, self.mktemp(), ) self.addCleanup(node.stopService) url = "http://localhost:%d/?t=json" % (ws.getPortnum(),) res = yield do_http("get", url) data = json.loads(res) self.assertEqual(data["subscription_summary"], {}) self.assertEqual(data["announcement_summary"], {}) class IntroducerRootTests(SyncTestCase): """ Tests for ``IntroducerRoot``. """ def test_json(self): """ The JSON response includes totals for the number of subscriptions and announcements of each service type. """ config = node.config_from_string(self.mktemp(), "", "") config.get_private_path = lambda ignored: self.mktemp() main_tub = Tub() main_tub.listenOn(b"tcp:0") main_tub.setLocation(b"tcp:127.0.0.1:1") introducer_node = _IntroducerNode(config, main_tub, None, None) introducer_service = introducer_node.getServiceNamed("introducer") for n in range(2): introducer_service.add_subscriber( FakeCanary(), "arbitrary", {"info": "info"}, ) # It would be nice to use the publish method but then we have to # generate a correctly signed message which I don't feel like doing. ann_t = ("msg", "sig", "key") ann = {"service-name": "arbitrary"} introducer_service._announcements[("arbitrary", "key")] = ( ann_t, FakeCanary(), ann, 0, ) resource = IntroducerRoot(introducer_node) response = render(resource, {b"t": [b"json"]}) expected = { u"subscription_summary": {"arbitrary": 2}, u"announcement_summary": {"arbitrary": 1}, } self.assertThat( response, succeeded(AfterPreprocessing(json.loads, Equals(expected)))) tahoe_lafs-1.20.0/src/allmydata/test/web/test_logs.py0000644000000000000000000000576313615410400017502 0ustar00""" Tests for ``allmydata.web.logs``. Ported to Python 3. 
""" import json from twisted.internet.defer import inlineCallbacks from autobahn.twisted.testing import create_memory_agent, MemoryReactorClockResolver, create_pumper from testtools.matchers import ( Equals, ) from testtools.twistedsupport import ( succeeded, ) from twisted.web.http import ( OK, ) from treq.client import ( HTTPClient, ) from treq.testing import ( RequestTraversalAgent, ) from .matchers import ( has_response_code, ) from ..common import ( SyncTestCase, AsyncTestCase, ) from ...web.logs import ( create_log_resources, TokenAuthenticatedWebSocketServerProtocol, ) from eliot import log_call class StreamingEliotLogsTests(SyncTestCase): """ Tests for the log streaming resources created by ``create_log_resources``. """ def setUp(self): self.resource = create_log_resources() self.agent = RequestTraversalAgent(self.resource) self.client = HTTPClient(self.agent) return super(StreamingEliotLogsTests, self).setUp() def test_v1(self): """ There is a resource at *v1*. """ self.assertThat( self.client.get(b"http:///v1"), succeeded(has_response_code(Equals(OK))), ) class TestStreamingLogs(AsyncTestCase): """ Test websocket streaming of logs """ def setUp(self): super(TestStreamingLogs, self).setUp() self.reactor = MemoryReactorClockResolver() self.pumper = create_pumper() self.agent = create_memory_agent(self.reactor, self.pumper, TokenAuthenticatedWebSocketServerProtocol) return self.pumper.start() def tearDown(self): super(TestStreamingLogs, self).tearDown() return self.pumper.stop() @inlineCallbacks def test_one_log(self): """ Write a single Eliot log action and see it streamed via websocket. """ proto = yield self.agent.open( transport_config=u"ws://localhost:1234/ws", options={}, ) messages = [] def got_message(msg, is_binary=False): messages.append(json.loads(msg)) proto.on("message", got_message) @log_call(action_type=u"test:cli:some-exciting-action") def do_a_thing(arguments): pass do_a_thing(arguments=[u"hello", b"good-\xff-day", 123, {"a": 35}, [None]]) proto.transport.loseConnection() yield proto.is_closed self.assertThat(len(messages), Equals(3), messages) self.assertThat(messages[0]["action_type"], Equals("test:cli:some-exciting-action")) self.assertThat(messages[0]["arguments"], Equals(["hello", "good-\\xff-day", 123, {"a": 35}, [None]])) self.assertThat(messages[1]["action_type"], Equals("test:cli:some-exciting-action")) self.assertThat("started", Equals(messages[0]["action_status"])) self.assertThat("succeeded", Equals(messages[1]["action_status"])) tahoe_lafs-1.20.0/src/allmydata/test/web/test_private.py0000644000000000000000000000544313615410400020203 0ustar00""" Tests for ``allmydata.web.private``. Ported to Python 3. """ from testtools.matchers import ( Equals, ) from testtools.twistedsupport import ( succeeded, ) from twisted.web.http import ( UNAUTHORIZED, NOT_FOUND, ) from twisted.web.http_headers import ( Headers, ) from treq.client import ( HTTPClient, ) from treq.testing import ( RequestTraversalAgent, ) from ..common import ( SyncTestCase, ) from ...web.private import ( SCHEME, create_private_tree, ) from .matchers import ( has_response_code, ) class PrivacyTests(SyncTestCase): """ Tests for the privacy features of the resources created by ``create_private_tree``. 
""" def setUp(self): self.token = b"abcdef" self.resource = create_private_tree(lambda: self.token) self.agent = RequestTraversalAgent(self.resource) self.client = HTTPClient(self.agent) return super(PrivacyTests, self).setUp() def _authorization(self, scheme, value): value = str(value, "utf-8") return Headers({ u"authorization": [u"{} {}".format(scheme, value)], }) def test_unauthorized(self): """ A request without an *Authorization* header receives an *Unauthorized* response. """ self.assertThat( self.client.head(b"http:///foo/bar"), succeeded(has_response_code(Equals(UNAUTHORIZED))), ) def test_wrong_scheme(self): """ A request with an *Authorization* header not containing the Tahoe-LAFS scheme receives an *Unauthorized* response. """ self.assertThat( self.client.head( b"http:///foo/bar", headers=self._authorization(u"basic", self.token), ), succeeded(has_response_code(Equals(UNAUTHORIZED))), ) def test_wrong_token(self): """ A request with an *Authorization* header not containing the expected token receives an *Unauthorized* response. """ self.assertThat( self.client.head( b"http:///foo/bar", headers=self._authorization(str(SCHEME, "utf-8"), b"foo bar"), ), succeeded(has_response_code(Equals(UNAUTHORIZED))), ) def test_authorized(self): """ A request with an *Authorization* header containing the expected scheme and token does not receive an *Unauthorized* response. """ self.assertThat( self.client.head( b"http:///foo/bar", headers=self._authorization(str(SCHEME, "utf-8"), self.token), ), # It's a made up URL so we don't get a 200, either, but a 404. succeeded(has_response_code(Equals(NOT_FOUND))), ) tahoe_lafs-1.20.0/src/allmydata/test/web/test_root.py0000644000000000000000000001564313615410400017517 0ustar00""" Ported to Python 3. """ import time import json from urllib.parse import ( quote, ) from bs4 import ( BeautifulSoup, ) from twisted.web.template import Tag from twisted.web.test.requesthelper import DummyRequest from twisted.application import service from testtools.twistedsupport import succeeded from twisted.internet.defer import ( inlineCallbacks, succeed, ) from ...storage_client import ( NativeStorageServer, StorageFarmBroker, ) from ...web.root import ( RootElement, Root, ) from ...util.connection_status import ConnectionStatus from ...crypto.ed25519 import ( create_signing_keypair, ) from allmydata.web.root import URIHandler from allmydata.client import _Client from .common import ( assert_soup_has_tag_with_attributes, ) from ..common_web import ( render, ) from ..common import ( EMPTY_CLIENT_CONFIG, ) from ..common import ( SyncTestCase, AsyncTestCase, ) from testtools.matchers import ( Equals, Contains, AfterPreprocessing, ) class RenderSlashUri(SyncTestCase): """ Ensure that URIs starting with /uri?uri= only accept valid capabilities """ def setUp(self): self.client = object() self.res = URIHandler(self.client) super(RenderSlashUri, self).setUp() @inlineCallbacks def test_valid_query_redirect(self): """ A syntactically valid capability given in the ``uri`` query argument results in a redirect. 
""" cap = ( b"URI:CHK:nt2xxmrccp7sursd6yh2thhcky:" b"mukesarwdjxiyqsjinbfiiro6q7kgmmekocxfjcngh23oxwyxtzq:2:5:5874882" ) query_args = {b"uri": [cap]} response_body = yield render(self.res, query_args) soup = BeautifulSoup(response_body, 'html5lib') tag = assert_soup_has_tag_with_attributes( self, soup, u"meta", {u"http-equiv": "refresh"}, ) self.assertThat( tag.attrs.get(u"content"), Contains(quote(cap, safe="")), ) def test_invalid(self): """ A syntactically invalid capbility results in an error. """ query_args = {b"uri": [b"not a capability"]} response_body = render(self.res, query_args) self.assertThat( response_body, succeeded(AfterPreprocessing(bytes, Equals(b"Invalid capability"))), ) class RenderServiceRow(SyncTestCase): def test_missing(self): """ minimally-defined static servers just need anonymous-storage-FURL and permutation-seed-base32. The WUI used to have problems rendering servers that lacked nickname and version. This tests that we can render such minimal servers. """ ann = {"anonymous-storage-FURL": "pb://w2hqnbaa25yw4qgcvghl5psa3srpfgw3@tcp:127.0.0.1:51309/vucto2z4fxment3vfxbqecblbf6zyp6x", "permutation-seed-base32": "w2hqnbaa25yw4qgcvghl5psa3srpfgw3", } srv = NativeStorageServer(b"server_id", ann, None, {}, EMPTY_CLIENT_CONFIG) srv.get_connection_status = lambda: ConnectionStatus(False, "summary", {}, 0, 0) class FakeClient(_Client): def __init__(self): service.MultiService.__init__(self) self.storage_broker = StorageFarmBroker( permute_peers=True, tub_maker=None, node_config=EMPTY_CLIENT_CONFIG, ) self.storage_broker.test_add_server(b"test-srv", srv) root = RootElement(FakeClient(), time.time) req = DummyRequest(b"") tag = Tag(b"") # Pick all items from services table. items = root.services_table(req, tag).item(req, tag) # Coerce `items` to list and pick the first item from it. 
item = list(items)[0] self.assertThat(item.slotData.get("version"), Equals("")) self.assertThat(item.slotData.get("nickname"), Equals("")) class RenderRoot(AsyncTestCase): @inlineCallbacks def test_root_json(self): """ The 'welcome' / root page renders properly with ?t=json when some servers show None for available_space while others show a valid int. See also https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3852 """ ann = { "anonymous-storage-FURL": "pb://w2hqnbaa25yw4qgcvghl5psa3srpfgw3@tcp:127.0.0.1:51309/vucto2z4fxment3vfxbqecblbf6zyp6x", "permutation-seed-base32": "w2hqnbaa25yw4qgcvghl5psa3srpfgw3", } srv0 = NativeStorageServer(b"server_id0", ann, None, {}, EMPTY_CLIENT_CONFIG) srv0.get_connection_status = lambda: ConnectionStatus(False, "summary0", {}, 0, 0) srv1 = NativeStorageServer(b"server_id1", ann, None, {}, EMPTY_CLIENT_CONFIG) srv1.get_connection_status = lambda: ConnectionStatus(False, "summary1", {}, 0, 0) # arrange for this server to have some valid available space srv1.get_available_space = lambda: 12345 class FakeClient(_Client): history = [] stats_provider = object() nickname = "" nodeid = b"asdf" _node_public_key = create_signing_keypair()[1] introducer_clients = [] helper = None def __init__(self): service.MultiService.__init__(self) self.storage_broker = StorageFarmBroker( permute_peers=True, tub_maker=None, node_config=EMPTY_CLIENT_CONFIG, ) self.storage_broker.test_add_server(b"test-srv0", srv0) self.storage_broker.test_add_server(b"test-srv1", srv1) root = Root(FakeClient(), now_fn=time.time) lines = [] req = DummyRequest(b"") req.fields = {} req.args = { b"t": [b"json"], } # for some reason, DummyRequest is already finished when we # try to add a notifyFinish handler, so override that # behavior. def nop(): return succeed(None) req.notifyFinish = nop req.write = lines.append yield root.render(req) raw_js = b"".join(lines).decode("utf8") js = json.loads(raw_js) servers = js["servers"] self.assertEquals(len(servers), 2) self.assertIn( { "connection_status": "summary0", "nodeid": "server_id0", "last_received_data": 0, "version": None, "available_space": None, "nickname": "" }, servers ) self.assertIn( { "connection_status": "summary1", "nodeid": "server_id1", "last_received_data": 0, "version": None, "available_space": 12345, "nickname": "" }, servers ) tahoe_lafs-1.20.0/src/allmydata/test/web/test_status.py0000644000000000000000000001457713615410400020064 0ustar00""" Tests for ```allmydata.web.status```. Ported to Python 3. """ from bs4 import BeautifulSoup from twisted.web.template import flattenString from allmydata.web.status import ( Status, StatusElement, ) from zope.interface import implementer from allmydata.interfaces import IDownloadResults from allmydata.web.status import DownloadStatusElement from allmydata.immutable.downloader.status import DownloadStatus from .common import ( assert_soup_has_favicon, assert_soup_has_tag_with_content, ) from ..common import TrialTestCase from .test_web import FakeHistory # Test that status.StatusElement can render HTML. 
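# Illustrative sketch, not part of the original module: the rendering tests
# below all use the same idiom. twisted.web.template's flattenString()
# returns a Deferred that fires with the rendered markup as bytes, and
# trial's successResultOf() unwraps that Deferred synchronously inside a
# test case. Assuming `elem` is any template Element with a loader, a
# standalone helper (hypothetical name) could look like this:

from twisted.web.template import flattenString  # already imported above; repeated so the sketch stands alone

def _render_element_to_bytes(test_case, elem):
    # Flatten `elem` without a request object and return the rendered bytes,
    # the same pattern used by _render_status_page() and
    # _render_download_status_element() in the tests below.
    return test_case.successResultOf(flattenString(None, elem))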
class StatusTests(TrialTestCase): def _render_status_page(self, active, recent): elem = StatusElement(active, recent) d = flattenString(None, elem) return self.successResultOf(d) def test_status_page(self): status = Status(FakeHistory()) doc = self._render_status_page( status._get_active_operations(), status._get_recent_operations() ) soup = BeautifulSoup(doc, 'html5lib') assert_soup_has_favicon(self, soup) assert_soup_has_tag_with_content( self, soup, u"title", u"Tahoe-LAFS - Recent and Active Operations" ) assert_soup_has_tag_with_content( self, soup, u"h2", u"Active Operations:" ) assert_soup_has_tag_with_content( self, soup, u"td", u"retrieve" ) assert_soup_has_tag_with_content( self, soup, u"td", u"publish" ) assert_soup_has_tag_with_content( self, soup, u"td", u"download" ) assert_soup_has_tag_with_content( self, soup, u"td", u"upload" ) assert_soup_has_tag_with_content( self, soup, u"h2", "Recent Operations:" ) @implementer(IDownloadResults) class FakeDownloadResults(object): def __init__(self, file_size=0, servers_used=None, server_problems=None, servermap=None, timings=None): """ See IDownloadResults for parameters. """ self.file_size = file_size self.servers_used = servers_used self.server_problems = server_problems self.servermap = servermap self.timings = timings class FakeDownloadStatus(DownloadStatus): def __init__(self, storage_index = None, file_size = 0, servers_used = None, server_problems = None, servermap = None, timings = None): """ See IDownloadStatus and IDownloadResults for parameters. """ super(FakeDownloadStatus, self).__init__(storage_index, file_size) self.servers_used = servers_used self.server_problems = server_problems self.servermap = servermap self.timings = timings def get_results(self): return FakeDownloadResults(self.size, self.servers_used, self.server_problems, self.servermap, self.timings) class DownloadStatusElementTests(TrialTestCase): """ Tests for ```allmydata.web.status.DownloadStatusElement```. """ def _render_download_status_element(self, status): """ :param IDownloadStatus status: :return: HTML string rendered by DownloadStatusElement """ elem = DownloadStatusElement(status) d = flattenString(None, elem) return self.successResultOf(d) def test_download_status_element(self): """ See if we can render the page almost fully. 
""" status = FakeDownloadStatus( b"si-1", 123, [b"s-1", b"s-2", b"s-3"], {b"s-1": "unknown problem"}, {b"s-1": [1], b"s-2": [1,2], b"s-3": [2,3]}, {"fetch_per_server": {b"s-1": [1], b"s-2": [2,3], b"s-3": [3,2]}} ) result = self._render_download_status_element(status) soup = BeautifulSoup(result, 'html5lib') assert_soup_has_favicon(self, soup) assert_soup_has_tag_with_content( self, soup, u"title", u"Tahoe-LAFS - File Download Status" ) assert_soup_has_tag_with_content( self, soup, u"li", u"File Size: 123 bytes" ) assert_soup_has_tag_with_content( self, soup, u"li", u"Progress: 0.0%" ) assert_soup_has_tag_with_content( self, soup, u"li", u"Servers Used: [omwtc], [omwte], [omwtg]" ) assert_soup_has_tag_with_content( self, soup, u"li", u"Server Problems:" ) assert_soup_has_tag_with_content( self, soup, u"li", u"[omwtc]: unknown problem" ) assert_soup_has_tag_with_content(self, soup, u"li", u"Servermap:") assert_soup_has_tag_with_content( self, soup, u"li", u"[omwtc] has share: #1" ) assert_soup_has_tag_with_content( self, soup, u"li", u"[omwte] has shares: #1,#2" ) assert_soup_has_tag_with_content( self, soup, u"li", u"[omwtg] has shares: #2,#3" ) assert_soup_has_tag_with_content( self, soup, u"li", u"Per-Server Segment Fetch Response Times:" ) assert_soup_has_tag_with_content( self, soup, u"li", u"[omwtc]: 1.00s" ) assert_soup_has_tag_with_content( self, soup, u"li", u"[omwte]: 2.00s, 3.00s" ) assert_soup_has_tag_with_content( self, soup, u"li", u"[omwtg]: 3.00s, 2.00s" ) def test_download_status_element_partial(self): """ See if we can render the page with incomplete download status. """ status = FakeDownloadStatus() result = self._render_download_status_element(status) soup = BeautifulSoup(result, 'html5lib') assert_soup_has_tag_with_content( self, soup, u"li", u"Servermap: None" ) assert_soup_has_tag_with_content( self, soup, u"li", u"File Size: 0 bytes" ) assert_soup_has_tag_with_content( self, soup, u"li", u"Total: None (None)" ) tahoe_lafs-1.20.0/src/allmydata/test/web/test_util.py0000644000000000000000000001070613615410400017504 0ustar00""" Ported to Python 3. """ from twisted.trial import unittest from allmydata.web import status, common from allmydata.dirnode import ONLY_FILES from ..common import ShouldFailMixin from .. 
import common_util as testutil class Util(ShouldFailMixin, testutil.ReallyEqualMixin, unittest.TestCase): def test_parse_replace_arg(self): self.failUnlessReallyEqual(common.parse_replace_arg(b"true"), True) self.failUnlessReallyEqual(common.parse_replace_arg(b"false"), False) self.failUnlessReallyEqual(common.parse_replace_arg(b"only-files"), ONLY_FILES) self.failUnlessRaises(common.WebError, common.parse_replace_arg, b"only_fles") def test_abbreviate_time(self): self.failUnlessReallyEqual(common.abbreviate_time(None), "") self.failUnlessReallyEqual(common.abbreviate_time(1.234), "1.23s") self.failUnlessReallyEqual(common.abbreviate_time(0.123), "123ms") self.failUnlessReallyEqual(common.abbreviate_time(0.00123), "1.2ms") self.failUnlessReallyEqual(common.abbreviate_time(0.000123), "123us") self.failUnlessReallyEqual(common.abbreviate_time(-123000), "-123000000000us") self.failUnlessReallyEqual(common.abbreviate_time(2.5), "2.50s") self.failUnlessReallyEqual(common.abbreviate_time(0.25), "250ms") self.failUnlessReallyEqual(common.abbreviate_time(0.0021), "2.1ms") self.failUnlessReallyEqual(common.abbreviate_time(None), "") self.failUnlessReallyEqual(common.abbreviate_time(2.5), "2.50s") self.failUnlessReallyEqual(common.abbreviate_time(0.25), "250ms") self.failUnlessReallyEqual(common.abbreviate_time(0.0021), "2.1ms") self.failUnlessReallyEqual(common.abbreviate_time(0.000123), "123us") self.failUnlessReallyEqual(common.abbreviate_rate(None), "") self.failUnlessReallyEqual(common.abbreviate_rate(2500000), "2.50MBps") self.failUnlessReallyEqual(common.abbreviate_rate(30100), "30.1kBps") self.failUnlessReallyEqual(common.abbreviate_rate(123), "123Bps") def test_compute_rate(self): self.failUnlessReallyEqual(common.compute_rate(None, None), None) self.failUnlessReallyEqual(common.compute_rate(None, 1), None) self.failUnlessReallyEqual(common.compute_rate(250000, None), None) self.failUnlessReallyEqual(common.compute_rate(250000, 0), None) self.failUnlessReallyEqual(common.compute_rate(250000, 10), 25000.0) self.failUnlessReallyEqual(common.compute_rate(0, 10), 0.0) self.shouldFail(AssertionError, "test_compute_rate", "", common.compute_rate, -100, 10) self.shouldFail(AssertionError, "test_compute_rate", "", common.compute_rate, 100, -10) # Sanity check rate = common.compute_rate(10*1000*1000, 1) self.failUnlessReallyEqual(common.abbreviate_rate(rate), "10.00MBps") def test_abbreviate_rate(self): self.failUnlessReallyEqual(common.abbreviate_rate(None), "") self.failUnlessReallyEqual(common.abbreviate_rate(1234000), "1.23MBps") self.failUnlessReallyEqual(common.abbreviate_rate(12340), "12.3kBps") self.failUnlessReallyEqual(common.abbreviate_rate(123), "123Bps") self.failUnlessReallyEqual(common.abbreviate_rate(2500000), "2.50MBps") self.failUnlessReallyEqual(common.abbreviate_rate(30100), "30.1kBps") self.failUnlessReallyEqual(common.abbreviate_rate(123), "123Bps") def test_abbreviate_size(self): self.failUnlessReallyEqual(common.abbreviate_size(None), "") self.failUnlessReallyEqual(common.abbreviate_size(1.23*1000*1000*1000), "1.23GB") self.failUnlessReallyEqual(common.abbreviate_size(1.23*1000*1000), "1.23MB") self.failUnlessReallyEqual(common.abbreviate_size(1230), "1.2kB") self.failUnlessReallyEqual(common.abbreviate_size(123), "123B") def test_plural(self): def convert(s): return "%d second%s" % (s, status.plural(s)) self.failUnlessReallyEqual(convert(0), "0 seconds") self.failUnlessReallyEqual(convert(1), "1 second") self.failUnlessReallyEqual(convert(2), "2 seconds") def convert2(s): 
return "has share%s: %s" % (status.plural(s), ",".join(s)) self.failUnlessReallyEqual(convert2([]), "has shares: ") self.failUnlessReallyEqual(convert2(["1"]), "has share: 1") self.failUnlessReallyEqual(convert2(["1","2"]), "has shares: 1,2") tahoe_lafs-1.20.0/src/allmydata/test/web/test_web.py0000644000000000000000000066633113615410400017317 0ustar00""" Tests for a bunch of web-related APIs. """ from __future__ import annotations from six import ensure_binary import os.path, re, time import treq from urllib.parse import quote as urlquote, unquote as urlunquote from base64 import urlsafe_b64encode from bs4 import BeautifulSoup from twisted.python.filepath import ( FilePath, ) from twisted.application import service from twisted.internet import defer from twisted.internet.defer import inlineCallbacks, returnValue from twisted.internet.task import Clock from twisted.web import client, error, http from twisted.python import failure, log from allmydata import interfaces, uri, webish from allmydata.storage_client import StorageFarmBroker, StubServer from allmydata.immutable import upload from allmydata.immutable.downloader.status import DownloadStatus from allmydata.dirnode import DirectoryNode from allmydata.nodemaker import NodeMaker from allmydata.web.common import MultiFormatResource from allmydata.util import fileutil, base32, hashutil, jsonbytes as json from allmydata.util.consumer import download_to_data from allmydata.util.encodingutil import to_bytes from ...util.connection_status import ConnectionStatus from ...crypto.rsa import PublicKey, PrivateKey, create_signing_keypair, der_string_from_signing_key from ..common import ( EMPTY_CLIENT_CONFIG, FakeCHKFileNode, FakeMutableFileNode, create_chk_filenode, WebErrorMixin, make_mutable_file_uri, create_mutable_filenode, TrialTestCase, ) from .common import ( assert_soup_has_favicon, assert_soup_has_text, assert_soup_has_tag_with_attributes, assert_soup_has_tag_with_content, assert_soup_has_tag_with_attributes_and_content, unknown_rwcap, unknown_rocap, unknown_immcap, ) from allmydata.interfaces import ( IMutableFileNode, SDMF_VERSION, MDMF_VERSION, FileTooLargeError, MustBeReadonlyError, ) from allmydata.mutable import servermap, publish, retrieve from allmydata.mutable.common import derive_mutable_keys from .. 
import common_util as testutil from ..common_util import TimezoneMixin from ..common_web import ( do_http, Error, render, ) from ...web.common import ( humanize_exception, ) from allmydata.client import _Client, SecretHolder # create a fake uploader/downloader, and a couple of fake dirnodes, then # create a webserver that works against them class FakeStatsProvider(object): def get_stats(self): stats = {'stats': {}, 'counters': {}} return stats class FakeNodeMaker(NodeMaker): encoding_params = { 'k': 3, 'n': 10, 'happy': 7, 'max_segment_size':128*1024 # 1024=KiB } all_contents: dict[bytes, object] def _create_lit(self, cap): return FakeCHKFileNode(cap, self.all_contents) def _create_immutable(self, cap): return FakeCHKFileNode(cap, self.all_contents) def _create_mutable(self, cap): return FakeMutableFileNode(None, None, self.encoding_params, None, self.all_contents, None).init_from_cap(cap) def create_mutable_file(self, contents=None, version=None, keypair: tuple[PublicKey, PrivateKey] | None=None, ): if contents is None: contents = b"" if version is None: version = SDMF_VERSION n = FakeMutableFileNode(None, None, self.encoding_params, None, self.all_contents, keypair) return n.create(contents, version=version) class FakeUploader(service.Service): name = "uploader" # type: ignore # https://twistedmatrix.com/trac/ticket/10135 helper_furl = None helper_connected = False def upload(self, uploadable, **kw): d = uploadable.get_size() d.addCallback(lambda size: uploadable.read(size)) def _got_data(datav): data = b"".join(datav) n = create_chk_filenode(data, self.all_contents) ur = upload.UploadResults(file_size=len(data), ciphertext_fetched=0, preexisting_shares=0, pushed_shares=10, sharemap={}, servermap={}, timings={}, uri_extension_data={}, uri_extension_hash=b"fake", verifycapstr=b"fakevcap") ur.set_uri(n.get_uri()) return ur d.addCallback(_got_data) return d def get_helper_info(self): return (self.helper_furl, self.helper_connected) def build_one_ds(): ds = DownloadStatus(b"storage_index", 1234) now = time.time() serverA = StubServer(hashutil.tagged_hash(b"foo", b"serverid_a")[:20]) serverB = StubServer(hashutil.tagged_hash(b"foo", b"serverid_b")[:20]) storage_index = hashutil.storage_index_hash(b"SI") e0 = ds.add_segment_request(0, now) e0.activate(now+0.5) e0.deliver(now+1, 0, 100, 0.5) # when, start,len, decodetime e1 = ds.add_segment_request(1, now+2) e1.error(now+3) # two outstanding requests e2 = ds.add_segment_request(2, now+4) e3 = ds.add_segment_request(3, now+5) del e2,e3 # hush pyflakes # simulate a segment which gets delivered faster than a system clock tick (ticket #1166) e = ds.add_segment_request(4, now) e.activate(now) e.deliver(now, 0, 140, 0.5) e = ds.add_dyhb_request(serverA, now) e.finished([1,2], now+1) e = ds.add_dyhb_request(serverB, now+2) # left unfinished e = ds.add_read_event(0, 120, now) e.update(60, 0.5, 0.1) # bytes, decrypttime, pausetime e.finished(now+1) e = ds.add_read_event(120, 30, now+2) # left unfinished e = ds.add_block_request(serverA, 1, 100, 20, now) e.finished(20, now+1) e = ds.add_block_request(serverB, 1, 120, 30, now+1) # left unfinished # make sure that add_read_event() can come first too ds1 = DownloadStatus(storage_index, 1234) e = ds1.add_read_event(0, 120, now) e.update(60, 0.5, 0.1) # bytes, decrypttime, pausetime e.finished(now+1) return ds class FakeHistory(object): _all_upload_status = [upload.UploadStatus()] _all_download_status = [build_one_ds()] _all_mapupdate_statuses = [servermap.UpdateStatus()] _all_publish_statuses = 
[publish.PublishStatus()] _all_retrieve_statuses = [retrieve.RetrieveStatus()] def list_all_upload_statuses(self): return self._all_upload_status def list_all_download_statuses(self): return self._all_download_status def list_all_mapupdate_statuses(self): return self._all_mapupdate_statuses def list_all_publish_statuses(self): return self._all_publish_statuses def list_all_retrieve_statuses(self): return self._all_retrieve_statuses def list_all_helper_statuses(self): return [] class FakeDisplayableServer(StubServer): # type: ignore # tahoe-lafs/ticket/3573 def __init__(self, serverid, nickname, connected, last_connect_time, last_loss_time, last_rx_time): StubServer.__init__(self, serverid) self.announcement = {"my-version": "tahoe-lafs-fake", "service-name": "storage", "nickname": nickname} self.connected = connected self.last_loss_time = last_loss_time self.last_rx_time = last_rx_time self.last_connect_time = last_connect_time def on_status_changed(self, cb): # TODO: try to remove me cb(self) def is_connected(self): # TODO: remove me return self.connected def get_version(self): return { b"application-version": b"1.0" } def get_permutation_seed(self): return b"" def get_announcement(self): return self.announcement def get_nickname(self): return self.announcement["nickname"] def get_available_space(self): return 123456 def get_connection_status(self): return ConnectionStatus(self.connected, "summary", {}, self.last_connect_time, self.last_rx_time) class FakeBucketCounter(object): def get_state(self): return {"last-complete-bucket-count": 0} def get_progress(self): return {"estimated-time-per-cycle": 0, "cycle-in-progress": False, "remaining-wait-time": 0} class FakeLeaseChecker(object): def __init__(self): self.expiration_enabled = False self.mode = "age" self.override_lease_duration = None self.sharetypes_to_expire = {} def get_state(self): return {"history": None} def get_progress(self): return {"estimated-time-per-cycle": 0, "cycle-in-progress": False, "remaining-wait-time": 0} class FakeStorageServer(service.MultiService): name = 'storage' # type: ignore # https://twistedmatrix.com/trac/ticket/10135 def __init__(self, nodeid, nickname): service.MultiService.__init__(self) self.my_nodeid = nodeid self.nickname = nickname self.bucket_counter = FakeBucketCounter() self.lease_checker = FakeLeaseChecker() def get_stats(self): return {"storage_server.accepting_immutable_shares": False} def on_status_changed(self, cb): cb(self) class FakeClient(_Client): # type: ignore # tahoe-lafs/ticket/3573 def __init__(self): # don't upcall to Client.__init__, since we only want to initialize a # minimal subset service.MultiService.__init__(self) self.all_contents = {} self.nodeid = b"fake_nodeid" self.nickname = u"fake_nickname \u263A" self.introducer_furls = [] self.introducer_clients = [] self.stats_provider = FakeStatsProvider() self._secret_holder = SecretHolder(b"lease secret", b"convergence secret") self.helper = None self.convergence = b"some random string" self.storage_broker = StorageFarmBroker( permute_peers=True, tub_maker=None, node_config=EMPTY_CLIENT_CONFIG, ) # fake knowledge of another server self.storage_broker.test_add_server(b"other_nodeid", FakeDisplayableServer( serverid=b"other_nodeid", nickname=u"other_nickname \u263B", connected = True, last_connect_time = 10, last_loss_time = 20, last_rx_time = 30)) self.storage_broker.test_add_server(b"disconnected_nodeid", FakeDisplayableServer( serverid=b"disconnected_nodeid", nickname=u"disconnected_nickname \u263B", connected = False, 
last_connect_time = None, last_loss_time = 25, last_rx_time = 35)) self.introducer_client = None self.history = FakeHistory() self.uploader = FakeUploader() self.uploader.all_contents = self.all_contents self.uploader.setServiceParent(self) self.blacklist = None self.nodemaker = FakeNodeMaker(None, self._secret_holder, None, self.uploader, None, None, None, None) self.nodemaker.all_contents = self.all_contents self.mutable_file_default = SDMF_VERSION self.addService(FakeStorageServer(self.nodeid, self.nickname)) def get_long_nodeid(self): return b"v0-nodeid" def get_long_tubid(self): return u"tubid" def get_auth_token(self): return b'a fake debug auth token' def startService(self): return service.MultiService.startService(self) def stopService(self): return service.MultiService.stopService(self) MUTABLE_SIZELIMIT = FakeMutableFileNode.MUTABLE_SIZELIMIT class WebMixin(TimezoneMixin): def setUp(self): self.setTimezone('UTC-13:00') self.s = FakeClient() self.s.startService() self.staticdir = self.mktemp() self.clock = Clock() self.fakeTime = 86460 # 1d 0h 1m 0s tempdir = FilePath(self.mktemp()) tempdir.makedirs() self.ws = webish.WebishServer( self.s, "0", webish.anonymous_tempfile_factory(tempdir.path), staticdir=self.staticdir, clock=self.clock, now_fn=lambda:self.fakeTime, ) self.ws.setServiceParent(self.s) self.webish_port = self.ws.getPortnum() self.webish_url = self.ws.getURL() assert self.webish_url.endswith("/") self.webish_url = self.webish_url[:-1] # these tests add their own / l = [ self.s.create_dirnode() for x in range(6) ] d = defer.DeferredList(l) def _then(res): self.public_root = res[0][1] assert interfaces.IDirectoryNode.providedBy(self.public_root), res self.public_url = "/uri/" + str(self.public_root.get_uri(), "ascii") self.private_root = res[1][1] foo = res[2][1] self._foo_node = foo self._foo_uri = foo.get_uri() self._foo_readonly_uri = foo.get_readonly_uri() self._foo_verifycap = foo.get_verify_cap().to_string() # NOTE: we ignore the deferred on all set_uri() calls, because we # know the fake nodes do these synchronously self.public_root.set_uri(u"foo", foo.get_uri(), foo.get_readonly_uri()) self.BAR_CONTENTS, n, self._bar_txt_uri = self.makefile(0) foo.set_uri(u"bar.txt", self._bar_txt_uri, self._bar_txt_uri) self._bar_txt_verifycap = n.get_verify_cap().to_string() # sdmf # XXX: Do we ever use this? self.BAZ_CONTENTS, n, self._baz_txt_uri, self._baz_txt_readonly_uri = self.makefile_mutable(0) foo.set_uri(u"baz.txt", self._baz_txt_uri, self._baz_txt_readonly_uri) # mdmf self.QUUX_CONTENTS, n, self._quux_txt_uri, self._quux_txt_readonly_uri = self.makefile_mutable(0, mdmf=True) assert self._quux_txt_uri.startswith(b"URI:MDMF") foo.set_uri(u"quux.txt", self._quux_txt_uri, self._quux_txt_readonly_uri) foo.set_uri(u"empty", res[3][1].get_uri(), res[3][1].get_readonly_uri()) sub_uri = res[4][1].get_uri() self._sub_uri = sub_uri foo.set_uri(u"sub", sub_uri, sub_uri) sub = self.s.create_node_from_uri(sub_uri) self._sub_node = sub _ign, n, blocking_uri = self.makefile(1) foo.set_uri(u"blockingfile", blocking_uri, blocking_uri) # filenode to test for html encoding issues self._htmlname_unicode = u"<&weirdly'named\"file>>>_