X-Git-Url: https://git.mxchange.org/?p=quix0rs-apt-p2p.git;a=blobdiff_plain;f=test.py;h=d697b371a4d9aaf529798bfb510b398cd6d50b3d;hp=f66c7fbc544074a805f2f63b6db6e2956f80ad18;hb=d33a19a8a5945ddef5401c74b37797146b3d353a;hpb=357aca42afe56dad70569d82782d6902df5656eb

diff --git a/test.py b/test.py
index f66c7fb..d697b37 100755
--- a/test.py
+++ b/test.py
@@ -59,15 +59,15 @@ tests = {'1': ('Start a single bootstrap and downloader, test updating and downl
          {1: {}, 2: {}},
          [(1, ['update']),
+          (2, ['update']),
           (1, ['install', 'aboot-base']),
+          (2, ['install', 'aboot-base']),
           (1, ['install', 'aap-doc']),
           (1, ['install', 'ada-reference-manual']),
           (1, ['install', 'fop-doc']),
           (1, ['install', 'jswat-doc']),
           (1, ['install', 'bison-doc']),
           (1, ['install', 'crash-whitepaper']),
-          (2, ['update']),
-          (2, ['install', 'aboot-base']),
           (2, ['install', 'aap-doc']),
           (2, ['install', 'ada-reference-manual']),
           (2, ['install', 'fop-doc']),
@@ -332,6 +332,15 @@ CACHE_DIR = %(CACHE_DIR)s
 # Whether it's OK to use an IP addres from a known local/private range
 LOCAL_OK = yes
 
+# Unload the packages cache after an interval of inactivity this long.
+# The packages cache uses a lot of memory, and only takes a few seconds
+# to reload when a new request arrives.
+UNLOAD_PACKAGES_CACHE = 5m
+
+# Refresh the DHT keys after this much time has passed.
+# This should be a time slightly less than the DHT's KEY_EXPIRE value.
+KEY_REFRESH = 57m
+
 # Which DHT implementation to use.
 # It must be possile to do "from .DHT import DHT" to get a class that
 # implements the IDHT interface.
@@ -354,7 +363,7 @@ K = 8
 HASH_LENGTH = 160
 
 # checkpoint every this many seconds
-CHECKPOINT_INTERVAL = 15m
+CHECKPOINT_INTERVAL = 5m
 
 # concurrent xmlrpc calls per find node/value request!
 CONCURRENT_REQS = 4
@@ -362,6 +371,14 @@ CONCURRENT_REQS = 4
 # how many hosts to post to
 STORE_REDUNDANCY = 3
 
+# How many values to attempt to retrieve from the DHT.
+# Setting this to 0 will try and get all values (which could take a while if
+# a lot of nodes have values). Setting it negative will try to get that
+# number of results from only the closest STORE_REDUNDANCY nodes to the hash.
+# The default is a large negative number so all values from the closest
+# STORE_REDUNDANCY nodes will be retrieved.
+RETRIEVE_VALUES = -10000
+
 # how many times in a row a node can fail to respond before it's booted from the routing table
 MAX_FAILURES = 3
 
@@ -371,14 +388,8 @@ MIN_PING_INTERVAL = 15m
 # refresh buckets that haven't been touched in this long
 BUCKET_STALENESS = 1h
 
-# time before expirer starts running
-KEINITIAL_DELAY = 15s
-
-# time between expirer runs
-KE_DELAY = 20m
-
 # expire entries older than this
-KE_AGE = 1h
+KEY_EXPIRE = 1h
 
 # whether to spew info about the requests/responses in the protocol
 SPEW = yes
@@ -511,7 +522,7 @@ def apt_get(num_down, cmd):
 
     """
 
-    print '************************** apt-get (' + str(num_down) + ') ' + ' '.join(cmd) + ' **************************'
+    print '*************** apt-get (' + str(num_down) + ') ' + ' '.join(cmd) + ' ****************'
     apt_conf = join([down_dir(num_down), 'etc', 'apt', 'apt.conf'])
     dpkg_status = join([down_dir(num_down), 'var', 'lib', 'dpkg', 'status'])
     args = ['-d', '-c', apt_conf, '-o', 'Dir::state::status='+dpkg_status] + cmd
@@ -735,9 +746,9 @@ def run_test(bootstraps, downloaders, apt_get_queue):
             apt_get_results.append((elapsed, r_value))
 
             if r_value == 0:
-                print '********************** apt-get completed successfully in ' + str(elapsed) + ' sec. **************************'
+                print '********** apt-get completed successfully in ' + str(elapsed) + ' sec. *****************'
             else:
-                print '********************** apt-get finished with status ' + str(r_value) + ' in ' + str(elapsed) + ' sec. **************************'
+                print '********** apt-get finished with status ' + str(r_value) + ' in ' + str(elapsed) + ' sec. ************'
 
     sleep(5)
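
The interval-valued settings in the configuration template above (UNLOAD_PACKAGES_CACHE = 5m, KEY_REFRESH = 57m, CHECKPOINT_INTERVAL = 5m, KEY_EXPIRE = 1h) are written with time suffixes. As a rough illustration only, the Python sketch below shows one way such suffixes could be converted to seconds; the parse_interval helper and its s/m/h/d suffix table are assumptions made for this example, not part of apt-p2p's actual configuration parser.

# Sketch only: convert interval strings like '15s', '57m', or '1h' to seconds.
# The suffix table is an assumption based on the values shown in the template
# above; apt-p2p's real configuration parser may handle these differently.
SUFFIXES = {'s': 1, 'm': 60, 'h': 3600, 'd': 86400}

def parse_interval(value):
    value = value.strip()
    if value and value[-1] in SUFFIXES:
        return int(value[:-1]) * SUFFIXES[value[-1]]
    return int(value)  # treat a bare number as seconds

# For example, parse_interval('57m') == 3420, slightly less than
# parse_interval('1h') == 3600, matching the advice in the template that
# KEY_REFRESH stay just under KEY_EXPIRE.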