from twisted.web2.http import Response, splitHostPort
from HTTPDownloader import Peer
+from Streams import GrowingFileStream, StreamToFile
from util import uncompact
from Hash import PIECE_SIZE
from apt_p2p_Khashmir.bencode import bdecode
+from apt_p2p_conf import config
-class GrowingFileStream(stream.FileStream):
- """Modified to stream data from a file as it becomes available.
-
- @ivar CHUNK_SIZE: the maximum size of chunks of data to send at a time
- @ivar deferred: waiting for the result of the last read attempt
- @ivar available: the number of bytes that are currently available to read
- @ivar position: the current position in the file where the next read will begin
- @ivar finished: True when no more data will be coming available
- """
-
- CHUNK_SIZE = 4*1024
-
- def __init__(self, f, length = None):
- stream.FileStream.__init__(self, f)
- self.length = length
- self.deferred = None
- self.available = 0L
- self.position = 0L
- self.finished = False
-
- def updateAvailable(self, newlyAvailable):
- """Update the number of bytes that are available.
-
- Call it with 0 to trigger reading of a fully read file.
-
- @param newlyAvailable: the number of bytes that just became available
- """
- assert not self.finished
- self.available += newlyAvailable
-
- # If a read is pending, let it go
- if self.deferred and self.position < self.available:
- # Try to read some data from the file
- length = self.available - self.position
- readSize = min(length, self.CHUNK_SIZE)
- self.f.seek(self.position)
- b = self.f.read(readSize)
- bytesRead = len(b)
-
- # Check if end of file was reached
- if bytesRead:
- self.position += bytesRead
- deferred = self.deferred
- self.deferred = None
- deferred.callback(b)
-
- def allAvailable(self):
- """Indicate that no more data will be coming available."""
- self.finished = True
-
- # If a read is pending, let it go
- if self.deferred:
- if self.position < self.available:
- # Try to read some data from the file
- length = self.available - self.position
- readSize = min(length, self.CHUNK_SIZE)
- self.f.seek(self.position)
- b = self.f.read(readSize)
- bytesRead = len(b)
-
- # Check if end of file was reached
- if bytesRead:
- self.position += bytesRead
- deferred = self.deferred
- self.deferred = None
- deferred.callback(b)
- else:
- # We're done
- deferred = self.deferred
- self.deferred = None
- deferred.callback(None)
- else:
- # We're done
- deferred = self.deferred
- self.deferred = None
- deferred.callback(None)
-
- def read(self, sendfile=False):
- assert not self.deferred, "A previous read is still deferred."
-
- if self.f is None:
- return None
-
- length = self.available - self.position
- readSize = min(length, self.CHUNK_SIZE)
-
- # If we don't have any available, we're done or deferred
- if readSize <= 0:
- if self.finished:
- return None
- else:
- self.deferred = defer.Deferred()
- return self.deferred
-
- # Try to read some data from the file
- self.f.seek(self.position)
- b = self.f.read(readSize)
- bytesRead = len(b)
- if not bytesRead:
- # End of file was reached, we're done or deferred
- if self.finished:
- return None
- else:
- self.deferred = defer.Deferred()
- return self.deferred
- else:
- self.position += bytesRead
- return b
-
-class StreamToFile:
- """Save a stream to a partial file and hash it.
-
- @type stream: L{twisted.web2.stream.IByteStream}
- @ivar stream: the input stream being read
- @type outFile: L{twisted.python.filepath.FilePath}
- @ivar outFile: the file being written
- @type hash: C{sha1}
- @ivar hash: the hash object for the data
- @type position: C{int}
- @ivar position: the current file position to write the next data to
- @type length: C{int}
- @ivar length: the position in the file to not write beyond
- @type doneDefer: L{twisted.internet.defer.Deferred}
- @ivar doneDefer: the deferred that will fire when done writing
- """
-
- def __init__(self, inputStream, outFile, start = 0, length = None):
- """Initializes the file.
-
- @type inputStream: L{twisted.web2.stream.IByteStream}
- @param inputStream: the input stream to read from
- @type outFile: L{twisted.python.filepath.FilePath}
- @param outFile: the file to write to
- @type start: C{int}
- @param start: the file position to start writing at
- (optional, defaults to the start of the file)
- @type length: C{int}
- @param length: the maximum amount of data to write to the file
- (optional, defaults to not limiting the writing to the file
- """
- self.stream = inputStream
- self.outFile = outFile
- self.hash = sha.new()
- self.position = start
- self.length = None
- if length is not None:
- self.length = start + length
- self.doneDefer = None
-
- def run(self):
- """Start the streaming.
-
- @rtype: L{twisted.internet.defer.Deferred}
- """
- log.msg('Started streaming %r bytes to file at position %d' % (self.length, self.position))
- self.doneDefer = stream.readStream(self.stream, self._gotData)
- self.doneDefer.addCallbacks(self._done, self._error)
- return self.doneDefer
-
- def _gotData(self, data):
- """Process the received data."""
- if self.outFile.closed:
- raise Exception, "outFile was unexpectedly closed"
-
- if data is None:
- raise Exception, "Data is None?"
-
- # Make sure we don't go too far
- if self.length is not None and self.position + len(data) > self.length:
- data = data[:(self.length - self.position)]
-
- # Write and hash the streamed data
- self.outFile.seek(self.position)
- self.outFile.write(data)
- self.hash.update(data)
- self.position += len(data)
-
- def _done(self, result):
- """Return the result."""
- log.msg('Streaming is complete')
- return self.hash.digest()
-
- def _error(self, err):
- """Log the error."""
- log.msg('Streaming error')
- log.err(err)
- return err
+class PeerError(Exception):
+ """An error occurred downloading from peers."""
class FileDownload:
"""Manage a download from a list of peers or a mirror.
self.compact_peers = compact_peers
self.path = '/~/' + quote_plus(hash.expected())
+ self.defer = None
+ self.mirror_path = None
self.pieces = None
self.started = False
if max_found == no_pieces:
# The file is not split into pieces
log.msg('No pieces were found for the file')
- self.pieces = []
+ self.pieces = [self.hash.expected()]
self.startDownload()
elif max_found == max(pieces_string.values()):
# Small number of pieces in a string
# del self.peers[site]
# Start the DHT lookup
- lookupDefer = self.manager.dht.getValue(key)
- lookupDefer.addCallback(self._getDHTPieces, key)
+ lookupDefer = self.manager.dht.get(key)
+ lookupDefer.addBoth(self._getDHTPieces, key)
def _getDHTPieces(self, results, key):
"""Check the retrieved values."""
- for result in results:
- # Make sure the hash matches the key
- result_hash = sha.new(result.get('t', '')).digest()
- if result_hash == key:
- pieces = result['t']
- self.pieces = [pieces[x:x+20] for x in xrange(0, len(pieces), 20)]
- log.msg('Retrieved %d piece hashes from the DHT' % len(self.pieces))
- self.startDownload()
- return
+ if isinstance(results, list):
+ for result in results:
+ # Make sure the hash matches the key
+ result_hash = sha.new(result.get('t', '')).digest()
+ if result_hash == key:
+ pieces = result['t']
+ self.pieces = [pieces[x:x+20] for x in xrange(0, len(pieces), 20)]
+ log.msg('Retrieved %d piece hashes from the DHT' % len(self.pieces))
+ self.startDownload()
+ return
+
+ log.msg('Could not retrieve the piece hashes from the DHT')
+ else:
+ log.msg('Looking up piece hashes in the DHT resulted in an error: %r' % (results, ))
# Continue without the piece hashes
- log.msg('Could not retrieve the piece hashes from the DHT')
- self.pieces = []
+ self.pieces = [None for x in xrange(0, self.hash.expSize, PIECE_SIZE)]
self.startDownload()
def getPeerPieces(self, key, failedSite = None):
if self.pieces is None:
# Send a request to one or more peers
- log.msg('Checking for a peer to request piece hashes from')
for site in self.peers:
if self.peers[site].get('failed', False) != True:
log.msg('Sending a piece hash request to %r' % (site, ))
**{'callbackArgs': (key, site),
'errbackArgs': (key, site)})
self.outstanding += 1
- if self.outstanding >= 3:
+ if self.outstanding >= 4:
break
- log.msg('Done sending piece hash requests for now, %d outstanding' % self.outstanding)
if self.pieces is None and self.outstanding <= 0:
# Continue without the piece hashes
log.msg('Could not retrieve the piece hashes from the peers')
- self.pieces = []
+ self.pieces = [None for x in xrange(0, self.hash.expSize, PIECE_SIZE)]
self.startDownload()
def _getPeerPieces(self, response, key, site):
log.msg('Got a piece hash response %d from %r' % (response.code, site))
if response.code != 200:
# Request failed, try a different peer
- log.msg('Did not like response %d from %r' % (response.code, site))
self.getPeerPieces(key, site)
else:
# Read the response stream to a string
"""Sort the peers by their rank (highest ranked at the end)."""
def sort(a, b):
"""Sort peers by their rank."""
- if a.rank > b.rank:
+ if self.peers[a]['peer'].rank > self.peers[b]['peer'].rank:
return 1
- elif a.rank < b.rank:
+ elif self.peers[a]['peer'].rank < self.peers[b]['peer'].rank:
return -1
return 0
- self.peerlist.sort(sort)
+ self.sitelist.sort(sort)
def startDownload(self):
"""Start the download from the peers."""
log.msg('Starting to download %s' % self.path)
self.started = True
- assert self.pieces is not None, "You must initialize the piece hashes first"
- self.peerlist = [self.peers[site]['peer'] for site in self.peers]
+ assert self.pieces, "You must initialize the piece hashes first"
+
+ self.sitelist = self.peers.keys()
# Special case if there's only one good peer left
- if len(self.peerlist) == 1:
- log.msg('Downloading from peer %r' % (self.peerlist[0], ))
- self.defer.callback(self.peerlist[0].get(self.path))
- return
+# if len(self.sitelist) == 1:
+# log.msg('Downloading from peer %r' % (self.peers[self.sitelist[0]]['peer'], ))
+# self.defer.callback(self.peers[self.sitelist[0]]['peer'].get(self.path))
+# return
- # Start sending the return file
- self.stream = GrowingFileStream(self.file, self.hash.expSize)
- resp = Response(200, {}, self.stream)
- self.defer.callback(resp)
-
# Begin to download the pieces
self.outstanding = 0
self.nextFinish = 0
- if self.pieces:
- self.completePieces = [False for piece in self.pieces]
- else:
- self.completePieces = [False]
+ self.completePieces = [False for piece in self.pieces]
+ self.addedMirror = False
+ self.addMirror()
self.getPieces()
+
+ def addMirror(self):
+ """Use the mirror if there are few peers."""
+ if not self.addedMirror and len(self.sitelist) + self.outstanding < config.getint('DEFAULT', 'MIN_DOWNLOAD_PEERS'):
+ self.addedMirror = True
+ parsed = urlparse(self.mirror)
+ if parsed[0] == "http":
+ site = splitHostPort(parsed[0], parsed[1])
+ self.mirror_path = urlunparse(('', '') + parsed[2:])
+ peer = self.manager.getPeer(site, mirror = True)
+ self.peers[site] = {}
+ self.peers[site]['peer'] = peer
+ self.sitelist.append(site)
#{ Downloading the pieces
def getPieces(self):
"""Download the next pieces from the peers."""
- log.msg('Checking for more piece requests to send')
+ if self.file.closed:
+ log.msg('Download has been aborted for %s' % self.path)
+ self.stream.allAvailable(remove = True)
+ return
+
self.sort()
piece = self.nextFinish
- while self.outstanding < 4 and self.peerlist and piece < len(self.completePieces):
- log.msg('Checking piece %d' % piece)
+ while self.outstanding < 4 and self.sitelist and piece < len(self.completePieces):
if self.completePieces[piece] == False:
# Send a request to the highest ranked peer
- peer = self.peerlist.pop()
- self.completePieces[piece] = peer
- log.msg('Sending a request for piece %d to peer %r' % (piece, peer))
+ site = self.sitelist.pop()
+ self.completePieces[piece] = site
+ log.msg('Sending a request for piece %d to peer %r' % (piece, self.peers[site]['peer']))
self.outstanding += 1
- if self.pieces:
- df = peer.getRange(self.path, piece*PIECE_SIZE, (piece+1)*PIECE_SIZE - 1)
+ path = self.path
+ if self.peers[site]['peer'].mirror:
+ path = self.mirror_path
+ if len(self.completePieces) > 1:
+ df = self.peers[site]['peer'].getRange(path, piece*PIECE_SIZE, (piece+1)*PIECE_SIZE - 1)
else:
- df = peer.get(self.path)
+ df = self.peers[site]['peer'].get(path)
reactor.callLater(0, df.addCallbacks,
*(self._getPiece, self._getError),
- **{'callbackArgs': (piece, peer),
- 'errbackArgs': (piece, peer)})
+ **{'callbackArgs': (piece, site),
+ 'errbackArgs': (piece, site)})
piece += 1
- log.msg('Finished checking pieces, %d outstanding, next piece %d of %d' % (self.outstanding, self.nextFinish, len(self.completePieces)))
# Check if we're done
if self.outstanding <= 0 and self.nextFinish >= len(self.completePieces):
- log.msg('We seem to be done with all pieces')
- self.stream.allAvailable()
+ log.msg('Download is complete for %s' % self.path)
+ self.stream.allAvailable(remove = True)
+
+ # Check if we ran out of peers
+ if self.outstanding <= 0 and not self.sitelist and False in self.completePieces:
+ log.msg("Download failed, no peers left to try.")
+ if self.defer:
+ # Send a return error
+ df = self.defer
+ self.defer = None
+ resp = Response(500, {}, None)
+ df.callback(resp)
+ else:
+ # Already streaming the response, try and abort
+ self.stream.allAvailable(remove = True)
- def _getPiece(self, response, piece, peer):
+ def _getPiece(self, response, piece, site):
"""Process the retrieved headers from the peer."""
- log.msg('Got response for piece %d from peer %r' % (piece, peer))
- if ((len(self.completePieces) > 1 and response.code != 206) or
+ if response.code == 404:
+ # Peer no longer has this file, move on
+ log.msg('Peer sharing piece %d no longer has it: %r' % (piece, self.peers[site]['peer']))
+ self.completePieces[piece] = False
+ if response.stream and response.stream.length:
+ stream.readAndDiscard(response.stream)
+
+ # Don't add the site back, just move on
+ site = None
+ elif ((len(self.completePieces) > 1 and response.code != 206) or
(response.code not in (200, 206))):
# Request failed, try a different peer
- log.msg('Wrong response type %d for piece %d from peer %r' % (response.code, piece, peer))
- peer.hashError('Peer responded with the wrong type of download: %r' % response.code)
+ log.msg('Wrong response type %d for piece %d from peer %r' % (response.code, piece, self.peers[site]['peer']))
+ self.peers[site]['peer'].hashError('Peer responded with the wrong type of download: %r' % response.code)
self.completePieces[piece] = False
+ self.peers[site]['errors'] = self.peers[site].get('errors', 0) + 1
if response.stream and response.stream.length:
stream.readAndDiscard(response.stream)
+
+ # After 3 errors in a row, drop the peer
+ if self.peers[site]['errors'] >= 3:
+ site = None
else:
+ if self.defer:
+ # Start sending the return file
+ df = self.defer
+ self.defer = None
+ self.stream = GrowingFileStream(self.file, self.hash.expSize)
+
+ # Get the headers from the peer's response
+ headers = {}
+ if response.headers.hasHeader('last-modified'):
+ headers['last-modified'] = response.headers.getHeader('last-modified')
+ resp = Response(200, headers, self.stream)
+ df.callback(resp)
+
# Read the response stream to the file
- log.msg('Streaming piece %d from peer %r' % (piece, peer))
+ log.msg('Streaming piece %d from peer %r' % (piece, self.peers[site]['peer']))
if response.code == 206:
- df = StreamToFile(response.stream, self.file, piece*PIECE_SIZE, PIECE_SIZE).run()
+ df = StreamToFile(self.hash.newPieceHasher(), response.stream,
+ self.file, piece*PIECE_SIZE, PIECE_SIZE).run()
else:
- df = StreamToFile(response.stream, self.file).run()
- df.addCallbacks(self._gotPiece, self._gotError,
- callbackArgs=(piece, peer), errbackArgs=(piece, peer))
+ df = StreamToFile(self.hash.newHasher(), response.stream,
+ self.file).run()
+ reactor.callLater(0, df.addCallbacks,
+ *(self._gotPiece, self._gotError),
+ **{'callbackArgs': (piece, site),
+ 'errbackArgs': (piece, site)})
self.outstanding -= 1
- self.peerlist.append(peer)
+ if site:
+ self.sitelist.append(site)
+ else:
+ self.addMirror()
self.getPieces()
- def _getError(self, err, piece, peer):
+ def _getError(self, err, piece, site):
"""Peer failed, try again."""
- log.msg('Got error for piece %d from peer %r' % (piece, peer))
+ log.msg('Got error for piece %d from peer %r' % (piece, self.peers[site]['peer']))
self.outstanding -= 1
- self.peerlist.append(peer)
+ self.peers[site]['errors'] = self.peers[site].get('errors', 0) + 1
+ if self.peers[site]['errors'] < 3:
+ self.sitelist.append(site)
+ else:
+ self.addMirror()
self.completePieces[piece] = False
self.getPieces()
log.err(err)
- def _gotPiece(self, response, piece, peer):
+ def _gotPiece(self, hash, piece, site):
"""Process the retrieved piece from the peer."""
- log.msg('Finished streaming piece %d from peer %r: %r' % (piece, peer, response))
- if ((self.pieces and response != self.pieces[piece]) or
- (len(self.pieces) == 0 and response != self.hash.expected())):
+ if self.pieces[piece] and hash.digest() != self.pieces[piece]:
# Hash doesn't match
- log.msg('Hash error for piece %d from peer %r' % (piece, peer))
- peer.hashError('Piece received from peer does not match expected')
+ log.msg('Hash error for piece %d from peer %r' % (piece, self.peers[site]['peer']))
+ self.peers[site]['peer'].hashError('Piece received from peer does not match expected')
+ self.peers[site]['errors'] = self.peers[site].get('errors', 0) + 1
self.completePieces[piece] = False
- elif self.pieces:
+ else:
# Successfully completed one of several pieces
- log.msg('Finished with piece %d from peer %r' % (piece, peer))
+ log.msg('Finished with piece %d from peer %r' % (piece, self.peers[site]['peer']))
self.completePieces[piece] = True
+ self.peers[site]['errors'] = 0
while (self.nextFinish < len(self.completePieces) and
self.completePieces[self.nextFinish] == True):
self.nextFinish += 1
self.stream.updateAvailable(PIECE_SIZE)
- else:
- # Whole download (only one piece) is complete
- log.msg('Piece %d from peer %r is the last piece' % (piece, peer))
- self.completePieces[piece] = True
- self.nextFinish = 1
- self.stream.updateAvailable(2**30)
self.getPieces()
- def _gotError(self, err, piece, peer):
+ def _gotError(self, err, piece, site):
"""Piece download failed, try again."""
- log.msg('Error streaming piece %d from peer %r: %r' % (piece, peer, response))
+ log.msg('Error streaming piece %d from peer %r: %r' % (piece, self.peers[site]['peer'], err))
log.err(err)
+ self.peers[site]['errors'] = self.peers[site].get('errors', 0) + 1
self.completePieces[piece] = False
self.getPieces()
@type cache_dir: L{twisted.python.filepath.FilePath}
@ivar cache_dir: the directory to use for storing all files
- @type dht: L{interfaces.IDHT}
+ @type dht: L{DHTManager.DHT}
@ivar dht: the DHT instance
+ @type stats: L{stats.StatsLogger}
+ @ivar stats: the statistics logger to record sent data to
@type clients: C{dictionary}
@ivar clients: the available peers that have been previously contacted
"""
- def __init__(self, cache_dir, dht):
+ def __init__(self, cache_dir, dht, stats):
"""Initialize the instance."""
self.cache_dir = cache_dir
self.cache_dir.restat(False)
if not self.cache_dir.exists():
self.cache_dir.makedirs()
self.dht = dht
+ self.stats = stats
self.clients = {}
def get(self, hash, mirror, peers = [], method="GET", modtime=None):
assert parsed[0] == "http", "Only HTTP is supported, not '%s'" % parsed[0]
site = splitHostPort(parsed[0], parsed[1])
path = urlunparse(('', '') + parsed[2:])
- peer = self.getPeer(site)
+ peer = self.getPeer(site, mirror = True)
return peer.get(path, method, modtime)
- elif len(peers) == 1:
- site = uncompact(peers[0]['c'])
- log.msg('Downloading from peer %r' % (site, ))
- path = '/~/' + quote_plus(hash.expected())
- peer = self.getPeer(site)
- return peer.get(path)
+# elif len(peers) == 1:
+# site = uncompact(peers[0]['c'])
+# log.msg('Downloading from peer %r' % (site, ))
+# path = '/~/' + quote_plus(hash.expected())
+# peer = self.getPeer(site)
+# return peer.get(path)
else:
tmpfile = self.cache_dir.child(hash.hexexpected())
return FileDownload(self, hash, mirror, peers, tmpfile).run()
- def getPeer(self, site):
+ def getPeer(self, site, mirror = False):
"""Create a new peer if necessary and return it.
@type site: (C{string}, C{int})
@param site: the IP address and port of the peer
+ @param mirror: whether the peer is actually a mirror
+ (optional, defaults to False)
"""
if site not in self.clients:
- self.clients[site] = Peer(site[0], site[1])
+ self.clients[site] = Peer(site[0], site[1], self.stats)
+ if mirror:
+ self.clients[site].mirror = True
return self.clients[site]
def close(self):
manager = None
pending_calls = []
- def gotResp(self, resp, num, expect):
- self.failUnless(resp.code >= 200 and resp.code < 300, "Got a non-200 response: %r" % resp.code)
- if expect is not None:
- self.failUnless(resp.stream.length == expect, "Length was incorrect, got %r, expected %r" % (resp.stream.length, expect))
- def print_(n):
- pass
- def printdone(n):
- pass
- stream.readStream(resp.stream, print_).addCallback(printdone)
-
- def test_download(self):
- """Tests a normal download."""
- self.manager = PeerManager()
- self.timeout = 10
-
- host = 'www.ietf.org'
- d = self.manager.get('', 'http://' + host + '/rfc/rfc0013.txt')
- d.addCallback(self.gotResp, 1, 1070)
- return d
-
- def test_head(self):
- """Tests a 'HEAD' request."""
- self.manager = PeerManager()
- self.timeout = 10
-
- host = 'www.ietf.org'
- d = self.manager.get('', 'http://' + host + '/rfc/rfc0013.txt', method = "HEAD")
- d.addCallback(self.gotResp, 1, 0)
- return d
-
- def test_multiple_downloads(self):
- """Tests multiple downloads with queueing and connection closing."""
- self.manager = PeerManager()
- self.timeout = 120
- lastDefer = defer.Deferred()
-
- def newRequest(host, path, num, expect, last=False):
- d = self.manager.get('', 'http://' + host + ':' + str(80) + path)
- d.addCallback(self.gotResp, num, expect)
- if last:
- d.addBoth(lastDefer.callback)
-
- newRequest('www.ietf.org', "/rfc/rfc0006.txt", 1, 1776)
- newRequest('www.ietf.org', "/rfc/rfc2362.txt", 2, 159833)
- newRequest('www.google.ca', "/", 3, None)
- self.pending_calls.append(reactor.callLater(1, newRequest, 'www.sfu.ca', '/', 4, None))
- self.pending_calls.append(reactor.callLater(10, newRequest, 'www.ietf.org', '/rfc/rfc0048.txt', 5, 41696))
- self.pending_calls.append(reactor.callLater(30, newRequest, 'www.ietf.org', '/rfc/rfc0022.txt', 6, 4606))
- self.pending_calls.append(reactor.callLater(31, newRequest, 'www.sfu.ca', '/studentcentral/index.html', 7, None))
- self.pending_calls.append(reactor.callLater(32, newRequest, 'www.ietf.org', '/rfc/rfc0014.txt', 8, 27))
- self.pending_calls.append(reactor.callLater(32, newRequest, 'www.ietf.org', '/rfc/rfc0001.txt', 9, 21088))
- self.pending_calls.append(reactor.callLater(62, newRequest, 'www.google.ca', '/intl/en/options/', 0, None, True))
- return lastDefer
-
def tearDown(self):
for p in self.pending_calls:
if p.active():