X-Git-Url: https://git.mxchange.org/?a=blobdiff_plain;f=simgear%2Fio%2FHTTPClient.cxx;h=d5ef1ea5108039ff2dd8e01eea27f0141d69be03;hb=8d93206dd33ed0079af6670a0ecd41a3b203d9a0;hp=2a6d41e2feb92d511553107ce44bbdbe7306fc0c;hpb=a57e969639575643d6961344f4904c04d8415de0;p=simgear.git

diff --git a/simgear/io/HTTPClient.cxx b/simgear/io/HTTPClient.cxx
index 2a6d41e2..d5ef1ea5 100644
--- a/simgear/io/HTTPClient.cxx
+++ b/simgear/io/HTTPClient.cxx
@@ -68,7 +68,6 @@ namespace HTTP
 extern const int DEFAULT_HTTP_PORT = 80;
 const char* CONTENT_TYPE_URL_ENCODED = "application/x-www-form-urlencoded";
-const unsigned int MAX_INFLIGHT_REQUESTS = 32;
 class Connection;
 typedef std::multimap<std::string, Connection*> ConnectionDict;
@@ -79,7 +78,24 @@ class Client::ClientPrivate
 public:
#if defined(ENABLE_CURL)
 CURLM* curlMulti;
- bool haveActiveRequests;
+
+ void createCurlMulti()
+ {
+ curlMulti = curl_multi_init();
+ // see https://curl.haxx.se/libcurl/c/CURLMOPT_PIPELINING.html
+ // we request HTTP 1.1 pipelining
+ curl_multi_setopt(curlMulti, CURLMOPT_PIPELINING, 1 /* aka CURLPIPE_HTTP1 */);
+ curl_multi_setopt(curlMulti, CURLMOPT_MAX_TOTAL_CONNECTIONS, (long) maxConnections);
+ curl_multi_setopt(curlMulti, CURLMOPT_MAX_PIPELINE_LENGTH,
+ (long) maxPipelineDepth);
+ curl_multi_setopt(curlMulti, CURLMOPT_MAX_HOST_CONNECTIONS,
+ (long) maxHostConnections);
+
+
+ }
+
+ typedef std::map<Request_ptr, CURL*> RequestCurlMap;
+ RequestCurlMap requests;
#else
 NetChannelPoller poller;
// connections by host (potentially more than one)
@@ -91,11 +107,11 @@ public:
 int proxyPort;
 std::string proxyAuth;
 unsigned int maxConnections;
+ unsigned int maxHostConnections;
+ unsigned int maxPipelineDepth;
 RequestList pendingRequests;
-
-
 SGTimeStamp timeTransferSample;
 unsigned int bytesTransferred;
 unsigned int lastTransferRate;
@@ -106,10 +122,12 @@ public:
 class Connection : public NetChat
 {
 public:
- Connection(Client* pr) :
+ Connection(Client* pr, const std::string& conId) :
 client(pr),
 state(STATE_CLOSED),
- port(DEFAULT_HTTP_PORT)
+ port(DEFAULT_HTTP_PORT),
+ _connectionId(conId),
+ _maxPipelineLength(255)
 {
 }
@@ -126,7 +144,7 @@ public:
 // force the state to GETTING_BODY, to simplify logic in
 // responseComplete and handleClose
- state = STATE_GETTING_BODY;
+ setState(STATE_GETTING_BODY);
 responseComplete();
 }
@@ -136,10 +154,20 @@ public:
 port = p;
 }
+ void setMaxPipelineLength(unsigned int m)
+ {
+ _maxPipelineLength = m;
+ }
+
 // socket-level errors
 virtual void handleError(int error)
 {
 const char* errStr = strerror(error);
+ SG_LOG(SG_IO, SG_WARN, _connectionId << " handleError:" << error << " ("
+ << errStr << ")");
+
+ debugDumpRequests();
+
 if (!activeRequest) {
 // connection level failure, eg name lookup or routing
@@ -165,7 +193,12 @@ public:
 _contentDecoder.reset();
 }
- state = STATE_SOCKET_ERROR;
+ setState(STATE_SOCKET_ERROR);
+ }
+
+ void handleTimeout()
+ {
+ handleError(ETIMEDOUT);
 }
 virtual void handleClose()
@@ -174,13 +207,33 @@ public:
 // closing of the connection from the server side when getting the body,
 bool canCloseState = (state == STATE_GETTING_BODY);
+ bool isCancelling = (state == STATE_CANCELLING);
+
 if (canCloseState && activeRequest) {
+ // check bodyTransferSize matches how much we actually transferred
+ if (bodyTransferSize > 0) {
+ if (_contentDecoder.getTotalReceivedBytes() != bodyTransferSize) {
+ SG_LOG(SG_IO, SG_WARN, _connectionId << " saw connection close while still receiving bytes for:" << activeRequest->url()
+ << "\n\thave:" << _contentDecoder.getTotalReceivedBytes() << " of " << bodyTransferSize);
+ }
+ }
+
+ // force 
state here, so responseComplete can avoid closing the // socket again - state = STATE_CLOSED; + SG_LOG(SG_IO, SG_DEBUG, _connectionId << " saw connection close after getting:" << activeRequest->url()); + setState(STATE_CLOSED); responseComplete(); } else { - if (activeRequest) { + if (state == STATE_WAITING_FOR_RESPONSE) { + SG_LOG(SG_IO, SG_DEBUG, _connectionId << ":close while waiting for response, front request is:" + << sentRequests.front()->url()); + assert(!sentRequests.empty()); + sentRequests.front()->setFailure(500, "server closed connection unexpectedly"); + // no active request, but don't restore the front sent one + sentRequests.erase(sentRequests.begin()); + } + + if (activeRequest && !isCancelling) { activeRequest->setFailure(500, "server closed connection"); // remove the failed request from sentRequests, so it does // not get restored @@ -193,7 +246,7 @@ public: _contentDecoder.reset(); } - state = STATE_CLOSED; + setState(STATE_CLOSED); } if (sentRequests.empty()) { @@ -207,25 +260,45 @@ public: sentRequests.clear(); } - void handleTimeout() - { - NetChat::handleError(ETIMEDOUT); - if (activeRequest) { - SG_LOG(SG_IO, SG_DEBUG, "HTTP socket timeout"); - activeRequest->setFailure(ETIMEDOUT, "socket timeout"); - activeRequest = NULL; - _contentDecoder.reset(); - } - - state = STATE_SOCKET_ERROR; - } - void queueRequest(const Request_ptr& r) { queuedRequests.push_back(r); tryStartNextRequest(); } + void cancelRequest(const Request_ptr& r) + { + RequestList::iterator it = std::find(sentRequests.begin(), + sentRequests.end(), r); + if (it != sentRequests.end()) { + sentRequests.erase(it); + + if ((r == activeRequest) || !activeRequest) { + // either the cancelling request is active, or we're in waiting + // for response state - close now + setState(STATE_CANCELLING); + close(); + + setState(STATE_CLOSED); + activeRequest = NULL; + _contentDecoder.reset(); + } else if (activeRequest) { + SG_LOG(SG_IO, SG_INFO, "con:" << _connectionId << " cancelling non-active: " << r->url()); + + // has been sent but not active, let the active finish and + // then close. 
Otherwise cancelling request #2 would mess up + // active transfer #1 + activeRequest->setCloseAfterComplete(); + } + } // of request has been sent + + // simpler case, not sent yet just remove from the queue + it = std::find(queuedRequests.begin(), queuedRequests.end(), r); + if (it != queuedRequests.end()) { + queuedRequests.erase(it); + } + } + void beginResponse() { assert(!sentRequests.empty()); @@ -233,13 +306,14 @@ public: activeRequest = sentRequests.front(); try { + SG_LOG(SG_IO, SG_DEBUG, "con:" << _connectionId << " saw start of response for " << activeRequest->url()); activeRequest->responseStart(buffer); } catch (sg_exception& e) { handleError(EIO); return; } - state = STATE_GETTING_HEADERS; + setState(STATE_GETTING_HEADERS); buffer.clear(); if (activeRequest->responseCode() == 204) { noMessageBody = true; @@ -265,18 +339,19 @@ public: return; } - if (sentRequests.size() > MAX_INFLIGHT_REQUESTS) { + if (sentRequests.size() >= _maxPipelineLength) { return; } if (state == STATE_CLOSED) { if (!connectToHost()) { - + setState(STATE_SOCKET_ERROR); return; } + SG_LOG(SG_IO, SG_DEBUG, "connection " << _connectionId << " connected."); setTerminator("\r\n"); - state = STATE_IDLE; + setState(STATE_IDLE); } Request_ptr r = queuedRequests.front(); @@ -356,13 +431,13 @@ public: } } - // SG_LOG(SG_IO, SG_INFO, "did start request:" << r->url() << - // "\n\t @ " << reinterpret_cast(r.ptr()) << - // "\n\t on connection " << this); + SG_LOG(SG_IO, SG_DEBUG, "con:" << _connectionId << " did send request:" << r->url()); // successfully sent, remove from queue, and maybe send the next queuedRequests.pop_front(); sentRequests.push_back(r); - state = STATE_WAITING_FOR_RESPONSE; + if (state == STATE_IDLE) { + setState(STATE_WAITING_FOR_RESPONSE); + } // pipelining, let's maybe send the next request right away tryStartNextRequest(); @@ -403,7 +478,7 @@ public: case STATE_GETTING_CHUNKED_BYTES: setTerminator("\r\n"); - state = STATE_GETTING_CHUNKED; + setState(STATE_GETTING_CHUNKED); buffer.clear(); break; @@ -423,21 +498,23 @@ public: bool hasIdleTimeout() const { - if (state != STATE_IDLE) { + if ((state != STATE_IDLE) && (state != STATE_CLOSED)) { return false; } assert(sentRequests.empty()); - return idleTime.elapsedMSec() > 1000 * 10; // ten seconds + bool isTimedOut = (idleTime.elapsedMSec() > (1000 * 10)); // 10 seconds + return isTimedOut; } bool hasErrorTimeout() const { - if (state == STATE_IDLE) { + if ((state == STATE_IDLE) || (state == STATE_CLOSED)) { return false; } - return idleTime.elapsedMSec() > (1000 * 30); // 30 seconds + bool isTimedOut = (idleTime.elapsedMSec() > (1000 * 30)); // 30 seconds + return isTimedOut; } bool hasError() const @@ -447,24 +524,71 @@ public: bool shouldStartNext() const { - return !queuedRequests.empty() && (sentRequests.size() < MAX_INFLIGHT_REQUESTS); + return !queuedRequests.empty() && (sentRequests.size() < _maxPipelineLength); } bool isActive() const { return !queuedRequests.empty() || !sentRequests.empty(); } + + std::string connectionId() const + { + return _connectionId; + } + + void debugDumpRequests() const + { + SG_LOG(SG_IO, SG_DEBUG, "requests for:" << host << ":" << port << " (conId=" << _connectionId + << "; state=" << state << ")"); + if (activeRequest) { + SG_LOG(SG_IO, SG_DEBUG, "\tactive:" << activeRequest->url()); + } else { + SG_LOG(SG_IO, SG_DEBUG, "\tNo active request"); + } + + BOOST_FOREACH(Request_ptr req, sentRequests) { + SG_LOG(SG_IO, SG_DEBUG, "\tsent:" << req->url()); + } + + BOOST_FOREACH(Request_ptr req, queuedRequests) { + 
SG_LOG(SG_IO, SG_DEBUG, "\tqueued:" << req->url()); + } + } private: + enum ConnectionState { + STATE_IDLE = 0, + STATE_WAITING_FOR_RESPONSE, + STATE_GETTING_HEADERS, + STATE_GETTING_BODY, + STATE_GETTING_CHUNKED, + STATE_GETTING_CHUNKED_BYTES, + STATE_GETTING_TRAILER, + STATE_SOCKET_ERROR, + STATE_CANCELLING, ///< cancelling an acitve request + STATE_CLOSED ///< connection should be closed now + }; + + void setState(ConnectionState newState) + { + if (state == newState) { + return; + } + + state = newState; + } + bool connectToHost() { SG_LOG(SG_IO, SG_DEBUG, "HTTP connecting to " << host << ":" << port); if (!open()) { - SG_LOG(SG_ALL, SG_WARN, "HTTP::Connection: connectToHost: open() failed"); + SG_LOG(SG_IO, SG_WARN, "HTTP::Connection: connectToHost: open() failed"); return false; } if (connect(host.c_str(), port) != 0) { + SG_LOG(SG_IO, SG_WARN, "HTTP::Connection: connectToHost: connect() failed"); return false; } @@ -540,11 +664,11 @@ private: buffer.clear(); if (chunkSize == 0) { // trailer start - state = STATE_GETTING_TRAILER; + setState(STATE_GETTING_TRAILER); return; } - state = STATE_GETTING_CHUNKED_BYTES; + setState(STATE_GETTING_CHUNKED_BYTES); setByteCount(chunkSize); } @@ -565,16 +689,21 @@ private: activeRequest->responseHeadersComplete(); _contentDecoder.initWithRequest(activeRequest); + if (!activeRequest->serverSupportsPipelining()) { + SG_LOG(SG_IO, SG_DEBUG, _connectionId << " disabling pipelining since server does not support it"); + _maxPipelineLength = 1; + } + if (chunkedTransfer) { - state = STATE_GETTING_CHUNKED; + setState(STATE_GETTING_CHUNKED); } else if (noMessageBody || (bodyTransferSize == 0)) { // force the state to GETTING_BODY, to simplify logic in // responseComplete and handleClose - state = STATE_GETTING_BODY; + setState(STATE_GETTING_BODY); responseComplete(); } else { setByteCount(bodyTransferSize); // may be -1, that's fine - state = STATE_GETTING_BODY; + setState(STATE_GETTING_BODY); } } @@ -590,6 +719,7 @@ private: if ((state == STATE_GETTING_BODY) || (state == STATE_GETTING_TRAILER)) { if (doClose) { + SG_LOG(SG_IO, SG_DEBUG, _connectionId << " doClose requested"); // this will bring us into handleClose() above, which updates // state to STATE_CLOSED close(); @@ -600,30 +730,17 @@ private: } if (state != STATE_CLOSED) { - state = sentRequests.empty() ? STATE_IDLE : STATE_WAITING_FOR_RESPONSE; + setState(sentRequests.empty() ? 
STATE_IDLE : STATE_WAITING_FOR_RESPONSE); } // notify request after we change state, so this connection is idle // if completion triggers other requests (which is likely) - // SG_LOG(SG_IO, SG_INFO, "*** responseComplete:" << activeRequest->url()); completedRequest->responseComplete(); client->requestFinished(this); setTerminator("\r\n"); } - enum ConnectionState { - STATE_IDLE = 0, - STATE_WAITING_FOR_RESPONSE, - STATE_GETTING_HEADERS, - STATE_GETTING_BODY, - STATE_GETTING_CHUNKED, - STATE_GETTING_CHUNKED_BYTES, - STATE_GETTING_TRAILER, - STATE_SOCKET_ERROR, - STATE_CLOSED ///< connection should be closed now - }; - Client* client; Request_ptr activeRequest; ConnectionState state; @@ -639,6 +756,8 @@ private: RequestList sentRequests; ContentDecoder _contentDecoder; + std::string _connectionId; + unsigned int _maxPipelineLength; }; #endif // of !ENABLE_CURL @@ -647,11 +766,12 @@ Client::Client() : { d->proxyPort = 0; d->maxConnections = 4; + d->maxHostConnections = 4; d->bytesTransferred = 0; d->lastTransferRate = 0; d->timeTransferSample.stamp(); d->totalBytesDownloaded = 0; - + d->maxPipelineDepth = 5; setUserAgent("SimGear-" SG_STRINGIZE(SIMGEAR_VERSION)); #if defined(ENABLE_CURL) static bool didInitCurlGlobal = false; @@ -660,7 +780,7 @@ Client::Client() : didInitCurlGlobal = true; } - d->curlMulti = curl_multi_init(); + d->createCurlMulti(); #endif } @@ -673,13 +793,30 @@ Client::~Client() void Client::setMaxConnections(unsigned int maxCon) { - if (maxCon < 1) { - throw sg_range_exception("illegal HTTP::Client::setMaxConnections value"); - } - d->maxConnections = maxCon; #if defined(ENABLE_CURL) - curl_multi_setopt(d->curlMulti, CURLMOPT_MAXCONNECTS, (long) maxCon); + curl_multi_setopt(d->curlMulti, CURLMOPT_MAX_TOTAL_CONNECTIONS, (long) maxCon); +#endif +} + +void Client::setMaxHostConnections(unsigned int maxHostCon) +{ + d->maxHostConnections = maxHostCon; +#if defined(ENABLE_CURL) + curl_multi_setopt(d->curlMulti, CURLMOPT_MAX_HOST_CONNECTIONS, (long) maxHostCon); +#endif +} + +void Client::setMaxPipelineDepth(unsigned int depth) +{ + d->maxPipelineDepth = depth; +#if defined(ENABLE_CURL) + curl_multi_setopt(d->curlMulti, CURLMOPT_MAX_PIPELINE_LENGTH, (long) depth); +#else + ConnectionDict::iterator it = d->connections.begin(); + for (; it != d->connections.end(); ) { + it->second->setMaxPipelineLength(depth); + } #endif } @@ -688,36 +825,46 @@ void Client::update(int waitTimeout) #if defined(ENABLE_CURL) int remainingActive, messagesInQueue; curl_multi_perform(d->curlMulti, &remainingActive); - d->haveActiveRequests = (remainingActive > 0); CURLMsg* msg; while ((msg = curl_multi_info_read(d->curlMulti, &messagesInQueue))) { if (msg->msg == CURLMSG_DONE) { - Request* req; + Request* rawReq = 0; CURL *e = msg->easy_handle; - curl_easy_getinfo(e, CURLINFO_PRIVATE, &req); + curl_easy_getinfo(e, CURLINFO_PRIVATE, &rawReq); + + // ensure request stays valid for the moment + // eg if responseComplete cancels us + Request_ptr req(rawReq); long responseCode; curl_easy_getinfo(e, CURLINFO_RESPONSE_CODE, &responseCode); + // remove from the requests map now, + // in case the callbacks perform a cancel. 
We'll use + // the absence from the request dict in cancel to avoid + // a double remove + ClientPrivate::RequestCurlMap::iterator it = d->requests.find(req); + assert(it != d->requests.end()); + assert(it->second == e); + d->requests.erase(it); + if (msg->data.result == 0) { req->responseComplete(); } else { - fprintf(stderr, "Result: %d - %s\n", - msg->data.result, curl_easy_strerror(msg->data.result)); + SG_LOG(SG_IO, SG_WARN, "CURL Result:" << msg->data.result << " " << curl_easy_strerror(msg->data.result)); req->setFailure(msg->data.result, curl_easy_strerror(msg->data.result)); } curl_multi_remove_handle(d->curlMulti, e); - - // balance the reference we take in makeRequest - SGReferenced::put(req); curl_easy_cleanup(e); - } - else { - SG_LOG(SG_IO, SG_ALERT, "CurlMSG:" << msg->msg); + } else { + // should never happen since CURLMSG_DONE is the only code + // defined! + SG_LOG(SG_IO, SG_ALERT, "unknown CurlMSG:" << msg->msg); } } // of curl message processing loop + SGTimeStamp::sleepForMSec(waitTimeout); #else if (!d->poller.hasChannels() && (waitTimeout > 0)) { SGTimeStamp::sleepForMSec(waitTimeout); @@ -777,12 +924,17 @@ void Client::makeRequest(const Request_ptr& r) return; } + r->_client = this; + #if defined(ENABLE_CURL) + ClientPrivate::RequestCurlMap::iterator rit = d->requests.find(r); + assert(rit == d->requests.end()); + CURL* curlRequest = curl_easy_init(); curl_easy_setopt(curlRequest, CURLOPT_URL, r->url().c_str()); - // manually increase the ref count of the request - SGReferenced::get(r.get()); + d->requests[r] = curlRequest; + curl_easy_setopt(curlRequest, CURLOPT_PRIVATE, r.get()); // disable built-in libCurl progress feedback curl_easy_setopt(curlRequest, CURLOPT_NOPROGRESS, 1); @@ -793,6 +945,7 @@ void Client::makeRequest(const Request_ptr& r) curl_easy_setopt(curlRequest, CURLOPT_HEADERDATA, r.get()); curl_easy_setopt(curlRequest, CURLOPT_USERAGENT, d->userAgent.c_str()); + curl_easy_setopt(curlRequest, CURLOPT_HTTP_VERSION, CURL_HTTP_VERSION_1_1); if (!d->proxy.empty()) { curl_easy_setopt(curlRequest, CURLOPT_PROXY, d->proxy.c_str()); @@ -847,9 +1000,9 @@ void Client::makeRequest(const Request_ptr& r) } curl_multi_add_handle(d->curlMulti, curlRequest); - d->haveActiveRequests = true; -// FIXME - premature? +// this seems premature, but we don't have a callback from Curl we could +// use to trigger when the requst is actually sent. r->requestStart(); #else @@ -898,29 +1051,66 @@ void Client::makeRequest(const Request_ptr& r) } } - if (!con && atConnectionsLimit) { + bool atHostConnectionsLimit = (count >= d->maxHostConnections); + + if (!con && (atConnectionsLimit || atHostConnectionsLimit)) { // all current connections are busy (active), and we don't // have free connections to allocate, so let's assign to // an existing one randomly. Ideally we'd used whichever one will // complete first but we don't have that info. 
int index = rand() % count; - for (it = d->connections.find(connectionId); index > 0; --index) { ; } + for (it = d->connections.find(connectionId); index > 0; --index, ++it) { ; } con = it->second; } // allocate a new connection object if (!con) { - con = new Connection(this); + static int connectionSuffx = 0; + + std::stringstream ss; + ss << connectionId << "-" << connectionSuffx++; + + SG_LOG(SG_IO, SG_DEBUG, "allocating new connection for ID:" << ss.str()); + con = new Connection(this, ss.str()); con->setServer(host, port); + con->setMaxPipelineLength(d->maxPipelineDepth); d->poller.addChannel(con); d->connections.insert(d->connections.end(), ConnectionDict::value_type(connectionId, con)); } + SG_LOG(SG_IO, SG_DEBUG, "queing request for " << r->url() << " on:" << con->connectionId()); con->queueRequest(r); #endif } +void Client::cancelRequest(const Request_ptr &r, std::string reason) +{ +#if defined(ENABLE_CURL) + ClientPrivate::RequestCurlMap::iterator it = d->requests.find(r); + if(it == d->requests.end()) { + // already being removed, presumably inside ::update() + // nothing more to do + return; + } + + CURLMcode err = curl_multi_remove_handle(d->curlMulti, it->second); + assert(err == CURLM_OK); + + // clear the request pointer form the curl-easy object + curl_easy_setopt(it->second, CURLOPT_PRIVATE, 0); + + curl_easy_cleanup(it->second); + d->requests.erase(it); +#else + ConnectionDict::iterator it = d->connections.begin(); + for (; it != d->connections.end(); ++it) { + (it->second)->cancelRequest(r); + } +#endif + r->setFailure(-1, reason); +} + //------------------------------------------------------------------------------ FileRequestRef Client::save( const std::string& url, const std::string& filename ) @@ -975,7 +1165,7 @@ void Client::setProxy( const std::string& proxy, bool Client::hasActiveRequests() const { #if defined(ENABLE_CURL) - return d->haveActiveRequests; + return !d->requests.empty(); #else ConnectionDict::const_iterator it = d->connections.begin(); for (; it != d->connections.end(); ++it) { @@ -1026,9 +1216,14 @@ uint64_t Client::totalBytesDownloaded() const size_t Client::requestWriteCallback(char *ptr, size_t size, size_t nmemb, void *userdata) { size_t byteSize = size * nmemb; - Request* req = static_cast(userdata); req->processBodyBytes(ptr, byteSize); + + Client* cl = req->http(); + if (cl) { + cl->receivedBytes(byteSize); + } + return byteSize; } @@ -1066,7 +1261,7 @@ size_t Client::requestHeaderCallback(char *rawBuffer, size_t size, size_t nitems return byteSize; // skip headers associated with 100-continue status } - int colonPos = h.find(':'); + size_t colonPos = h.find(':'); if (colonPos == std::string::npos) { SG_LOG(SG_IO, SG_WARN, "malformed HTTP response header:" << h); return byteSize; @@ -1080,6 +1275,39 @@ size_t Client::requestHeaderCallback(char *rawBuffer, size_t size, size_t nitems return byteSize; } +void Client::debugDumpRequests() +{ +#if defined(ENABLE_CURL) + SG_LOG(SG_IO, SG_INFO, "== HTTP request dump"); + ClientPrivate::RequestCurlMap::iterator it = d->requests.begin(); + for (; it != d->requests.end(); ++it) { + SG_LOG(SG_IO, SG_INFO, "\t" << it->first->url()); + } + SG_LOG(SG_IO, SG_INFO, "=="); +#else + SG_LOG(SG_IO, SG_INFO, "== HTTP connection dump"); + ConnectionDict::iterator it = d->connections.begin(); + for (; it != d->connections.end(); ++it) { + it->second->debugDumpRequests(); + } + SG_LOG(SG_IO, SG_INFO, "=="); +#endif +} + +void Client::clearAllConnections() +{ +#if defined(ENABLE_CURL) + 
curl_multi_cleanup(d->curlMulti); + d->createCurlMulti(); +#else + ConnectionDict::iterator it = d->connections.begin(); + for (; it != d->connections.end(); ++it) { + delete it->second; + } + d->connections.clear(); +#endif +} + } // of namespace HTTP } // of namespace simgear
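
A minimal usage sketch of the client-side API this patch adds or reworks: setMaxHostConnections(), setMaxPipelineDepth(), save(), the curl-backed update() loop and cancelRequest(). The Client calls are the ones defined in the diff above; the header paths, URL, file name, polling loop and the FileRequestRef-to-Request_ptr conversion are illustrative assumptions, not something this diff shows.

// Sketch only, assuming the usual SimGear header layout.
#include <simgear/io/HTTPClient.hxx>
#include <simgear/io/HTTPFileRequest.hxx>

int main()
{
    simgear::HTTP::Client cl;
    cl.setUserAgent("http-sketch/1.0");

    // connection and pipelining limits wired through to libcurl
    // (or to the NetChat connections in the non-curl build)
    cl.setMaxConnections(4);      // CURLMOPT_MAX_TOTAL_CONNECTIONS
    cl.setMaxHostConnections(4);  // CURLMOPT_MAX_HOST_CONNECTIONS
    cl.setMaxPipelineDepth(5);    // CURLMOPT_MAX_PIPELINE_LENGTH / per-connection queue

    // queue a download straight to a file on disk
    simgear::HTTP::FileRequestRef req =
        cl.save("http://example.com/some/file.bin", "/tmp/file.bin");

    bool abortRequested = false; // would be driven by the application
    while (cl.hasActiveRequests()) {
        // drives pending transfers; the argument is a wait/poll time in milliseconds
        cl.update(100);

        if (abortRequested) {
            // fails the request with the given reason and detaches it from the
            // transfer machinery (curl easy handle or Connection queues)
            cl.cancelRequest(req, "cancelled by user");
            abortRequested = false;
        }
    }
    return 0;
}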