From 626752beab7c12a355ab707d70aba6f4fe096c10 Mon Sep 17 00:00:00 2001
From: Monty Brandenberg
Date: Wed, 19 Jun 2013 13:55:54 -0400
Subject: SH-4252 Add second policy class for large mesh asset downloads

Added second mesh class as well as an asset upload class.  Refactored
initialization to use less code and more data to cleanly get http started.
Modified mesh to use the new http class for large requests (>2MB for now).
Added an additional timeout setting to llcorehttp to distinguish connection
timeout from transport timeout; the transport timeout values are now used
for large asset downloads that may need more time.
---
 indra/llcorehttp/_httppolicy.cpp | 6 ++++++
 1 file changed, 6 insertions(+)

(limited to 'indra/llcorehttp/_httppolicy.cpp')

diff --git a/indra/llcorehttp/_httppolicy.cpp b/indra/llcorehttp/_httppolicy.cpp
index 76c1e22431..54c9c6bb1b 100755
--- a/indra/llcorehttp/_httppolicy.cpp
+++ b/indra/llcorehttp/_httppolicy.cpp
@@ -140,6 +140,7 @@ void HttpPolicy::addOp(HttpOpRequest * op)
 	const int policy_class(op->mReqPolicy);
 
 	op->mPolicyRetries = 0;
+	op->mPolicy503Retries = 0;
 	mState[policy_class].mReadyQueue.push(op);
 }
 
@@ -155,6 +156,7 @@ void HttpPolicy::retryOp(HttpOpRequest * op)
 			5000000				// ... to every 5.0 S.
 		};
 	static const int delta_max(int(LL_ARRAY_SIZE(retry_deltas)) - 1);
+	static const HttpStatus error_503(503);
 
 	const HttpTime now(totalTime());
 	const int policy_class(op->mReqPolicy);
@@ -162,6 +164,10 @@ void HttpPolicy::retryOp(HttpOpRequest * op)
 	const HttpTime delta(retry_deltas[llclamp(op->mPolicyRetries, 0, delta_max)]);
 	op->mPolicyRetryAt = now + delta;
 	++op->mPolicyRetries;
+	if (error_503 == op->mStatus)
+	{
+		++op->mPolicy503Retries;
+	}
 	LL_WARNS("CoreHttp") << "HTTP request " << static_cast<HttpHandle>(op)
 			<< " retry " << op->mPolicyRetries
 			<< " scheduled for +" << (delta / HttpTime(1000))
-- cgit v1.2.3


From d6cbcd591aea32357d50b266efe8a95754302cbf Mon Sep 17 00:00:00 2001
From: Monty Brandenberg
Date: Thu, 20 Jun 2013 19:18:39 -0400
Subject: SH-4257 Preparation for a new cap grant: GetMesh2

Mesh repo is using three policy classes now: one for large objects, one for
GetMesh2 regions, one for GetMesh regions.  It's also detecting the presence
of the cap and using the correct class.  Class initialization cleaned up
significantly in llappcorehttp using data-directed code.  Pulled in the
changes to HttpHeader done for sunshine-internal, then did a refactoring
pass on the header callback which now uses a unified approach to clean up
and deliver header information to all interested parties.  Added support
for using Retry-After header information on 503 retries.
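The retry bookkeeping from the patch above (a dedicated 503 counter) and the Retry-After handling described here land in the patch below.  As a self-contained illustration of the combined scheduling rule, the sketch that follows uses hypothetical names (RetryState, schedule_retry); the microsecond HttpTime units, the 30-second acceptance cap and the final 5.0 s backoff step mirror the patches, while the other backoff table entries are placeholder values, not the viewer's actual table.

// Illustrative sketch only -- not the llcorehttp implementation.
// RetryState and schedule_retry are hypothetical names; only the final
// 5.0 s backoff entry, the microsecond time base and the "< 30 s"
// Retry-After cap come from the patches, the other table values are
// placeholders.
#include <algorithm>
#include <iostream>

typedef unsigned long long HttpTime;        // microseconds

struct RetryState
{
    int      mPolicyRetries = 0;            // total retries so far
    int      mPolicy503Retries = 0;         // retries caused by HTTP 503
    HttpTime mPolicyRetryAt = 0;            // absolute time of next attempt
};

HttpTime schedule_retry(RetryState & st, HttpTime now,
                        int http_status, long reply_retry_after_secs)
{
    static const HttpTime retry_deltas[] =
    {
        250000, 500000, 1000000, 2000000, 5000000   // last entry: 5.0 s
    };
    static const int delta_max = int(sizeof(retry_deltas) / sizeof(retry_deltas[0])) - 1;

    // Default: backoff from the table, indexed by retry count.
    HttpTime delta = retry_deltas[std::min(std::max(st.mPolicyRetries, 0), delta_max)];

    // A small server-supplied Retry-After (seconds) overrides the table.
    if (reply_retry_after_secs > 0 && reply_retry_after_secs < 30)
    {
        delta = HttpTime(reply_retry_after_secs) * 1000000ULL;
    }

    st.mPolicyRetryAt = now + delta;
    ++st.mPolicyRetries;
    if (http_status == 503)
    {
        ++st.mPolicy503Retries;
    }
    return delta;
}

int main()
{
    RetryState st;
    std::cout << schedule_retry(st, 0, 503, 0)  << " uS\n";   // 250000 from the table
    std::cout << schedule_retry(st, 0, 503, 4)  << " uS\n";   // 4000000: Retry-After wins
    std::cout << schedule_retry(st, 0, 500, 90) << " uS\n";   // 1000000: 90 s is ignored
    return 0;
}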
---
 indra/llcorehttp/_httppolicy.cpp | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

(limited to 'indra/llcorehttp/_httppolicy.cpp')

diff --git a/indra/llcorehttp/_httppolicy.cpp b/indra/llcorehttp/_httppolicy.cpp
index 54c9c6bb1b..5f303dd0fe 100755
--- a/indra/llcorehttp/_httppolicy.cpp
+++ b/indra/llcorehttp/_httppolicy.cpp
@@ -160,8 +160,12 @@ void HttpPolicy::retryOp(HttpOpRequest * op)
 	const HttpTime now(totalTime());
 	const int policy_class(op->mReqPolicy);
-	
-	const HttpTime delta(retry_deltas[llclamp(op->mPolicyRetries, 0, delta_max)]);
+	HttpTime delta(retry_deltas[llclamp(op->mPolicyRetries, 0, delta_max)]);
+
+	if (op->mReplyRetryAfter > 0 && op->mReplyRetryAfter < 30)
+	{
+		delta = op->mReplyRetryAfter * U64L(1000000);
+	}
 	op->mPolicyRetryAt = now + delta;
 	++op->mPolicyRetries;
 	if (error_503 == op->mStatus)
@@ -170,10 +174,10 @@ void HttpPolicy::retryOp(HttpOpRequest * op)
 	}
 	LL_WARNS("CoreHttp") << "HTTP request " << static_cast<HttpHandle>(op)
 			<< " retry " << op->mPolicyRetries
-			<< " scheduled for +" << (delta / HttpTime(1000))
+			<< " scheduled in " << (delta / HttpTime(1000))
 			<< " mS. Status: " << op->mStatus.toHex()
 			<< LL_ENDL;
-	if (op->mTracing > 0)
+	if (op->mTracing > HTTP_TRACE_OFF)
 	{
 		LL_INFOS("CoreHttp") << "TRACE, ToRetryQueue, Handle: "
 			<< static_cast<HttpHandle>(op)
-- cgit v1.2.3


From eff651cffca60f2b69f6c596a8e9aa9e1ab44d3c Mon Sep 17 00:00:00 2001
From: Monty Brandenberg
Date: Fri, 12 Jul 2013 15:00:24 -0400
Subject: SH-4312 Configuration data between viewer and llcorehttp is clumsy.

Much improved.  Unified the global and class options into a single option
list.  Implemented static and dynamic setting paths as much as possible.
Dynamic path does require packet/RPC but otherwise there's near unification.
Dynamic modes can't get values back yet due to the response/notifier scheme
but this doesn't bother me.  Flatten global and class options into simpler
struct-like entities.  Setter/getter available on these when needed
(external APIs) but code can otherwise fiddle directly when it knows what
to do.  Much duplicated options/state removed from HttpPolicy.  Comments
cleaned up.  Threads better described and consistently mentioned in API
docs.  Integration test extended for 503 responses with Retry-After headers.
---
 indra/llcorehttp/_httppolicy.cpp | 110 +++++++++++++++++++--------------------
 1 file changed, 54 insertions(+), 56 deletions(-)

(limited to 'indra/llcorehttp/_httppolicy.cpp')

diff --git a/indra/llcorehttp/_httppolicy.cpp b/indra/llcorehttp/_httppolicy.cpp
index 5f303dd0fe..2754e8ef07 100755
--- a/indra/llcorehttp/_httppolicy.cpp
+++ b/indra/llcorehttp/_httppolicy.cpp
@@ -4,7 +4,7 @@
  *
  * $LicenseInfo:firstyear=2012&license=viewerlgpl$
  * Second Life Viewer Source Code
- * Copyright (C) 2012, Linden Research, Inc.
+ * Copyright (C) 2012-2013, Linden Research, Inc.
  *
  * This library is free software; you can redistribute it and/or
  * modify it under the terms of the GNU Lesser General Public
@@ -41,57 +41,64 @@ namespace LLCore
 
 
 // Per-policy-class data for a running system.
-// Collection of queues, parameters, history, metrics, etc.
+// Collection of queues, options and other data
 // for a single policy class.
 //
 // Threading:  accessed only by worker thread
 
-struct HttpPolicy::State
+struct HttpPolicy::ClassState
 {
 public:
-	State()
-		: mConnMax(HTTP_CONNECTION_LIMIT_DEFAULT),
-		  mConnAt(HTTP_CONNECTION_LIMIT_DEFAULT),
-		  mConnMin(1),
-		  mNextSample(0),
-		  mErrorCount(0),
-		  mErrorFactor(0)
+	ClassState()
 		{}
 	
 	HttpReadyQueue mReadyQueue;
 	HttpRetryQueue mRetryQueue;
 	
 	HttpPolicyClass mOptions;
-	
-	long mConnMax;
-	long mConnAt;
-	long mConnMin;
-	
-	HttpTime mNextSample;
-	unsigned long mErrorCount;
-	unsigned long mErrorFactor;
 };
 
 
 HttpPolicy::HttpPolicy(HttpService * service)
-	: mActiveClasses(0),
-	  mState(NULL),
-	  mService(service)
-{}
+	: mService(service)
+{
+	// Create default class
+	mClasses.push_back(new ClassState());
+}
 
 
 HttpPolicy::~HttpPolicy()
 {
 	shutdown();
+
+	for (class_list_t::iterator it(mClasses.begin()); it != mClasses.end(); ++it)
+	{
+		delete (*it);
+	}
+	mClasses.clear();
 
 	mService = NULL;
 }
 
 
+HttpRequest::policy_t HttpPolicy::createPolicyClass()
+{
+	const HttpRequest::policy_t policy_class(mClasses.size());
+	if (policy_class >= HTTP_POLICY_CLASS_LIMIT)
+	{
+		return HttpRequest::INVALID_POLICY_ID;
+	}
+	mClasses.push_back(new ClassState());
+	return policy_class;
+}
+
+
 void HttpPolicy::shutdown()
 {
-	for (int policy_class(0); policy_class < mActiveClasses; ++policy_class)
+	for (int policy_class(0); policy_class < mClasses.size(); ++policy_class)
 	{
-		HttpRetryQueue & retryq(mState[policy_class].mRetryQueue);
+		ClassState & state(*mClasses[policy_class]);
+		
+		HttpRetryQueue & retryq(state.mRetryQueue);
 		while (! retryq.empty())
 		{
 			HttpOpRequest * op(retryq.top());
@@ -101,7 +108,7 @@ void HttpPolicy::shutdown()
 			op->release();
 		}
 
-		HttpReadyQueue & readyq(mState[policy_class].mReadyQueue);
+		HttpReadyQueue & readyq(state.mReadyQueue);
 		while (! readyq.empty())
 		{
 			HttpOpRequest * op(readyq.top());
@@ -111,28 +118,11 @@ void HttpPolicy::shutdown()
 			op->release();
 		}
 	}
-	delete [] mState;
-	mState = NULL;
-	mActiveClasses = 0;
 }
 
 
-void HttpPolicy::start(const HttpPolicyGlobal & global,
-					   const std::vector<HttpPolicyClass> & classes)
-{
-	llassert_always(! mState);
-
-	mGlobalOptions = global;
-	mActiveClasses = classes.size();
-	mState = new State [mActiveClasses];
-	for (int i(0); i < mActiveClasses; ++i)
-	{
-		mState[i].mOptions = classes[i];
-		mState[i].mConnMax = classes[i].mConnectionLimit;
-		mState[i].mConnAt = mState[i].mConnMax;
-		mState[i].mConnMin = 2;
-	}
-}
+void HttpPolicy::start()
+{}
 
 
 void HttpPolicy::addOp(HttpOpRequest * op)
@@ -141,7 +131,7 @@ void HttpPolicy::addOp(HttpOpRequest * op)
 
 	op->mPolicyRetries = 0;
 	op->mPolicy503Retries = 0;
-	mState[policy_class].mReadyQueue.push(op);
+	mClasses[policy_class]->mReadyQueue.push(op);
 }
 
 
@@ -183,7 +173,7 @@ void HttpPolicy::retryOp(HttpOpRequest * op)
 			<< static_cast<HttpHandle>(op)
 			<< LL_ENDL;
 	}
-	mState[policy_class].mRetryQueue.push(op);
+	mClasses[policy_class]->mRetryQueue.push(op);
 }
 
 
@@ -204,11 +194,11 @@ HttpService::ELoopSpeed HttpPolicy::processReadyQueue()
 	HttpService::ELoopSpeed result(HttpService::REQUEST_SLEEP);
 	HttpLibcurl & transport(mService->getTransport());
 	
-	for (int policy_class(0); policy_class < mActiveClasses; ++policy_class)
+	for (int policy_class(0); policy_class < mClasses.size(); ++policy_class)
 	{
-		State & state(mState[policy_class]);
+		ClassState & state(*mClasses[policy_class]);
 		int active(transport.getActiveCountInClass(policy_class));
-		int needed(state.mConnAt - active);		// Expect negatives here
+		int needed(state.mOptions.mConnectionLimit - active);		// Expect negatives here
 
 		HttpRetryQueue & retryq(state.mRetryQueue);
 		HttpReadyQueue & readyq(state.mReadyQueue);
@@ -256,9 +246,9 @@ HttpService::ELoopSpeed HttpPolicy::processReadyQueue()
 
 bool HttpPolicy::changePriority(HttpHandle handle, HttpRequest::priority_t priority)
 {
-	for (int policy_class(0); policy_class < mActiveClasses; ++policy_class)
+	for (int policy_class(0); policy_class < mClasses.size(); ++policy_class)
 	{
-		State & state(mState[policy_class]);
+		ClassState & state(*mClasses[policy_class]);
 		// We don't scan retry queue because a priority change there
 		// is meaningless.  The request will be issued based on retry
 		// intervals not priority value, which is now moot.
@@ -286,9 +276,9 @@ bool HttpPolicy::changePriority(HttpHandle handle, HttpRequest::priority_t prior
 
 bool HttpPolicy::cancel(HttpHandle handle)
 {
-	for (int policy_class(0); policy_class < mActiveClasses; ++policy_class)
+	for (int policy_class(0); policy_class < mClasses.size(); ++policy_class)
 	{
-		State & state(mState[policy_class]);
+		ClassState & state(*mClasses[policy_class]);
 
 		// Scan retry queue
 		HttpRetryQueue::container_type & c1(state.mRetryQueue.get_container());
@@ -382,13 +372,21 @@ bool HttpPolicy::stageAfterCompletion(HttpOpRequest * op)
 	return false;						// not active
 }
 
+
+HttpPolicyClass & HttpPolicy::getClassOptions(HttpRequest::policy_t pclass)
+{
+	llassert_always(pclass >= 0 && pclass < mClasses.size());
+	
+	return mClasses[pclass]->mOptions;
+}
+
 
 int HttpPolicy::getReadyCount(HttpRequest::policy_t policy_class) const
 {
-	if (policy_class < mActiveClasses)
+	if (policy_class < mClasses.size())
 	{
-		return (mState[policy_class].mReadyQueue.size()
-				+ mState[policy_class].mRetryQueue.size());
+		return (mClasses[policy_class]->mReadyQueue.size()
+				+ mClasses[policy_class]->mRetryQueue.size());
 	}
 	return 0;
 }
-- cgit v1.2.3


From 46dd3df73370590f61eb9a2cffcd732463a4319b Mon Sep 17 00:00:00 2001
From: Monty Brandenberg
Date: Mon, 29 Jul 2013 16:26:02 -0400
Subject: Reduce HTTP request retry log spam.

Thought I'd done this in an earlier maintenance branch but the code
never showed up.  I'll do it again.
Spam is still available by bumping 'CoreHttp' tag up to DEBUGS level
logging.  Needed for QA.  Can also get this data from tracing.
---
 indra/llcorehttp/_httppolicy.cpp | 18 ++++++++++--------
 1 file changed, 10 insertions(+), 8 deletions(-)

(limited to 'indra/llcorehttp/_httppolicy.cpp')

diff --git a/indra/llcorehttp/_httppolicy.cpp b/indra/llcorehttp/_httppolicy.cpp
index 2754e8ef07..32a9ba282a 100755
--- a/indra/llcorehttp/_httppolicy.cpp
+++ b/indra/llcorehttp/_httppolicy.cpp
@@ -162,15 +162,17 @@ void HttpPolicy::retryOp(HttpOpRequest * op)
 	{
 		++op->mPolicy503Retries;
 	}
-	LL_WARNS("CoreHttp") << "HTTP request " << static_cast<HttpHandle>(op)
-			<< " retry " << op->mPolicyRetries
-			<< " scheduled in " << (delta / HttpTime(1000))
-			<< " mS. Status: " << op->mStatus.toHex()
-			<< LL_ENDL;
+	LL_DEBUGS("CoreHttp") << "HTTP request " << static_cast<HttpHandle>(op)
+			<< " retry " << op->mPolicyRetries
+			<< " scheduled in " << (delta / HttpTime(1000))
+			<< " mS. Status: " << op->mStatus.toHex()
+			<< LL_ENDL;
 	if (op->mTracing > HTTP_TRACE_OFF)
 	{
 		LL_INFOS("CoreHttp") << "TRACE, ToRetryQueue, Handle: "
 			<< static_cast<HttpHandle>(op)
+			<< ", Delta: " << (delta / HttpTime(1000))
+			<< ", Retries: " << op->mPolicyRetries
 			<< LL_ENDL;
 	}
 	mClasses[policy_class]->mRetryQueue.push(op);
@@ -362,9 +364,9 @@ bool HttpPolicy::stageAfterCompletion(HttpOpRequest * op)
 	}
 	else if (op->mPolicyRetries)
 	{
-		LL_WARNS("CoreHttp") << "HTTP request " << static_cast<HttpHandle>(op)
-			<< " succeeded on retry " << op->mPolicyRetries << "."
-			<< LL_ENDL;
+		LL_DEBUGS("CoreHttp") << "HTTP request " << static_cast<HttpHandle>(op)
+			<< " succeeded on retry " << op->mPolicyRetries << "."
+			<< LL_ENDL;
 	}
 	op->stageFromActive(mService);
-- cgit v1.2.3


From f3927c6ca2aad757fe88fdd59b87986ca8b207a8 Mon Sep 17 00:00:00 2001
From: Monty Brandenberg
Date: Tue, 30 Jul 2013 15:21:31 -0400
Subject: SH-4371 Reduce 22mS inter-connection latency.

This really extended into the client-side request throttling.  Moved
this from llmeshrepository (which doesn't really want to do connection
management) into llcorehttp.  It's now a class option with configurable
rate.  This still isn't the right thing to do as it creates coupling
between viewer and services.  When we get to pipelining, this notion
becomes invalid.
---
 indra/llcorehttp/_httppolicy.cpp | 67 +++++++++++++++++++++++++++++++++++++---
 1 file changed, 63 insertions(+), 4 deletions(-)

(limited to 'indra/llcorehttp/_httppolicy.cpp')

diff --git a/indra/llcorehttp/_httppolicy.cpp b/indra/llcorehttp/_httppolicy.cpp
index 32a9ba282a..808eebc6cc 100755
--- a/indra/llcorehttp/_httppolicy.cpp
+++ b/indra/llcorehttp/_httppolicy.cpp
@@ -49,12 +49,18 @@ struct HttpPolicy::ClassState
 {
 public:
 	ClassState()
+		: mThrottleEnd(0),
+		  mThrottleLeft(0L),
+		  mRequestCount(0L)
 		{}
 	
 	HttpReadyQueue mReadyQueue;
 	HttpRetryQueue mRetryQueue;
 	
 	HttpPolicyClass mOptions;
+	HttpTime mThrottleEnd;
+	long mThrottleLeft;
+	long mRequestCount;
 };
 
 
@@ -190,6 +196,13 @@ void HttpPolicy::retryOp(HttpOpRequest * op)
 // the worker thread may sleep hard otherwise will ask for
 // normal polling frequency.
 //
+// Implements a client-side request rate throttle as well.
+// This is intended to mimic and predict throttling behavior
+// of grid services but that is difficult to do with different
+// time bases.  This also represents a rigid coupling between
+// viewer and server that makes it hard to change parameters
+// and I hope we can make this go away with pipelining.
+//
 HttpService::ELoopSpeed HttpPolicy::processReadyQueue()
 {
 	const HttpTime now(totalTime());
@@ -199,12 +212,22 @@ HttpService::ELoopSpeed HttpPolicy::processReadyQueue()
 	for (int policy_class(0); policy_class < mClasses.size(); ++policy_class)
 	{
 		ClassState & state(*mClasses[policy_class]);
+		const bool throttle_enabled(state.mOptions.mThrottleRate > 0L);
+		const bool throttle_current(throttle_enabled && now < state.mThrottleEnd);
+
+		if (throttle_current && state.mThrottleLeft <= 0)
+		{
+			// Throttled condition, don't serve this class but don't sleep hard.
+			result = HttpService::NORMAL;
+			continue;
+		}
+
 		int active(transport.getActiveCountInClass(policy_class));
 		int needed(state.mOptions.mConnectionLimit - active);		// Expect negatives here
 
 		HttpRetryQueue & retryq(state.mRetryQueue);
 		HttpReadyQueue & readyq(state.mReadyQueue);
-		
+
 		if (needed > 0)
 		{
 			// First see if we have any retries...
@@ -218,10 +241,27 @@ HttpService::ELoopSpeed HttpPolicy::processReadyQueue()
 				op->stageFromReady(mService);
 				op->release();
-				
+
+				++state.mRequestCount;
 				--needed;
+				if (throttle_enabled)
+				{
+					if (now >= state.mThrottleEnd)
+					{
+						// Throttle expired, move to next window
+						LL_DEBUGS("CoreHttp") << "Throttle expired with " << state.mThrottleLeft
+							<< " requests to go and " << state.mRequestCount
+							<< " requests issued." << LL_ENDL;
+						state.mThrottleLeft = state.mOptions.mThrottleRate;
+						state.mThrottleEnd = now + HttpTime(1000000);
+					}
+					if (--state.mThrottleLeft <= 0)
+					{
+						goto throttle_on;
+					}
+				}
 			}
-			
+
 			// Now go on to the new requests...
 			while (needed > 0 && ! readyq.empty())
 			{
@@ -231,10 +271,29 @@ HttpService::ELoopSpeed HttpPolicy::processReadyQueue()
 				op->stageFromReady(mService);
 				op->release();
 
+				++state.mRequestCount;
 				--needed;
+				if (throttle_enabled)
+				{
+					if (now >= state.mThrottleEnd)
+					{
+						// Throttle expired, move to next window
+						LL_DEBUGS("CoreHttp") << "Throttle expired with " << state.mThrottleLeft
+							<< " requests to go and " << state.mRequestCount
+							<< " requests issued." << LL_ENDL;
+						state.mThrottleLeft = state.mOptions.mThrottleRate;
+						state.mThrottleEnd = now + HttpTime(1000000);
+					}
+					if (--state.mThrottleLeft <= 0)
+					{
+						goto throttle_on;
+					}
+				}
 			}
 		}
-		
+
+	throttle_on:
+		
 		if (! readyq.empty() || ! retryq.empty())
 		{
 			// If anything is ready, continue looping...
-- cgit v1.2.3


From e764a2a565e18ce2157788f634e85bc3641976b3 Mon Sep 17 00:00:00 2001
From: Monty Brandenberg
Date: Fri, 16 Aug 2013 18:07:49 -0400
Subject: SH-4407 Tuning to get new code working as well.

Do some runtime code avoidance and skip unnecessary libcurl and
syscall invocations.
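The throttling added above (SH-4371) and the early-out added in the patch below amount to two rules per policy class: skip a class with nothing queued, and otherwise release at most mThrottleRate requests per one-second window, tracked by a countdown (mThrottleLeft) and a window end time (mThrottleEnd).  A compact stand-alone sketch of that window logic follows; Throttle and allow_request are hypothetical names, the goto-based flow of the real loop is simplified away, and only the one-second window and the mThrottle* member names come from the patches.

// Illustrative sketch of a per-policy-class request throttle window.
// Not the llcorehttp code; Throttle and allow_request are made-up names.
#include <iostream>

typedef unsigned long long HttpTime;     // microseconds

struct Throttle
{
    long     mThrottleRate;              // requests allowed per window; 0 = disabled
    HttpTime mThrottleEnd;               // end of the current window
    long     mThrottleLeft;              // requests still allowed in this window

    explicit Throttle(long rate) : mThrottleRate(rate), mThrottleEnd(0), mThrottleLeft(0) {}

    // Returns true if one more request may be issued at time 'now'.
    bool allow_request(HttpTime now)
    {
        if (mThrottleRate <= 0)
        {
            return true;                 // throttling disabled for this class
        }
        if (now >= mThrottleEnd)
        {
            // Window expired: start a new one-second window with a fresh budget.
            mThrottleLeft = mThrottleRate;
            mThrottleEnd = now + HttpTime(1000000);
        }
        if (mThrottleLeft <= 0)
        {
            return false;                // budget exhausted until the window rolls over
        }
        --mThrottleLeft;
        return true;
    }
};

int main()
{
    Throttle t(2);                       // 2 requests per second
    HttpTime now = 0;
    for (int i = 0; i < 5; ++i)
    {
        std::cout << "t=" << now << " allowed=" << t.allow_request(now) << "\n";
        now += 300000;                   // 0.3 s between attempts
    }
    return 0;                            // prints allowed=1,1,0,0,1
}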
---
 indra/llcorehttp/_httppolicy.cpp | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)

(limited to 'indra/llcorehttp/_httppolicy.cpp')

diff --git a/indra/llcorehttp/_httppolicy.cpp b/indra/llcorehttp/_httppolicy.cpp
index 808eebc6cc..c4758aee88 100755
--- a/indra/llcorehttp/_httppolicy.cpp
+++ b/indra/llcorehttp/_httppolicy.cpp
@@ -212,6 +212,14 @@ HttpService::ELoopSpeed HttpPolicy::processReadyQueue()
 	for (int policy_class(0); policy_class < mClasses.size(); ++policy_class)
 	{
 		ClassState & state(*mClasses[policy_class]);
+		HttpRetryQueue & retryq(state.mRetryQueue);
+		HttpReadyQueue & readyq(state.mReadyQueue);
+
+		if (retryq.empty() && readyq.empty())
+		{
+			continue;
+		}
+
 		const bool throttle_enabled(state.mOptions.mThrottleRate > 0L);
 		const bool throttle_current(throttle_enabled && now < state.mThrottleEnd);
 
@@ -225,9 +233,6 @@ HttpService::ELoopSpeed HttpPolicy::processReadyQueue()
 		int active(transport.getActiveCountInClass(policy_class));
 		int needed(state.mOptions.mConnectionLimit - active);		// Expect negatives here
 
-		HttpRetryQueue & retryq(state.mRetryQueue);
-		HttpReadyQueue & readyq(state.mReadyQueue);
-
 		if (needed > 0)
 		{
 			// First see if we have any retries...
-- cgit v1.2.3


From 146a5c3f6c3d1b8e9e92f71dce1e7f058091ea20 Mon Sep 17 00:00:00 2001
From: Monty Brandenberg
Date: Mon, 19 Aug 2013 12:01:26 -0400
Subject: Add 'internal'/'external' token to DEBUG retry message so that dev/QA can know exactly where a retry value was sourced.
---
 indra/llcorehttp/_httppolicy.cpp | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

(limited to 'indra/llcorehttp/_httppolicy.cpp')

diff --git a/indra/llcorehttp/_httppolicy.cpp b/indra/llcorehttp/_httppolicy.cpp
index c4758aee88..ac79a77659 100755
--- a/indra/llcorehttp/_httppolicy.cpp
+++ b/indra/llcorehttp/_httppolicy.cpp
@@ -153,14 +153,16 @@ void HttpPolicy::retryOp(HttpOpRequest * op)
 		};
 	static const int delta_max(int(LL_ARRAY_SIZE(retry_deltas)) - 1);
 	static const HttpStatus error_503(503);
-	
+
 	const HttpTime now(totalTime());
 	const int policy_class(op->mReqPolicy);
 	HttpTime delta(retry_deltas[llclamp(op->mPolicyRetries, 0, delta_max)]);
+	bool external_delta(false);
 
 	if (op->mReplyRetryAfter > 0 && op->mReplyRetryAfter < 30)
 	{
 		delta = op->mReplyRetryAfter * U64L(1000000);
+		external_delta = true;
 	}
 	op->mPolicyRetryAt = now + delta;
 	++op->mPolicyRetries;
@@ -171,7 +173,8 @@ void HttpPolicy::retryOp(HttpOpRequest * op)
 	LL_DEBUGS("CoreHttp") << "HTTP request " << static_cast<HttpHandle>(op)
 			<< " retry " << op->mPolicyRetries
 			<< " scheduled in " << (delta / HttpTime(1000))
-			<< " mS. Status: " << op->mStatus.toHex()
+			<< " mS (" << (external_delta ? "external" : "internal")
+			<< "). Status: " << op->mStatus.toHex()
 			<< LL_ENDL;
 	if (op->mTracing > HTTP_TRACE_OFF)
 	{
-- cgit v1.2.3


From 622eae65551df9a4ca6843a6a657777ff5e2140e Mon Sep 17 00:00:00 2001
From: Monty Brandenberg
Date: Wed, 11 Sep 2013 19:21:31 -0400
Subject: SH-4490 More 'humane' error code presentation from llcorehttp callers

Added toTerseString() conversion on HttpStatus to generate a string
that's more descriptive than the hex value of the HttpStatus value but
still forms a short, searchable token (e.g. "Http_503" or "Core_7").
Using this throughout the viewer now, no live cases of toHex(), I believe.
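Before the patch below swaps toTerseString() in for toHex(), here is a rough idea of the kind of token described above: a short, searchable string composed from an error-class prefix and a numeric code.  Only the "Http_503" and "Core_7" examples come from the commit message; the enum, the make_terse_string name and the "Curl" prefix are assumptions for illustration and do not reflect the real HttpStatus layout.

// Illustrative only: a terse, grep-friendly status token in the spirit of
// HttpStatus::toTerseString().  The enum and make_terse_string are
// hypothetical; llcorehttp's real HttpStatus is structured differently.
#include <iostream>
#include <sstream>
#include <string>

enum StatusType { TYPE_HTTP, TYPE_CORE, TYPE_CURL };

std::string make_terse_string(StatusType type, int code)
{
    const char * prefix = "Unknown";
    switch (type)
    {
    case TYPE_HTTP: prefix = "Http"; break;   // HTTP response codes (e.g. 503)
    case TYPE_CORE: prefix = "Core"; break;   // library-internal error codes
    case TYPE_CURL: prefix = "Curl"; break;   // transport-level error codes (assumed)
    }
    std::ostringstream out;
    out << prefix << '_' << code;             // short, searchable token
    return out.str();
}

int main()
{
    std::cout << make_terse_string(TYPE_HTTP, 503) << "\n";  // Http_503
    std::cout << make_terse_string(TYPE_CORE, 7)   << "\n";  // Core_7
    return 0;
}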
---
 indra/llcorehttp/_httppolicy.cpp | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'indra/llcorehttp/_httppolicy.cpp')

diff --git a/indra/llcorehttp/_httppolicy.cpp b/indra/llcorehttp/_httppolicy.cpp
index ac79a77659..edaf0a5307 100755
--- a/indra/llcorehttp/_httppolicy.cpp
+++ b/indra/llcorehttp/_httppolicy.cpp
@@ -174,7 +174,7 @@ void HttpPolicy::retryOp(HttpOpRequest * op)
 			<< " retry " << op->mPolicyRetries
 			<< " scheduled in " << (delta / HttpTime(1000))
 			<< " mS (" << (external_delta ? "external" : "internal")
-			<< "). Status: " << op->mStatus.toHex()
+			<< "). Status: " << op->mStatus.toTerseString()
 			<< LL_ENDL;
 	if (op->mTracing > HTTP_TRACE_OFF)
 	{
@@ -426,7 +426,7 @@ bool HttpPolicy::stageAfterCompletion(HttpOpRequest * op)
 		LL_WARNS("CoreHttp") << "HTTP request " << static_cast<HttpHandle>(op)
 			<< " failed after " << op->mPolicyRetries
 			<< " retries. Reason: " << op->mStatus.toString()
-			<< " (" << op->mStatus.toHex() << ")"
+			<< " (" << op->mStatus.toTerseString() << ")"
 			<< LL_ENDL;
 	}
 	else if (op->mPolicyRetries)
-- cgit v1.2.3
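Taken together, the SH-4312 and SH-4371 patches above leave per-class behavior (connection limit, request rate) in a plain options struct reached through getClassOptions() on a vector of ClassState objects created by createPolicyClass().  The stand-alone miniature below shows that shape; the names echo the patches, but the types, default values and the public mThrottleRate/mConnectionLimit access are simplified stand-ins for illustration, not llcorehttp code.

// Miniature of the per-class options pattern from the SH-4312/SH-4371 patches.
// Not the llcorehttp implementation; simplified types and invented defaults.
#include <cstddef>
#include <iostream>
#include <vector>

typedef std::size_t policy_t;
static const policy_t INVALID_POLICY_ID = ~policy_t(0);
static const std::size_t POLICY_CLASS_LIMIT = 8;

struct PolicyClassOptions              // struct-like options, fiddled with directly
{
    long mConnectionLimit;
    long mThrottleRate;                // requests/second, 0 = unthrottled
    PolicyClassOptions() : mConnectionLimit(8), mThrottleRate(0) {}
};

class Policy
{
public:
    Policy() { mClasses.push_back(ClassState()); }          // default class 0

    policy_t createPolicyClass()
    {
        const policy_t policy_class(mClasses.size());
        if (policy_class >= POLICY_CLASS_LIMIT)
        {
            return INVALID_POLICY_ID;                       // refuse to grow further
        }
        mClasses.push_back(ClassState());
        return policy_class;
    }

    PolicyClassOptions & getClassOptions(policy_t pclass)
    {
        return mClasses.at(pclass).mOptions;                // bounds-checked access
    }

private:
    struct ClassState { PolicyClassOptions mOptions; };
    std::vector<ClassState> mClasses;
};

int main()
{
    Policy policy;
    policy_t large_mesh(policy.createPolicyClass());        // e.g. a large-asset class
    policy.getClassOptions(large_mesh).mConnectionLimit = 2;
    policy.getClassOptions(large_mesh).mThrottleRate = 10;
    std::cout << "class " << large_mesh << " limit "
              << policy.getClassOptions(large_mesh).mConnectionLimit << "\n";
    return 0;
}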