path: root/indra/newview/lleventpoll.cpp
Diffstat (limited to 'indra/newview/lleventpoll.cpp')
-rwxr-xr-x    indra/newview/lleventpoll.cpp    477
1 files changed, 232 insertions, 245 deletions
diff --git a/indra/newview/lleventpoll.cpp b/indra/newview/lleventpoll.cpp
index 4de6ad4d2f..021d17251d 100755
--- a/indra/newview/lleventpoll.cpp
+++ b/indra/newview/lleventpoll.cpp
@@ -30,261 +30,248 @@
#include "llappviewer.h"
#include "llagent.h"
-#include "llhttpclient.h"
-#include "llhttpconstants.h"
#include "llsdserialize.h"
#include "lleventtimer.h"
#include "llviewerregion.h"
#include "message.h"
#include "lltrans.h"
+#include "llcoros.h"
+#include "lleventcoro.h"
+#include "llcorehttputil.h"
+#include "lleventfilter.h"
-namespace
+namespace LLEventPolling
{
- // We will wait RETRY_SECONDS + (errorCount * RETRY_SECONDS_INC) before retrying after an error.
- // This means we attempt to recover relatively quickly but back off giving more time to recover
- // until we finally give up after MAX_EVENT_POLL_HTTP_ERRORS attempts.
- const F32 EVENT_POLL_ERROR_RETRY_SECONDS = 15.f; // ~ half of a normal timeout.
- const F32 EVENT_POLL_ERROR_RETRY_SECONDS_INC = 5.f; // ~ half of a normal timeout.
- const S32 MAX_EVENT_POLL_HTTP_ERRORS = 10; // ~5 minutes, by the above rules.
-
- class LLEventPollResponder : public LLHTTPClient::Responder
- {
- LOG_CLASS(LLEventPollResponder);
- public:
-
- static LLHTTPClient::ResponderPtr start(const std::string& pollURL, const LLHost& sender);
- void stop();
-
- void makeRequest();
-
- /* virtual */ void completedRaw(const LLChannelDescriptors& channels,
- const LLIOPipe::buffer_ptr_t& buffer);
-
- private:
- LLEventPollResponder(const std::string& pollURL, const LLHost& sender);
- ~LLEventPollResponder();
-
-
- void handleMessage(const LLSD& content);
-
- /* virtual */ void httpFailure();
- /* virtual */ void httpSuccess();
-
- private:
-
- bool mDone;
-
- std::string mPollURL;
- std::string mSender;
-
- LLSD mAcknowledge;
-
- // these are only here for debugging so we can see which poller is which
- static int sCount;
- int mCount;
- S32 mErrorCount;
- };
-
- class LLEventPollEventTimer : public LLEventTimer
- {
- typedef LLPointer<LLEventPollResponder> EventPollResponderPtr;
-
- public:
- LLEventPollEventTimer(F32 period, EventPollResponderPtr responder)
- : LLEventTimer(period), mResponder(responder)
- { }
-
- virtual BOOL tick()
- {
- mResponder->makeRequest();
- return TRUE; // Causes this instance to be deleted.
- }
-
- private:
-
- EventPollResponderPtr mResponder;
- };
-
- //static
- LLHTTPClient::ResponderPtr LLEventPollResponder::start(
- const std::string& pollURL, const LLHost& sender)
- {
- LLHTTPClient::ResponderPtr result = new LLEventPollResponder(pollURL, sender);
- LL_INFOS() << "LLEventPollResponder::start <" << sCount << "> "
- << pollURL << LL_ENDL;
- return result;
- }
-
- void LLEventPollResponder::stop()
- {
- LL_INFOS() << "LLEventPollResponder::stop <" << mCount << "> "
- << mPollURL << LL_ENDL;
- // there should be a way to stop a LLHTTPClient request in progress
- mDone = true;
- }
-
- int LLEventPollResponder::sCount = 0;
-
- LLEventPollResponder::LLEventPollResponder(const std::string& pollURL, const LLHost& sender)
- : mDone(false),
- mPollURL(pollURL),
- mCount(++sCount),
- mErrorCount(0)
- {
- //extract host and port of simulator to set as sender
- LLViewerRegion *regionp = gAgent.getRegion();
- if (!regionp)
- {
- LL_ERRS() << "LLEventPoll initialized before region is added." << LL_ENDL;
- }
- mSender = sender.getIPandPort();
- LL_INFOS() << "LLEventPoll initialized with sender " << mSender << LL_ENDL;
- makeRequest();
- }
-
- LLEventPollResponder::~LLEventPollResponder()
- {
- stop();
- LL_DEBUGS() << "LLEventPollResponder::~Impl <" << mCount << "> "
- << mPollURL << LL_ENDL;
- }
-
- // virtual
- void LLEventPollResponder::completedRaw(const LLChannelDescriptors& channels,
- const LLIOPipe::buffer_ptr_t& buffer)
- {
- if (getStatus() == HTTP_BAD_GATEWAY)
- {
- // These errors are not parsable as LLSD,
- // which LLHTTPClient::Responder::completedRaw will try to do.
- httpCompleted();
- }
- else
- {
- LLHTTPClient::Responder::completedRaw(channels,buffer);
- }
- }
-
- void LLEventPollResponder::makeRequest()
- {
- LLSD request;
- request["ack"] = mAcknowledge;
- request["done"] = mDone;
-
- LL_DEBUGS() << "LLEventPollResponder::makeRequest <" << mCount << "> ack = "
- << LLSDXMLStreamer(mAcknowledge) << LL_ENDL;
- LLHTTPClient::post(mPollURL, request, this);
- }
-
- void LLEventPollResponder::handleMessage(const LLSD& content)
- {
- std::string msg_name = content["message"];
- LLSD message;
- message["sender"] = mSender;
- message["body"] = content["body"];
- LLMessageSystem::dispatch(msg_name, message);
- }
-
- //virtual
- void LLEventPollResponder::httpFailure()
- {
- if (mDone) return;
-
- // A HTTP_BAD_GATEWAY (502) error is our standard timeout response
- // we get this when there are no events.
- if ( getStatus() == HTTP_BAD_GATEWAY )
- {
- mErrorCount = 0;
- makeRequest();
- }
- else if (mErrorCount < MAX_EVENT_POLL_HTTP_ERRORS)
- {
- ++mErrorCount;
-
- // The 'tick' will return TRUE causing the timer to delete this.
- new LLEventPollEventTimer(EVENT_POLL_ERROR_RETRY_SECONDS
- + mErrorCount * EVENT_POLL_ERROR_RETRY_SECONDS_INC
- , this);
-
- LL_WARNS() << dumpResponse() << LL_ENDL;
- }
- else
- {
- LL_WARNS() << dumpResponse()
- << " [count:" << mCount << "] "
- << (mDone ? " -- done" : "") << LL_ENDL;
- stop();
-
- // At this point we have given up and the viewer will not receive HTTP messages from the simulator.
- // IMs, teleports, about land, selecing land, region crossing and more will all fail.
- // They are essentially disconnected from the region even though some things may still work.
- // Since things won't get better until they relog we force a disconnect now.
-
- // *NOTE:Mani - The following condition check to see if this failing event poll
- // is attached to the Agent's main region. If so we disconnect the viewer.
- // Else... its a child region and we just leave the dead event poll stopped and
- // continue running.
- if(gAgent.getRegion() && gAgent.getRegion()->getHost().getIPandPort() == mSender)
- {
- LL_WARNS() << "Forcing disconnect due to stalled main region event poll." << LL_ENDL;
- LLAppViewer::instance()->forceDisconnect(LLTrans::getString("AgentLostConnection"));
- }
- }
- }
-
- //virtual
- void LLEventPollResponder::httpSuccess()
- {
- LL_DEBUGS() << "LLEventPollResponder::result <" << mCount << ">"
- << (mDone ? " -- done" : "") << LL_ENDL;
-
- if (mDone) return;
-
- mErrorCount = 0;
-
- const LLSD& content = getContent();
- if (!content.isMap() ||
- !content.get("events") ||
- !content.get("id"))
- {
- LL_WARNS() << "received event poll with no events or id key: " << dumpResponse() << LL_ENDL;
- makeRequest();
- return;
- }
-
- mAcknowledge = content["id"];
- LLSD events = content["events"];
-
- if(mAcknowledge.isUndefined())
- {
- LL_WARNS() << "LLEventPollResponder: id undefined" << LL_ENDL;
- }
-
- // was LL_INFOS() but now that CoarseRegionUpdate is TCP @ 1/second, it'd be too verbose for viewer logs. -MG
- LL_DEBUGS() << "LLEventPollResponder::httpSuccess <" << mCount << "> " << events.size() << "events (id "
- << LLSDXMLStreamer(mAcknowledge) << ")" << LL_ENDL;
-
- LLSD::array_const_iterator i = events.beginArray();
- LLSD::array_const_iterator end = events.endArray();
- for (; i != end; ++i)
- {
- if (i->has("message"))
- {
- handleMessage(*i);
- }
- }
-
- makeRequest();
- }
+namespace Details
+{
+
+ class LLEventPollImpl
+ {
+ public:
+ LLEventPollImpl(const LLHost &sender);
+
+ void start(const std::string &url);
+ void stop();
+
+ private:
+ // We will wait RETRY_SECONDS + (errorCount * RETRY_SECONDS_INC) before retrying after an error.
+ // This means we attempt to recover relatively quickly but back off giving more time to recover
+ // until we finally give up after MAX_EVENT_POLL_HTTP_ERRORS attempts.
+ static const F32 EVENT_POLL_ERROR_RETRY_SECONDS;
+ static const F32 EVENT_POLL_ERROR_RETRY_SECONDS_INC;
+ static const S32 MAX_EVENT_POLL_HTTP_ERRORS;
+
+ void eventPollCoro(std::string url);
+
+ void handleMessage(const LLSD &content);
+
+ bool mDone;
+ LLCore::HttpRequest::ptr_t mHttpRequest;
+ LLCore::HttpRequest::policy_t mHttpPolicy;
+ std::string mSenderIp;
+ int mCounter;
+ LLCoreHttpUtil::HttpCoroutineAdapter::wptr_t mAdapter;
+
+ static int sNextCounter;
+ };
+
+
+ const F32 LLEventPollImpl::EVENT_POLL_ERROR_RETRY_SECONDS = 15.f; // ~ half of a normal timeout.
+ const F32 LLEventPollImpl::EVENT_POLL_ERROR_RETRY_SECONDS_INC = 5.f; // ~ half of a normal timeout.
+ const S32 LLEventPollImpl::MAX_EVENT_POLL_HTTP_ERRORS = 10; // ~5 minutes, by the above rules.
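
A minimal standalone sketch (plain C++, not viewer code) of the retry schedule these constants imply; it mirrors the waitToRetry arithmetic used in eventPollCoro() below:

#include <cstdio>

// Reproduces the back-off used by eventPollCoro:
// wait = EVENT_POLL_ERROR_RETRY_SECONDS + errorCount * EVENT_POLL_ERROR_RETRY_SECONDS_INC,
// retried while errorCount has not yet reached MAX_EVENT_POLL_HTTP_ERRORS.
int main()
{
    const float retry_base = 15.f;   // EVENT_POLL_ERROR_RETRY_SECONDS
    const float retry_inc  = 5.f;    // EVENT_POLL_ERROR_RETRY_SECONDS_INC
    const int   max_errors = 10;     // MAX_EVENT_POLL_HTTP_ERRORS

    float total = 0.f;
    for (int errorCount = 1; errorCount <= max_errors; ++errorCount)
    {
        float wait = retry_base + errorCount * retry_inc;   // 20s, 25s, ... 65s
        total += wait;
        std::printf("retry %2d after %.0f seconds\n", errorCount, wait);
    }
    std::printf("cumulative back-off before giving up: %.0f seconds\n", total);
    return 0;
}
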
+
+ int LLEventPollImpl::sNextCounter = 1;
+
+
+ LLEventPollImpl::LLEventPollImpl(const LLHost &sender) :
+ mDone(false),
+ mHttpRequest(),
+ mHttpPolicy(LLCore::HttpRequest::DEFAULT_POLICY_ID),
+ mSenderIp(),
+ mCounter(sNextCounter++)
+
+ {
+ LLAppCoreHttp & app_core_http(LLAppViewer::instance()->getAppCoreHttp());
+
+ mHttpRequest = LLCore::HttpRequest::ptr_t(new LLCore::HttpRequest);
+ mHttpPolicy = app_core_http.getPolicy(LLAppCoreHttp::AP_LONG_POLL);
+ mSenderIp = sender.getIPandPort();
+ }
+
+ void LLEventPollImpl::handleMessage(const LLSD& content)
+ {
+ std::string msg_name = content["message"];
+ LLSD message;
+ message["sender"] = mSenderIp;
+ message["body"] = content["body"];
+ LLMessageSystem::dispatch(msg_name, message);
+ }
+
+ void LLEventPollImpl::start(const std::string &url)
+ {
+ if (!url.empty())
+ {
+ std::string coroname =
+ LLCoros::instance().launch("LLEventPollImpl::eventPollCoro",
+ boost::bind(&LLEventPollImpl::eventPollCoro, this, url));
+            LL_INFOS("LLEventPollImpl") << coroname << " with url '" << url << "'" << LL_ENDL;

+ }
+ }
+
+ void LLEventPollImpl::stop()
+ {
+ LL_INFOS() << "requesting stop for event poll coroutine <" << mCounter << ">" << LL_ENDL;
+ mDone = true;
+
+ LLCoreHttpUtil::HttpCoroutineAdapter::ptr_t adapter = mAdapter.lock();
+ if (adapter)
+ {
+ // cancel the yielding operation if any.
+ adapter->cancelSuspendedOperation();
+ }
+ }
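
stop() holds only a weak_ptr to the coroutine's adapter: the adapter lives on the coroutine's own stack, so stop() can wake a suspended request without extending its lifetime. A minimal standalone sketch of that ownership pattern, using plain std types rather than the viewer classes:

#include <memory>

// The worker owns its operation handle via shared_ptr; the controller keeps a
// weak_ptr so it can request cancellation without keeping the operation alive.
struct Operation
{
    bool mCancelled = false;
    void cancel() { mCancelled = true; }   // stands in for cancelSuspendedOperation()
};

class Controller
{
public:
    void attach(const std::shared_ptr<Operation> &op) { mOp = op; }

    void stop()
    {
        if (std::shared_ptr<Operation> op = mOp.lock())   // only act if the worker still exists
        {
            op->cancel();
        }
    }

private:
    std::weak_ptr<Operation> mOp;
};
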
+
+ void LLEventPollImpl::eventPollCoro(std::string url)
+ {
+ LLCoreHttpUtil::HttpCoroutineAdapter::ptr_t httpAdapter(new LLCoreHttpUtil::HttpCoroutineAdapter("EventPoller", mHttpPolicy));
+ LLSD acknowledge;
+ int errorCount = 0;
+ int counter = mCounter; // saved on the stack for logging.
+
+ LL_INFOS("LLEventPollImpl") << " <" << counter << "> entering coroutine." << LL_ENDL;
+
+ mAdapter = httpAdapter;
+
+ // continually poll for a server update until we've been flagged as
+ // finished
+ while (!mDone)
+ {
+ LLSD request;
+ request["ack"] = acknowledge;
+ request["done"] = mDone;
+
+// LL_DEBUGS("LLEventPollImpl::eventPollCoro") << "<" << counter << "> request = "
+// << LLSDXMLStreamer(request) << LL_ENDL;
+
+ LL_DEBUGS("LLEventPollImpl") << " <" << counter << "> posting and yielding." << LL_ENDL;
+ LLSD result = httpAdapter->postAndSuspend(mHttpRequest, url, request);
+
+// LL_DEBUGS("LLEventPollImpl::eventPollCoro") << "<" << counter << "> result = "
+// << LLSDXMLStreamer(result) << LL_ENDL;
+
+ LLSD httpResults = result["http_result"];
+ LLCore::HttpStatus status = LLCoreHttpUtil::HttpCoroutineAdapter::getStatusFromLLSD(httpResults);
+
+ if (!status)
+ {
+ if (status == LLCore::HttpStatus(HTTP_BAD_GATEWAY))
+ { // A HTTP_BAD_GATEWAY (502) error is our standard timeout response
+ // we get this when there are no events.
+ errorCount = 0;
+ continue;
+ }
+ else if ((status == LLCore::HttpStatus(LLCore::HttpStatus::LLCORE, LLCore::HE_OP_CANCELED)) ||
+ (status == LLCore::HttpStatus(HTTP_NOT_FOUND)))
+ { // Event polling for this server has been canceled. In
+ // some cases the server gets ahead of the viewer and will
+ // return a 404 error (Not Found) before the cancel event
+ // comes back in the queue
+ LL_WARNS() << "Canceling coroutine" << LL_ENDL;
+ break;
+ }
+ LL_WARNS("LLEventPollImpl") << "<" << counter << "> Error result from LLCoreHttpUtil::HttpCoroHandler. Code "
+ << status.toTerseString() << ": '" << httpResults["message"] << "'" << LL_ENDL;
+
+ if (errorCount < MAX_EVENT_POLL_HTTP_ERRORS)
+ { // An unanticipated error has been received from our poll
+ // request. Calculate a timeout and wait for it to expire(sleep)
+ // before trying again. The sleep time is increased by 5 seconds
+ // for each consecutive error.
+ LLEventTimeout timeout;
+ ++errorCount;
+
+ F32 waitToRetry = EVENT_POLL_ERROR_RETRY_SECONDS
+ + errorCount * EVENT_POLL_ERROR_RETRY_SECONDS_INC;
+
+ LL_WARNS("LLEventPollImpl") << "<" << counter << "> Retrying in " << waitToRetry <<
+ " seconds, error count is now " << errorCount << LL_ENDL;
+
+ timeout.eventAfter(waitToRetry, LLSD());
+ llcoro::suspendUntilEventOn(timeout);
+
+ if (mDone)
+ break;
+ LL_INFOS("LLEventPollImpl") << "<" << counter << "> About to retry request." << LL_ENDL;
+ continue;
+ }
+ else
+ {
+ // At this point we have given up and the viewer will not receive HTTP messages from the simulator.
+ // IMs, teleports, about land, selecting land, region crossing and more will all fail.
+ // They are essentially disconnected from the region even though some things may still work.
+ // Since things won't get better until they relog we force a disconnect now.
+ mDone = true;
+
+                // *NOTE:Mani - The following condition checks whether this failing event poll
+                // is attached to the Agent's main region. If so we disconnect the viewer;
+                // otherwise it's a child region and we just leave the dead event poll stopped
+                // and continue running.
+ if (gAgent.getRegion() && gAgent.getRegion()->getHost().getIPandPort() == mSenderIp)
+ {
+ LL_WARNS("LLEventPollImpl") << "< " << counter << "> Forcing disconnect due to stalled main region event poll." << LL_ENDL;
+ LLAppViewer::instance()->forceDisconnect(LLTrans::getString("AgentLostConnection"));
+ }
+ break;
+ }
+ }
+
+ errorCount = 0;
+
+ if (!result.isMap() ||
+ !result.get("events") ||
+ !result.get("id"))
+ {
+ LL_WARNS("LLEventPollImpl") << " <" << counter << "> received event poll with no events or id key: " << LLSDXMLStreamer(result) << LL_ENDL;
+ continue;
+ }
+
+ acknowledge = result["id"];
+ LLSD events = result["events"];
+
+ if (acknowledge.isUndefined())
+ {
+ LL_WARNS("LLEventPollImpl") << " id undefined" << LL_ENDL;
+ }
+
+ // was LL_INFOS() but now that CoarseRegionUpdate is TCP @ 1/second, it'd be too verbose for viewer logs. -MG
+            LL_DEBUGS("LLEventPollImpl") << " <" << counter << "> " << events.size() << " events (id " << LLSDXMLStreamer(acknowledge) << ")" << LL_ENDL;
+
+ LLSD::array_const_iterator i = events.beginArray();
+ LLSD::array_const_iterator end = events.endArray();
+ for (; i != end; ++i)
+ {
+ if (i->has("message"))
+ {
+ handleMessage(*i);
+ }
+ }
+ }
+ LL_INFOS("LLEventPollImpl") << " <" << counter << "> Leaving coroutine." << LL_ENDL;
+ }
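
A hypothetical example, not taken from this change, of a successful poll body that the loop above accepts: an LLSD map whose "id" becomes the next request's "ack" and whose "events" entries are dispatched through handleMessage():

#include "llsd.h"

// Illustrative only: "id" is echoed back as the next request's "ack"; every
// entry in "events" that carries a "message" key is handed to handleMessage(),
// which forwards it to LLMessageSystem::dispatch().
static LLSD exampleEventQueueResponse()
{
    LLSD event;
    event["message"] = "TeleportFinish";    // event name dispatched to the message system
    event["body"]    = LLSD::emptyMap();    // message-specific payload

    LLSD response;
    response["id"]     = 42;                // acknowledgement token chosen by the simulator
    response["events"] = LLSD::emptyArray();
    response["events"].append(event);
    return response;
}
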
+
+}
}
-LLEventPoll::LLEventPoll(const std::string& poll_url, const LLHost& sender)
- : mImpl(LLEventPollResponder::start(poll_url, sender))
- { }
+LLEventPoll::LLEventPoll(const std::string& poll_url, const LLHost& sender):
+ mImpl()
+{
+ mImpl = boost::unique_ptr<LLEventPolling::Details::LLEventPollImpl>
+ (new LLEventPolling::Details::LLEventPollImpl(sender));
+ mImpl->start(poll_url);
+}
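
A hedged caller-side sketch with assumed class and member names: a region is expected to own one LLEventPoll built from its event-queue capability URL and to delete it when the region goes away, which requests stop() on the coroutine:

#include <string>
#include "lleventpoll.h"
#include "llhost.h"

// Hypothetical caller (names assumed, not part of this diff); "EventQueueGet"
// is the usual event-queue capability name.
class ExampleRegion
{
public:
    ~ExampleRegion() { delete mEventPoll; }     // destruction asks the poll coroutine to stop

    void gotCapability(const std::string &name, const std::string &url, const LLHost &host)
    {
        if (name == "EventQueueGet")            // capability carrying the event queue URL
        {
            delete mEventPoll;                  // stop any poll already running
            mEventPoll = new LLEventPoll(url, host);
        }
    }

private:
    LLEventPoll *mEventPoll = nullptr;
};
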
LLEventPoll::~LLEventPoll()
{
- LLHTTPClient::Responder* responderp = mImpl.get();
- LLEventPollResponder* event_poll_responder = dynamic_cast<LLEventPollResponder*>(responderp);
- if (event_poll_responder) event_poll_responder->stop();
+ mImpl->stop();
+
}