From 57a2a2beff565c9a2e2ed0937df178c38d114cb3 Mon Sep 17 00:00:00 2001 From: Monroe Linden Date: Fri, 23 Apr 2010 15:50:22 -0700 Subject: Add a way for plugins to send a message and block waiting for the response This requires some cooperation between the plugin and the host, and will only work for specific messages. The way it works is as follows: * the plugin sends a message containing the key "blocking_request" (with any value) * this will cause the "send message" function to block (continuing to pull incoming messages off the network socket and queue them) until it receives a message from the host containing the key "blocking_response" ** NOTE: if the plugin sends a blocking_request that isn't set up to cause the host to send back a blocking_response, it will block forever * the blocking_response message will be handed to the plugin's "receive message" function _immediately_ (before the "send message" function returns) ** this means that the plugin can extract response data for use by the code that sent the message (but is still blocked inside the "send message" call) ** NOTE: this BREAKS the invariant stating that the plugin's "receive message" function will never be called recursively, and the plugin MUST be prepared to deal with this * after the plugin finishes processing the blocking_response message, the "send message" function that was called with the blocking_request message will return to the plugin * any queued messages will be delivered after the outer invocation of the plugin's "receive message" function returns (as normal) Inside the viewer, the code can tell when a plugin is in this blocked state by calling LLPluginProcessParent::isBlocked(). LLPluginClassMedia uses this to avoid sending mouse-move and size-change messages to blocked plugins. --- indra/llplugin/llpluginclassmedia.cpp | 7 +++- indra/llplugin/llpluginmessagepipe.cpp | 17 ++++---- indra/llplugin/llpluginprocesschild.cpp | 68 +++++++++++++++++++++++++++++--- indra/llplugin/llpluginprocesschild.h | 5 +++ indra/llplugin/llpluginprocessparent.cpp | 38 ++++++++++++------ indra/llplugin/llpluginprocessparent.h | 4 ++ 6 files changed, 112 insertions(+), 27 deletions(-) (limited to 'indra/llplugin') diff --git a/indra/llplugin/llpluginclassmedia.cpp b/indra/llplugin/llpluginclassmedia.cpp index e09b511a6e..24c671437e 100644 --- a/indra/llplugin/llpluginclassmedia.cpp +++ b/indra/llplugin/llpluginclassmedia.cpp @@ -160,7 +160,7 @@ void LLPluginClassMedia::idle(void) mPlugin->idle(); } - if((mMediaWidth == -1) || (!mTextureParamsReceived) || (mPlugin == NULL)) + if((mMediaWidth == -1) || (!mTextureParamsReceived) || (mPlugin == NULL) || (mPlugin->isBlocked())) { // Can't process a size change at this time } @@ -437,6 +437,11 @@ void LLPluginClassMedia::mouseEvent(EMouseEventType type, int button, int x, int { if(type == MOUSE_EVENT_MOVE) { + if(!mPlugin || !mPlugin->isRunning() || mPlugin->isBlocked()) + { + // Don't queue up mouse move events that can't be delivered. + } + if((x == mLastMouseX) && (y == mLastMouseY)) { // Don't spam unnecessary mouse move events. diff --git a/indra/llplugin/llpluginmessagepipe.cpp b/indra/llplugin/llpluginmessagepipe.cpp index 1d7ddc5592..e524c88cf8 100644 --- a/indra/llplugin/llpluginmessagepipe.cpp +++ b/indra/llplugin/llpluginmessagepipe.cpp @@ -299,26 +299,23 @@ bool LLPluginMessagePipe::pump(F64 timeout) void LLPluginMessagePipe::processInput(void) { // Look for input delimiter(s) in the input buffer.
- int start = 0; int delim; - while((delim = mInput.find(MESSAGE_DELIMITER, start)) != std::string::npos) + while((delim = mInput.find(MESSAGE_DELIMITER)) != std::string::npos) { // Let the owner process this message if (mOwner) { - mOwner->receiveMessageRaw(mInput.substr(start, delim - start)); + // Pull the message out of the input buffer before calling receiveMessageRaw. + // It's now possible for this function to get called recursively (in the case where the plugin makes a blocking request) + // and this guarantees that the messages will get dequeued correctly. + std::string message(mInput, 0, delim); + mInput.erase(0, delim + 1); + mOwner->receiveMessageRaw(message); } else { LL_WARNS("Plugin") << "!mOwner" << LL_ENDL; } - - start = delim + 1; } - - // Remove delivered messages from the input buffer. - if(start != 0) - mInput = mInput.substr(start); - } diff --git a/indra/llplugin/llpluginprocesschild.cpp b/indra/llplugin/llpluginprocesschild.cpp index ccaf95b36d..2d078cd6ed 100644 --- a/indra/llplugin/llpluginprocesschild.cpp +++ b/indra/llplugin/llpluginprocesschild.cpp @@ -48,6 +48,8 @@ LLPluginProcessChild::LLPluginProcessChild() mSocket = LLSocket::create(gAPRPoolp, LLSocket::STREAM_TCP); mSleepTime = PLUGIN_IDLE_SECONDS; // default: send idle messages at 100Hz mCPUElapsed = 0.0f; + mBlockingRequest = false; + mBlockingResponseReceived = false; } LLPluginProcessChild::~LLPluginProcessChild() @@ -226,6 +228,7 @@ void LLPluginProcessChild::idle(void) void LLPluginProcessChild::sleep(F64 seconds) { + deliverQueuedMessages(); if(mMessagePipe) { mMessagePipe->pump(seconds); @@ -238,6 +241,7 @@ void LLPluginProcessChild::sleep(F64 seconds) void LLPluginProcessChild::pump(void) { + deliverQueuedMessages(); if(mMessagePipe) { mMessagePipe->pump(0.0f); @@ -309,15 +313,32 @@ void LLPluginProcessChild::receiveMessageRaw(const std::string &message) LL_DEBUGS("Plugin") << "Received from parent: " << message << LL_ENDL; + // Decode this message + LLPluginMessage parsed; + parsed.parse(message); + + if(mBlockingRequest) + { + // We're blocking the plugin waiting for a response. + + if(parsed.hasValue("blocking_response")) + { + // This is the message we've been waiting for -- fall through and send it immediately. + mBlockingResponseReceived = true; + } + else + { + // Still waiting. Queue this message and don't process it yet. + mMessageQueue.push(message); + return; + } + } + bool passMessage = true; // FIXME: how should we handle queueing here? { - // Decode this message - LLPluginMessage parsed; - parsed.parse(message); - std::string message_class = parsed.getClass(); if(message_class == LLPLUGIN_MESSAGE_CLASS_INTERNAL) { @@ -425,7 +446,13 @@ void LLPluginProcessChild::receiveMessageRaw(const std::string &message) void LLPluginProcessChild::receivePluginMessage(const std::string &message) { LL_DEBUGS("Plugin") << "Received from plugin: " << message << LL_ENDL; - + + if(mBlockingRequest) + { + // + LL_ERRS("Plugin") << "Can't send a message while already waiting on a blocking request -- aborting!" 
<< LL_ENDL; + } + // Incoming message from the plugin instance bool passMessage = true; @@ -436,6 +463,12 @@ void LLPluginProcessChild::receivePluginMessage(const std::string &message) // Decode this message LLPluginMessage parsed; parsed.parse(message); + + if(parsed.hasValue("blocking_request")) + { + mBlockingRequest = true; + } + std::string message_class = parsed.getClass(); if(message_class == "base") { @@ -494,6 +527,19 @@ void LLPluginProcessChild::receivePluginMessage(const std::string &message) LL_DEBUGS("Plugin") << "Passing through to parent: " << message << LL_ENDL; writeMessageRaw(message); } + + while(mBlockingRequest) + { + // The plugin wants to block and wait for a response to this message. + sleep(mSleepTime); // this will pump the message pipe and process messages + + if(mBlockingResponseReceived || mSocketError != APR_SUCCESS || (mMessagePipe == NULL)) + { + // Response has been received, or we've hit an error state. Stop waiting. + mBlockingRequest = false; + mBlockingResponseReceived = false; + } + } } @@ -502,3 +548,15 @@ void LLPluginProcessChild::setState(EState state) LL_DEBUGS("Plugin") << "setting state to " << state << LL_ENDL; mState = state; }; + +void LLPluginProcessChild::deliverQueuedMessages() +{ + if(!mBlockingRequest) + { + while(!mMessageQueue.empty()) + { + receiveMessageRaw(mMessageQueue.front()); + mMessageQueue.pop(); + } + } +} diff --git a/indra/llplugin/llpluginprocesschild.h b/indra/llplugin/llpluginprocesschild.h index 0e5e85406a..1430ad7a5d 100644 --- a/indra/llplugin/llpluginprocesschild.h +++ b/indra/llplugin/llpluginprocesschild.h @@ -106,6 +106,11 @@ private: LLTimer mHeartbeat; F64 mSleepTime; F64 mCPUElapsed; + bool mBlockingRequest; + bool mBlockingResponseReceived; + std::queue<std::string> mMessageQueue; + + void deliverQueuedMessages(); }; diff --git a/indra/llplugin/llpluginprocessparent.cpp b/indra/llplugin/llpluginprocessparent.cpp index 895c858979..e273410a1d 100644 --- a/indra/llplugin/llpluginprocessparent.cpp +++ b/indra/llplugin/llpluginprocessparent.cpp @@ -54,6 +54,7 @@ LLPluginProcessParent::LLPluginProcessParent(LLPluginProcessParentOwner *owner) mCPUUsage = 0.0; mDisableTimeout = false; mDebug = false; + mBlocked = false; mPluginLaunchTimeout = 60.0f; mPluginLockupTimeout = 15.0f; @@ -479,6 +480,13 @@ void LLPluginProcessParent::setSleepTime(F64 sleep_time, bool force_send) void LLPluginProcessParent::sendMessage(const LLPluginMessage &message) { + if(message.hasValue("blocking_response")) + { + mBlocked = false; + + // reset the heartbeat timer, since there will have been no heartbeats while the plugin was blocked.
+ mHeartbeat.setTimerExpirySec(mPluginLockupTimeout); + } std::string buffer = message.generate(); LL_DEBUGS("Plugin") << "Sending: " << buffer << LL_ENDL; @@ -501,6 +509,11 @@ void LLPluginProcessParent::receiveMessageRaw(const std::string &message) void LLPluginProcessParent::receiveMessage(const LLPluginMessage &message) { + if(message.hasValue("blocking_request")) + { + mBlocked = true; + } + std::string message_class = message.getClass(); if(message_class == LLPLUGIN_MESSAGE_CLASS_INTERNAL) { @@ -689,18 +702,15 @@ bool LLPluginProcessParent::pluginLockedUpOrQuit() { bool result = false; - if(!mDisableTimeout && !mDebug) + if(!mProcess.isRunning()) { - if(!mProcess.isRunning()) - { - LL_WARNS("Plugin") << "child exited" << llendl; - result = true; - } - else if(pluginLockedUp()) - { - LL_WARNS("Plugin") << "timeout" << llendl; - result = true; - } + LL_WARNS("Plugin") << "child exited" << llendl; + result = true; + } + else if(pluginLockedUp()) + { + LL_WARNS("Plugin") << "timeout" << llendl; + result = true; } return result; @@ -708,6 +718,12 @@ bool LLPluginProcessParent::pluginLockedUpOrQuit() bool LLPluginProcessParent::pluginLockedUp() { + if(mDisableTimeout || mDebug || mBlocked) + { + // Never time out a plugin process in these cases. + return false; + } + // If the timer is running and has expired, the plugin has locked up. return (mHeartbeat.getStarted() && mHeartbeat.hasExpired()); } diff --git a/indra/llplugin/llpluginprocessparent.h b/indra/llplugin/llpluginprocessparent.h index cc6c513615..31f125bfb3 100644 --- a/indra/llplugin/llpluginprocessparent.h +++ b/indra/llplugin/llpluginprocessparent.h @@ -74,6 +74,9 @@ public: // returns true if the process has exited or we've had a fatal error bool isDone(void); + // returns true if the process is currently waiting on a blocking request + bool isBlocked(void) { return mBlocked; }; + void killSockets(void); // Go to the proper error state @@ -160,6 +163,7 @@ private: bool mDisableTimeout; bool mDebug; + bool mBlocked; LLProcessLauncher mDebugger; -- cgit v1.2.3 From dacc5afbf0f1bde7454c1eadf56edb669d0741a9 Mon Sep 17 00:00:00 2001 From: Monroe Linden Date: Tue, 27 Apr 2010 17:25:01 -0700 Subject: Architectural changes to LLPlugin message processing. LLPluginProcessParent can now optionally use a separate thread for reading messages from plugin sockets. If this is enabled, it will spawn a single thread and use apr_pollset_poll to wake up the thread when incoming data arrives instead of polling all the descriptors round-robin every frame. This should be somewhat more efficient, and should also allow blocking requests from plugins to be serviced much more quickly (once we start using them). This is currently disabled by default, until it's had a bit more focused testing on multiple platforms. Hooked up the switch to use the message read thread to the PluginUseReadThread debug setting and an item in the Advanced menu in the viewer, and to a checkbox in the UI in llmediaplugintest. Updated some debug logging in the plugin system to have appropriate tags and not log dire-looking warnings during normal operation. LLPluginProcessParent now once again explicitly kills plugin processes (instead of just closing their sockets and waiting for them to exit). The problem we were attempting to solve by not doing the kill (letting the webkit plugin write its cookie file on exit) has been solved another way. 
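The core of that read-thread pattern is small. The sketch below is illustrative only: PluginConnection and readIncoming() are invented stand-ins for LLPluginProcessParent and its input pump, while the APR calls are the same ones the diffs below use.

    #include <apr_poll.h>

    struct PluginConnection { void readIncoming(); };   // hypothetical stand-in type

    void pollLoop(apr_pollset_t *pollset, volatile bool &quitting)
    {
        while(!quitting)
        {
            apr_int32_t count = 0;
            const apr_pollfd_t *ready = NULL;

            // Sleep in the kernel until a plugin socket has data, or until a
            // short timeout passes so the quit flag gets re-checked (timeout
            // is in microseconds; 100000 = 0.1s).
            apr_status_t status = apr_pollset_poll(pollset, 100000, &count, &ready);
            if(status == APR_SUCCESS)
            {
                for(apr_int32_t i = 0; i < count; i++)
                {
                    // client_data was stashed when the descriptor was added.
                    PluginConnection *conn = (PluginConnection *)ready[i].client_data;
                    conn->readIncoming();
                }
            }
            // APR_TIMEUP just means nothing arrived; loop and re-check quitting.
        }
    }
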
LLPluginProcessParent::sendMessage() now attempts to write the outgoing message to the socket immediately instead of waiting for the next frame. This should reduce the latency of sending plugin messages. Added a separate fast timer for LLViewerMedia::updateMedia(). --- indra/llplugin/llpluginmessagepipe.cpp | 107 ++++++-- indra/llplugin/llpluginmessagepipe.h | 9 +- indra/llplugin/llpluginprocesschild.cpp | 9 +- indra/llplugin/llpluginprocessparent.cpp | 414 +++++++++++++++++++++++++++++-- indra/llplugin/llpluginprocessparent.h | 26 +- 5 files changed, 515 insertions(+), 50 deletions(-) (limited to 'indra/llplugin') diff --git a/indra/llplugin/llpluginmessagepipe.cpp b/indra/llplugin/llpluginmessagepipe.cpp index e524c88cf8..a62534de95 100644 --- a/indra/llplugin/llpluginmessagepipe.cpp +++ b/indra/llplugin/llpluginmessagepipe.cpp @@ -96,11 +96,14 @@ void LLPluginMessagePipeOwner::killMessagePipe(void) } } -LLPluginMessagePipe::LLPluginMessagePipe(LLPluginMessagePipeOwner *owner, LLSocket::ptr_t socket) +LLPluginMessagePipe::LLPluginMessagePipe(LLPluginMessagePipeOwner *owner, LLSocket::ptr_t socket): + mInputMutex(gAPRPoolp), + mOutputMutex(gAPRPoolp), + mOwner(owner), + mSocket(socket) { - mOwner = owner; + mOwner->setMessagePipe(this); - mSocket = socket; } LLPluginMessagePipe::~LLPluginMessagePipe() @@ -114,8 +117,10 @@ LLPluginMessagePipe::~LLPluginMessagePipe() bool LLPluginMessagePipe::addMessage(const std::string &message) { // queue the message for later output + mOutputMutex.lock(); mOutput += message; mOutput += MESSAGE_DELIMITER; // message separator + mOutputMutex.unlock(); return true; } @@ -148,6 +153,18 @@ void LLPluginMessagePipe::setSocketTimeout(apr_interval_time_t timeout_usec) } bool LLPluginMessagePipe::pump(F64 timeout) +{ + bool result = pumpOutput(); + + if(result) + { + result = pumpInput(timeout); + } + + return result; +} + +bool LLPluginMessagePipe::pumpOutput() { bool result = true; @@ -156,6 +173,7 @@ bool LLPluginMessagePipe::pump(F64 timeout) apr_status_t status; apr_size_t size; + mOutputMutex.lock(); if(!mOutput.empty()) { // write any outgoing messages @@ -183,6 +201,17 @@ bool LLPluginMessagePipe::pump(F64 timeout) // remove the written part from the buffer and try again later. mOutput = mOutput.substr(size); } + else if(APR_STATUS_IS_EOF(status)) + { + // This is what we normally expect when a plugin exits. + llinfos << "Got EOF from plugin socket. " << llendl; + + if(mOwner) + { + mOwner->socketError(status); + } + result = false; + } else { // some other error @@ -196,6 +225,20 @@ bool LLPluginMessagePipe::pump(F64 timeout) result = false; } } + mOutputMutex.unlock(); + } + + return result; +} + +bool LLPluginMessagePipe::pumpInput(F64 timeout) +{ + bool result = true; + + if(mSocket) + { + apr_status_t status; + apr_size_t size; // FIXME: For some reason, the apr timeout stuff isn't working properly on windows. // Until such time as we figure out why, don't try to use the socket timeout -- just sleep here instead. @@ -216,8 +259,16 @@ bool LLPluginMessagePipe::pump(F64 timeout) char input_buf[1024]; apr_size_t request_size; - // Start out by reading one byte, so that any data received will wake us up. - request_size = 1; + if(timeout == 0.0f) + { + // If we have no timeout, start out with a full read. + request_size = sizeof(input_buf); + } + else + { + // Start out by reading one byte, so that any data received will wake us up. + request_size = 1; + } // and use the timeout so we'll sleep if no data is available. 
setSocketTimeout((apr_interval_time_t)(timeout * 1000000)); @@ -236,11 +287,15 @@ bool LLPluginMessagePipe::pump(F64 timeout) // LL_INFOS("Plugin") << "after apr_socket_recv, size = " << size << LL_ENDL; if(size > 0) + { + mInputMutex.lock(); mInput.append(input_buf, size); + mInputMutex.unlock(); + } if(status == APR_SUCCESS) { -// llinfos << "success, read " << size << llendl; + LL_DEBUGS("PluginSocket") << "success, read " << size << LL_ENDL; if(size != request_size) { @@ -250,16 +305,28 @@ bool LLPluginMessagePipe::pump(F64 timeout) } else if(APR_STATUS_IS_TIMEUP(status)) { -// llinfos << "TIMEUP, read " << size << llendl; + LL_DEBUGS("PluginSocket") << "TIMEUP, read " << size << LL_ENDL; // Timeout was hit. Since the initial read is 1 byte, this should never be a partial read. break; } else if(APR_STATUS_IS_EAGAIN(status)) { -// llinfos << "EAGAIN, read " << size << llendl; + LL_DEBUGS("PluginSocket") << "EAGAIN, read " << size << LL_ENDL; - // We've been doing partial reads, and we're done now. + // Non-blocking read returned immediately. + break; + } + else if(APR_STATUS_IS_EOF(status)) + { + // This is what we normally expect when a plugin exits. + LL_INFOS("PluginSocket") << "Got EOF from plugin socket. " << LL_ENDL; + + if(mOwner) + { + mOwner->socketError(status); + } + result = false; break; } else @@ -276,22 +343,18 @@ bool LLPluginMessagePipe::pump(F64 timeout) break; } - // Second and subsequent reads should not use the timeout - setSocketTimeout(0); - // and should try to fill the input buffer - request_size = sizeof(input_buf); + if(timeout != 0.0f) + { + // Second and subsequent reads should not use the timeout + setSocketTimeout(0); + // and should try to fill the input buffer + request_size = sizeof(input_buf); + } } processInput(); } } - - if(!result) - { - // If we got an error, we're done. - LL_INFOS("Plugin") << "Error from socket, cleaning up." << LL_ENDL; - delete this; - } return result; } @@ -300,6 +363,7 @@ void LLPluginMessagePipe::processInput(void) { // Look for input delimiter(s) in the input buffer. int delim; + mInputMutex.lock(); while((delim = mInput.find(MESSAGE_DELIMITER)) != std::string::npos) { // Let the owner process this message @@ -310,12 +374,15 @@ void LLPluginMessagePipe::processInput(void) // and this guarantees that the messages will get dequeued correctly. std::string message(mInput, 0, delim); mInput.erase(0, delim + 1); + mInputMutex.unlock(); mOwner->receiveMessageRaw(message); + mInputMutex.lock(); } else { LL_WARNS("Plugin") << "!mOwner" << LL_ENDL; } } + mInputMutex.unlock(); } diff --git a/indra/llplugin/llpluginmessagepipe.h b/indra/llplugin/llpluginmessagepipe.h index 1ddb38de68..1b0a08254b 100644 --- a/indra/llplugin/llpluginmessagepipe.h +++ b/indra/llplugin/llpluginmessagepipe.h @@ -35,6 +35,7 @@ #define LL_LLPLUGINMESSAGEPIPE_H #include "lliosocket.h" +#include "llthread.h" class LLPluginMessagePipe; @@ -51,7 +52,7 @@ public: virtual apr_status_t socketError(apr_status_t error); // called from LLPluginMessagePipe to manage the connection with LLPluginMessagePipeOwner -- do not use! 
- virtual void setMessagePipe(LLPluginMessagePipe *message_pipe) ; + virtual void setMessagePipe(LLPluginMessagePipe *message_pipe); protected: // returns false if writeMessageRaw() would drop the message @@ -76,14 +77,18 @@ public: void clearOwner(void); bool pump(F64 timeout = 0.0f); - + bool pumpOutput(); + bool pumpInput(F64 timeout = 0.0f); + protected: void processInput(void); // used internally by pump() void setSocketTimeout(apr_interval_time_t timeout_usec); + LLMutex mInputMutex; std::string mInput; + LLMutex mOutputMutex; std::string mOutput; LLPluginMessagePipeOwner *mOwner; diff --git a/indra/llplugin/llpluginprocesschild.cpp b/indra/llplugin/llpluginprocesschild.cpp index 2d078cd6ed..d1cf91b253 100644 --- a/indra/llplugin/llpluginprocesschild.cpp +++ b/indra/llplugin/llpluginprocesschild.cpp @@ -85,9 +85,14 @@ void LLPluginProcessChild::idle(void) bool idle_again; do { - if(mSocketError != APR_SUCCESS) + if(APR_STATUS_IS_EOF(mSocketError)) { - LL_INFOS("Plugin") << "message pipe is in error state, moving to STATE_ERROR"<< LL_ENDL; + // Plugin socket was closed. This covers both normal plugin termination and host crashes. + setState(STATE_ERROR); + } + else if(mSocketError != APR_SUCCESS) + { + LL_INFOS("Plugin") << "message pipe is in error state (" << mSocketError << "), moving to STATE_ERROR"<< LL_ENDL; setState(STATE_ERROR); } diff --git a/indra/llplugin/llpluginprocessparent.cpp b/indra/llplugin/llpluginprocessparent.cpp index e273410a1d..c61a903a2c 100644 --- a/indra/llplugin/llpluginprocessparent.cpp +++ b/indra/llplugin/llpluginprocessparent.cpp @@ -45,8 +45,51 @@ LLPluginProcessParentOwner::~LLPluginProcessParentOwner() } -LLPluginProcessParent::LLPluginProcessParent(LLPluginProcessParentOwner *owner) +bool LLPluginProcessParent::sUseReadThread = false; +apr_pollset_t *LLPluginProcessParent::sPollSet = NULL; +bool LLPluginProcessParent::sPollsetNeedsRebuild = false; +LLMutex *LLPluginProcessParent::sInstancesMutex; +std::list<LLPluginProcessParent*> LLPluginProcessParent::sInstances; +LLThread *LLPluginProcessParent::sPollThread = NULL; + + +class LLPluginProcessParentPollThread: public LLThread +{ +public: + LLPluginProcessParentPollThread() : + LLThread("LLPluginProcessParentPollThread", gAPRPoolp) + { + } +protected: + // Inherited from LLThread + /*virtual*/ void run(void) + { + while(!isQuitting() && LLPluginProcessParent::getUseReadThread()) + { + LLPluginProcessParent::poll(0.1f); + checkPause(); + } + + // Final poll to clean up the pollset, etc. + LLPluginProcessParent::poll(0.0f); + } + + // Inherited from LLThread + /*virtual*/ bool runCondition(void) + { + return(LLPluginProcessParent::canPollThreadRun()); + } + +}; + +LLPluginProcessParent::LLPluginProcessParent(LLPluginProcessParentOwner *owner): + mIncomingQueueMutex(gAPRPoolp) { + if(!sInstancesMutex) + { + sInstancesMutex = new LLMutex(gAPRPoolp); + } + mOwner = owner; mBoundPort = 0; mState = STATE_UNINITIALIZED; @@ -55,18 +98,30 @@ LLPluginProcessParent::LLPluginProcessParent(LLPluginProcessParentOwner *owner) mDisableTimeout = false; mDebug = false; mBlocked = false; + mPolledInput = false; + mPollFD.client_data = NULL; mPluginLaunchTimeout = 60.0f; mPluginLockupTimeout = 15.0f; // Don't start the timer here -- start it when we actually launch the plugin process. mHeartbeat.stop(); + + // Don't add to the global list until fully constructed.
+ sInstancesMutex->lock(); + sInstances.push_back(this); + sInstancesMutex->unlock(); } LLPluginProcessParent::~LLPluginProcessParent() { LL_DEBUGS("Plugin") << "destructor" << LL_ENDL; + // Remove from the global list before beginning destruction. + sInstancesMutex->lock(); + sInstances.remove(this); + sInstancesMutex->unlock(); + // Destroy any remaining shared memory regions sharedMemoryRegionsType::iterator iter; while((iter = mSharedMemoryRegions.begin()) != mSharedMemoryRegions.end()) @@ -78,15 +133,15 @@ LLPluginProcessParent::~LLPluginProcessParent() mSharedMemoryRegions.erase(iter); } - // orphaning the process means it won't be killed when the LLProcessLauncher is destructed. - // This is what we want -- it should exit cleanly once it notices the sockets have been closed. - mProcess.orphan(); + mProcess.kill(); killSockets(); } void LLPluginProcessParent::killSockets(void) { + mIncomingQueueMutex.lock(); killMessagePipe(); + mIncomingQueueMutex.unlock(); mListenSocket.reset(); mSocket.reset(); } @@ -160,21 +215,47 @@ void LLPluginProcessParent::idle(void) do { + // process queued messages + mIncomingQueueMutex.lock(); + while(!mIncomingQueue.empty()) + { + LLPluginMessage message = mIncomingQueue.front(); + mIncomingQueue.pop(); + mIncomingQueueMutex.unlock(); + + receiveMessage(message); + + mIncomingQueueMutex.lock(); + } + + mIncomingQueueMutex.unlock(); + // Give time to network processing if(mMessagePipe) { - if(!mMessagePipe->pump()) + // Drain any queued outgoing messages + mMessagePipe->pumpOutput(); + + // Only do input processing here if this instance isn't in a pollset. + if(!mPolledInput) { -// LL_WARNS("Plugin") << "Message pipe hit an error state" << LL_ENDL; - errorState(); + mMessagePipe->pumpInput(); } } - - if((mSocketError != APR_SUCCESS) && (mState <= STATE_RUNNING)) + + if(mState <= STATE_RUNNING) { - // The socket is in an error state -- the plugin is gone. - LL_WARNS("Plugin") << "Socket hit an error state (" << mSocketError << ")" << LL_ENDL; - errorState(); + if(APR_STATUS_IS_EOF(mSocketError)) + { + // Plugin socket was closed. This covers both normal plugin termination and plugin crashes. + errorState(); + } + else if(mSocketError != APR_SUCCESS) + { + // The socket is in an error state -- the plugin is gone. + LL_WARNS("Plugin") << "Socket hit an error state (" << mSocketError << ")" << LL_ENDL; + errorState(); + } } // If a state needs to go directly to another state (as a performance enhancement), it can set idle_again to true after calling setState(). @@ -355,7 +436,7 @@ void LLPluginProcessParent::idle(void) break; case STATE_HELLO: - LL_DEBUGS("Plugin") << "received hello message" << llendl; + LL_DEBUGS("Plugin") << "received hello message" << LL_ENDL; // Send the message to load the plugin { @@ -389,7 +470,7 @@ void LLPluginProcessParent::idle(void) } else if(pluginLockedUp()) { - LL_WARNS("Plugin") << "timeout in exiting state, bailing out" << llendl; + LL_WARNS("Plugin") << "timeout in exiting state, bailing out" << LL_ENDL; errorState(); } break; @@ -411,8 +492,7 @@ void LLPluginProcessParent::idle(void) break; case STATE_CLEANUP: - // Don't do a kill here anymore -- closing the sockets is the new 'kill'. - mProcess.orphan(); + mProcess.kill(); killSockets(); setState(STATE_DONE); break; @@ -491,29 +571,315 @@ void LLPluginProcessParent::sendMessage(const LLPluginMessage &message) std::string buffer = message.generate(); LL_DEBUGS("Plugin") << "Sending: " << buffer << LL_ENDL; writeMessageRaw(buffer); + + // Try to send message immediately. 
+ if(mMessagePipe) + { + mMessagePipe->pumpOutput(); + } +} + +//virtual +void LLPluginProcessParent::setMessagePipe(LLPluginMessagePipe *message_pipe) +{ + bool update_pollset = false; + + if(mMessagePipe) + { + // Unsetting an existing message pipe -- remove from the pollset + mPollFD.client_data = NULL; + + // pollset needs an update + update_pollset = true; + } + if(message_pipe != NULL) + { + // Set up the apr_pollfd_t + mPollFD.p = gAPRPoolp; + mPollFD.desc_type = APR_POLL_SOCKET; + mPollFD.reqevents = APR_POLLIN|APR_POLLERR|APR_POLLHUP; + mPollFD.rtnevents = 0; + mPollFD.desc.s = mSocket->getSocket(); + mPollFD.client_data = (void*)this; + + // pollset needs an update + update_pollset = true; + } + + mMessagePipe = message_pipe; + + if(update_pollset) + { + dirtyPollSet(); + } } +//static +void LLPluginProcessParent::dirtyPollSet() +{ + sPollsetNeedsRebuild = true; + + if(sPollThread) + { + LL_DEBUGS("PluginPoll") << "unpausing polling thread " << LL_ENDL; + sPollThread->unpause(); + } +} + +void LLPluginProcessParent::updatePollset() +{ + if(!sInstancesMutex) + { + // No instances have been created yet. There's no work to do. + return; + } + + sInstancesMutex->lock(); + + if(sPollSet) + { + LL_DEBUGS("PluginPoll") << "destroying pollset " << sPollSet << LL_ENDL; + // delete the existing pollset. + apr_pollset_destroy(sPollSet); + sPollSet = NULL; + } + + std::list<LLPluginProcessParent*>::iterator iter; + int count = 0; + + // Count the number of instances that want to be in the pollset + for(iter = sInstances.begin(); iter != sInstances.end(); iter++) + { + (*iter)->mPolledInput = false; + if((*iter)->mPollFD.client_data) + { + // This instance has a socket that needs to be polled. + ++count; + } + } + + if(sUseReadThread && sPollThread && !sPollThread->isQuitting()) + { + if(!sPollSet && (count > 0)) + { + // The pollset doesn't exist yet. Create it now. + apr_status_t status = apr_pollset_create(&sPollSet, count, gAPRPoolp, APR_POLLSET_NOCOPY); + if(status != APR_SUCCESS) + { + LL_WARNS("PluginPoll") << "Couldn't create pollset. Falling back to non-pollset mode." << LL_ENDL; + sPollSet = NULL; + } + else + { + LL_DEBUGS("PluginPoll") << "created pollset " << sPollSet << LL_ENDL; + + // Pollset was created, add all instances to it. + for(iter = sInstances.begin(); iter != sInstances.end(); iter++) + { + if((*iter)->mPollFD.client_data) + { + status = apr_pollset_add(sPollSet, &((*iter)->mPollFD)); + if(status == APR_SUCCESS) + { + (*iter)->mPolledInput = true; + } + else + { + LL_WARNS("PluginPoll") << "apr_pollset_add failed with status " << status << LL_ENDL; + } + } + } + } + } + } + sInstancesMutex->unlock(); +} + +void LLPluginProcessParent::setUseReadThread(bool use_read_thread) +{ + if(sUseReadThread != use_read_thread) + { + sUseReadThread = use_read_thread; + + if(sUseReadThread) + { + if(!sPollThread) + { + // start up the read thread + LL_INFOS("PluginPoll") << "creating polling thread " << LL_ENDL; + + // make sure the pollset gets rebuilt.
+ sPollsetNeedsRebuild = true; + + sPollThread = new LLPluginProcessParentPollThread; + sPollThread->start(); + } + } + else + { + if(sPollThread) + { + // shut down the read thread + LL_INFOS("PluginPoll") << "destroying polling thread " << LL_ENDL; + delete sPollThread; + sPollThread = NULL; + } + } + + } +} + +void LLPluginProcessParent::poll(F64 timeout) +{ + if(sPollsetNeedsRebuild || !sUseReadThread) + { + sPollsetNeedsRebuild = false; + updatePollset(); + } + + if(sPollSet) + { + apr_status_t status; + apr_int32_t count; + const apr_pollfd_t *descriptors; + status = apr_pollset_poll(sPollSet, (apr_interval_time_t)(timeout * 1000000), &count, &descriptors); + if(status == APR_SUCCESS) + { + // One or more of the descriptors signalled. Call them. + for(int i = 0; i < count; i++) + { + LLPluginProcessParent *self = (LLPluginProcessParent *)(descriptors[i].client_data); + // NOTE: the descriptor returned here is actually a COPY of the original (even though we create the pollset with APR_POLLSET_NOCOPY). + // This means that even if the parent has set its mPollFD.client_data to NULL, the old pointer may still be there in this descriptor. + // It's even possible that the old pointer no longer points to a valid LLPluginProcessParent. + // This means that we can't safely dereference the 'self' pointer here without some extra steps... + if(self) + { + // Make sure this pointer is still in the instances list + sInstancesMutex->lock(); + bool valid = false; + for(std::list<LLPluginProcessParent*>::iterator iter = sInstances.begin(); iter != sInstances.end(); ++iter) + { + if(*iter == self) + { + // Lock the instance's mutex before unlocking the global mutex. + // This avoids a possible race condition where the instance gets deleted between this check and the servicePoll() call. + self->mIncomingQueueMutex.lock(); + valid = true; + break; + } + } + sInstancesMutex->unlock(); + + if(valid) + { + // The instance is still valid. + // Pull incoming messages off the socket + self->servicePoll(); + self->mIncomingQueueMutex.unlock(); + } + else + { + LL_DEBUGS("PluginPoll") << "detected deleted instance " << self << LL_ENDL; + } + + } + } + } + else if(APR_STATUS_IS_TIMEUP(status)) + { + // timed out with no incoming data. Just return. + } + else if(status == EBADF) + { + // This happens when one of the file descriptors in the pollset is destroyed, which happens whenever a plugin's socket is closed. + // The pollset has been or will be recreated, so just return. + LL_DEBUGS("PluginPoll") << "apr_pollset_poll returned EBADF" << LL_ENDL; + } + else if(status != APR_SUCCESS) + { + LL_WARNS("PluginPoll") << "apr_pollset_poll failed with status " << status << LL_ENDL; + } + } +} + +void LLPluginProcessParent::servicePoll() +{ + bool result = true; + + // poll signalled on this object's socket. Try to process incoming messages. + if(mMessagePipe) + { + result = mMessagePipe->pumpInput(0.0f); + } + + if(!result) + { + // If we got a read error on input, remove this pipe from the pollset + apr_pollset_remove(sPollSet, &mPollFD); + + // and tell the code not to re-add it + mPollFD.client_data = NULL; + } +} void LLPluginProcessParent::receiveMessageRaw(const std::string &message) { LL_DEBUGS("Plugin") << "Received: " << message << LL_ENDL; - - // FIXME: should this go into a queue instead?
LLPluginMessage parsed; if(parsed.parse(message) != -1) { - receiveMessage(parsed); + if(parsed.hasValue("blocking_request")) + { + mBlocked = true; + } + + if(mPolledInput) + { + // This is being called on the polling thread -- only do minimal processing/queueing. + receiveMessageEarly(parsed); + } + else + { + // This is not being called on the polling thread -- do full message processing at this time. + receiveMessage(parsed); + } } } -void LLPluginProcessParent::receiveMessage(const LLPluginMessage &message) +void LLPluginProcessParent::receiveMessageEarly(const LLPluginMessage &message) { - if(message.hasValue("blocking_request")) + // NOTE: this function will be called from the polling thread. It will be called with mIncomingQueueMutex _already locked_. + + bool handled = false; + + std::string message_class = message.getClass(); + if(message_class == LLPLUGIN_MESSAGE_CLASS_INTERNAL) { - mBlocked = true; + // no internal messages need to be handled early. } + else + { + // Call out to the owner and see if they want to reply + // TODO: Should this only happen when blocked? + if(mOwner != NULL) + { + handled = mOwner->receivePluginMessageEarly(message); + } + } + + if(!handled) + { + // any message that wasn't handled early needs to be queued. +// mIncomingQueueMutex.lock(); + mIncomingQueue.push(message); +// mIncomingQueueMutex.unlock(); + } +} +void LLPluginProcessParent::receiveMessage(const LLPluginMessage &message) +{ std::string message_class = message.getClass(); if(message_class == LLPLUGIN_MESSAGE_CLASS_INTERNAL) { @@ -704,12 +1070,12 @@ bool LLPluginProcessParent::pluginLockedUpOrQuit() if(!mProcess.isRunning()) { - LL_WARNS("Plugin") << "child exited" << llendl; + LL_WARNS("Plugin") << "child exited" << LL_ENDL; result = true; } else if(pluginLockedUp()) { - LL_WARNS("Plugin") << "timeout" << llendl; + LL_WARNS("Plugin") << "timeout" << LL_ENDL; result = true; } diff --git a/indra/llplugin/llpluginprocessparent.h b/indra/llplugin/llpluginprocessparent.h index 31f125bfb3..1ad0fbf059 100644 --- a/indra/llplugin/llpluginprocessparent.h +++ b/indra/llplugin/llpluginprocessparent.h @@ -41,12 +41,14 @@ #include "llpluginsharedmemory.h" #include "lliosocket.h" +#include "llthread.h" class LLPluginProcessParentOwner { public: virtual ~LLPluginProcessParentOwner(); virtual void receivePluginMessage(const LLPluginMessage &message) = 0; + virtual bool receivePluginMessageEarly(const LLPluginMessage &message) {return false;}; // This will only be called when the plugin has died unexpectedly virtual void pluginLaunchFailed() {}; virtual void pluginDied() {}; @@ -90,7 +92,9 @@ public: void receiveMessage(const LLPluginMessage &message); // Inherited from LLPluginMessagePipeOwner - void receiveMessageRaw(const std::string &message); + /*virtual*/ void receiveMessageRaw(const std::string &message); + /*virtual*/ void receiveMessageEarly(const LLPluginMessage &message); + /*virtual*/ void setMessagePipe(LLPluginMessagePipe *message_pipe) ; // This adds a memory segment shared with the client, generating a name for the segment. The name generated is guaranteed to be unique on the host. // The caller must call removeSharedMemory first (and wait until getSharedMemorySize returns 0 for the indicated name) before re-adding a segment with the same name.
@@ -113,7 +117,11 @@ public: void setLockupTimeout(F32 timeout) { mPluginLockupTimeout = timeout; }; F64 getCPUUsage() { return mCPUUsage; }; - + + static void poll(F64 timeout); + static bool canPollThreadRun() { return (sPollSet || sPollsetNeedsRebuild || sUseReadThread); }; + static void setUseReadThread(bool use_read_thread); + static bool getUseReadThread() { return sUseReadThread; }; private: enum EState @@ -164,12 +172,26 @@ private: bool mDisableTimeout; bool mDebug; bool mBlocked; + bool mPolledInput; LLProcessLauncher mDebugger; F32 mPluginLaunchTimeout; // Somewhat longer timeout for initial launch. F32 mPluginLockupTimeout; // If we don't receive a heartbeat in this many seconds, we declare the plugin locked up. + static bool sUseReadThread; + apr_pollfd_t mPollFD; + static apr_pollset_t *sPollSet; + static bool sPollsetNeedsRebuild; + static LLMutex *sInstancesMutex; + static std::list<LLPluginProcessParent*> sInstances; + static void dirtyPollSet(); + static void updatePollset(); + void servicePoll(); + static LLThread *sPollThread; + + LLMutex mIncomingQueueMutex; + std::queue<LLPluginMessage> mIncomingQueue; }; #endif // LL_LLPLUGINPROCESSPARENT_H -- cgit v1.2.3 From b7bc6920c89791563e4fd63002e632967ffde966 Mon Sep 17 00:00:00 2001 From: Monroe Linden Date: Tue, 27 Apr 2010 17:43:09 -0700 Subject: Fixed a silly error in the code that tries to avoid queueing up mouse move events to blocked plugins. --- indra/llplugin/llpluginclassmedia.cpp | 1 + 1 file changed, 1 insertion(+) (limited to 'indra/llplugin') diff --git a/indra/llplugin/llpluginclassmedia.cpp b/indra/llplugin/llpluginclassmedia.cpp index 24c671437e..0c9b325b68 100644 --- a/indra/llplugin/llpluginclassmedia.cpp +++ b/indra/llplugin/llpluginclassmedia.cpp @@ -440,6 +440,7 @@ void LLPluginClassMedia::mouseEvent(EMouseEventType type, int button, int x, int if(!mPlugin || !mPlugin->isRunning() || mPlugin->isBlocked()) { // Don't queue up mouse move events that can't be delivered. + return; } if((x == mLastMouseX) && (y == mLastMouseY)) -- cgit v1.2.3 From 77b13dc2df679c46f648a4f99c8e4d8836983a48 Mon Sep 17 00:00:00 2001 From: Monroe Linden Date: Thu, 29 Apr 2010 17:33:37 -0700 Subject: Incorporate suggestions from Richard's review of the LLPlugin changes. Use LLMutexLock (stack-based locker/unlocker) for the straightforward cases instead of explicit lock()/unlock(). There are still a couple of cases (one overlapping lock lifetime and two loops that unlock the mutex to call another function inside the loop) where I'm leaving explicit lock/unlock calls. Rename LLPluginProcessParent::sPollThread to sReadThread, for consistency. Made the LLPluginProcessParent destructor hold mIncomingQueueMutex while removing the instance from the global list -- this should prevent a possible race condition in LLPluginProcessParent::poll(). Removed a redundant check when calling LLPluginProcessParent::setUseReadThread().
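A note on the stack-based locker: it is the usual RAII idiom, where the constructor takes the lock and the destructor releases it on every exit path (early returns included), which is why the paired unlock() calls in the diffs that follow could simply be deleted. Roughly, ignoring the details of the real class in llthread.h:

    class ScopedMutexLock   // illustrative stand-in for LLMutexLock
    {
    public:
        ScopedMutexLock(LLMutex *mutex) : mMutex(mutex) { mMutex->lock(); }
        ~ScopedMutexLock() { mMutex->unlock(); }   // runs when the scope exits
    private:
        LLMutex *mMutex;
    };
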
--- indra/llplugin/llpluginmessagepipe.cpp | 9 ++-- indra/llplugin/llpluginprocessparent.cpp | 78 +++++++++++++++++--------------- indra/llplugin/llpluginprocessparent.h | 2 +- 3 files changed, 46 insertions(+), 43 deletions(-) (limited to 'indra/llplugin') diff --git a/indra/llplugin/llpluginmessagepipe.cpp b/indra/llplugin/llpluginmessagepipe.cpp index a62534de95..89f8b44569 100644 --- a/indra/llplugin/llpluginmessagepipe.cpp +++ b/indra/llplugin/llpluginmessagepipe.cpp @@ -117,10 +117,9 @@ LLPluginMessagePipe::~LLPluginMessagePipe() bool LLPluginMessagePipe::addMessage(const std::string &message) { // queue the message for later output - mOutputMutex.lock(); + LLMutexLock lock(&mOutputMutex); mOutput += message; mOutput += MESSAGE_DELIMITER; // message separator - mOutputMutex.unlock(); return true; } @@ -173,7 +172,7 @@ bool LLPluginMessagePipe::pumpOutput() apr_status_t status; apr_size_t size; - mOutputMutex.lock(); + LLMutexLock lock(&mOutputMutex); if(!mOutput.empty()) { // write any outgoing messages @@ -225,7 +224,6 @@ bool LLPluginMessagePipe::pumpOutput() result = false; } } - mOutputMutex.unlock(); } return result; @@ -288,9 +286,8 @@ bool LLPluginMessagePipe::pumpInput(F64 timeout) if(size > 0) { - mInputMutex.lock(); + LLMutexLock lock(&mInputMutex); mInput.append(input_buf, size); - mInputMutex.unlock(); } if(status == APR_SUCCESS) diff --git a/indra/llplugin/llpluginprocessparent.cpp b/indra/llplugin/llpluginprocessparent.cpp index c61a903a2c..9d510e737d 100644 --- a/indra/llplugin/llpluginprocessparent.cpp +++ b/indra/llplugin/llpluginprocessparent.cpp @@ -50,7 +50,7 @@ apr_pollset_t *LLPluginProcessParent::sPollSet = NULL; bool LLPluginProcessParent::sPollsetNeedsRebuild = false; LLMutex *LLPluginProcessParent::sInstancesMutex; std::list<LLPluginProcessParent*> LLPluginProcessParent::sInstances; -LLThread *LLPluginProcessParent::sPollThread = NULL; +LLThread *LLPluginProcessParent::sReadThread = NULL; class LLPluginProcessParentPollThread: public LLThread @@ -108,9 +108,10 @@ LLPluginProcessParent::LLPluginProcessParent(LLPluginProcessParentOwner *owner): mHeartbeat.stop(); // Don't add to the global list until fully constructed. - sInstancesMutex->lock(); - sInstances.push_back(this); - sInstancesMutex->unlock(); + { + LLMutexLock lock(sInstancesMutex); + sInstances.push_back(this); + } } LLPluginProcessParent::~LLPluginProcessParent() { LL_DEBUGS("Plugin") << "destructor" << LL_ENDL; // Remove from the global list before beginning destruction.
- sInstancesMutex->lock(); - sInstances.remove(this); - sInstancesMutex->unlock(); + { + // Make sure to get the global mutex _first_ here, to avoid a possible deadlock against LLPluginProcessParent::poll() + LLMutexLock lock(sInstancesMutex); + { + LLMutexLock lock2(&mIncomingQueueMutex); + sInstances.remove(this); + } + } // Destroy any remaining shared memory regions sharedMemoryRegionsType::iterator iter; while((iter = mSharedMemoryRegions.begin()) != mSharedMemoryRegions.end()) @@ -139,9 +145,11 @@ void LLPluginProcessParent::killSockets(void) { - mIncomingQueueMutex.lock(); - killMessagePipe(); - mIncomingQueueMutex.unlock(); + { + LLMutexLock lock(&mIncomingQueueMutex); + killMessagePipe(); + } + mListenSocket.reset(); mSocket.reset(); } @@ -619,10 +627,10 @@ void LLPluginProcessParent::dirtyPollSet() { sPollsetNeedsRebuild = true; - if(sPollThread) + if(sReadThread) { - LL_DEBUGS("PluginPoll") << "unpausing polling thread " << LL_ENDL; - sPollThread->unpause(); + LL_DEBUGS("PluginPoll") << "unpausing read thread " << LL_ENDL; + sReadThread->unpause(); } } @@ -634,7 +642,7 @@ void LLPluginProcessParent::updatePollset() return; } - sInstancesMutex->lock(); + LLMutexLock lock(sInstancesMutex); if(sPollSet) { @@ -658,7 +666,7 @@ void LLPluginProcessParent::updatePollset() } } - if(sUseReadThread && sPollThread && !sPollThread->isQuitting()) + if(sUseReadThread && sReadThread && !sReadThread->isQuitting()) { if(!sPollSet && (count > 0)) { @@ -692,7 +700,6 @@ void LLPluginProcessParent::updatePollset() } } } - sInstancesMutex->unlock(); } void LLPluginProcessParent::setUseReadThread(bool use_read_thread) @@ -703,26 +710,26 @@ if(sUseReadThread) { - if(!sPollThread) + if(!sReadThread) { // start up the read thread - LL_INFOS("PluginPoll") << "creating polling thread " << LL_ENDL; + LL_INFOS("PluginPoll") << "creating read thread " << LL_ENDL; // make sure the pollset gets rebuilt. sPollsetNeedsRebuild = true; - sPollThread = new LLPluginProcessParentPollThread; - sPollThread->start(); + sReadThread = new LLPluginProcessParentPollThread; + sReadThread->start(); } } else { - if(sPollThread) + if(sReadThread) { // shut down the read thread - LL_INFOS("PluginPoll") << "destroying polling thread " << LL_ENDL; - delete sPollThread; - sPollThread = NULL; + LL_INFOS("PluginPoll") << "destroying read thread " << LL_ENDL; + delete sReadThread; + sReadThread = NULL; } } @@ -756,21 +763,22 @@ void LLPluginProcessParent::poll(F64 timeout) if(self) { // Make sure this pointer is still in the instances list - sInstancesMutex->lock(); bool valid = false; - for(std::list<LLPluginProcessParent*>::iterator iter = sInstances.begin(); iter != sInstances.end(); ++iter) { - if(*iter == self) + LLMutexLock lock(sInstancesMutex); + for(std::list<LLPluginProcessParent*>::iterator iter = sInstances.begin(); iter != sInstances.end(); ++iter) { - // Lock the instance's mutex before unlocking the global mutex. - // This avoids a possible race condition where the instance gets deleted between this check and the servicePoll() call. - self->mIncomingQueueMutex.lock(); - valid = true; - break; + if(*iter == self) + { + // Lock the instance's mutex before unlocking the global mutex. + // This avoids a possible race condition where the instance gets deleted between this check and the servicePoll() call. + self->mIncomingQueueMutex.lock(); + valid = true; + break; + } } } - sInstancesMutex->unlock(); - + if(valid) { // The instance is still valid.
@@ -872,9 +880,7 @@ void LLPluginProcessParent::receiveMessageEarly(const LLPluginMessage &message) if(!handled) { // any message that wasn't handled early needs to be queued. -// mIncomingQueueMutex.lock(); mIncomingQueue.push(message); -// mIncomingQueueMutex.unlock(); } } diff --git a/indra/llplugin/llpluginprocessparent.h b/indra/llplugin/llpluginprocessparent.h index 1ad0fbf059..4dff835b6a 100644 --- a/indra/llplugin/llpluginprocessparent.h +++ b/indra/llplugin/llpluginprocessparent.h @@ -188,7 +188,7 @@ private: static void dirtyPollSet(); static void updatePollset(); void servicePoll(); - static LLThread *sPollThread; + static LLThread *sReadThread; LLMutex mIncomingQueueMutex; std::queue<LLPluginMessage> mIncomingQueue; -- cgit v1.2.3 From 34e29fe71277609e6c18c967b8fd7a45116d4d82 Mon Sep 17 00:00:00 2001 From: Xiaohong Bao Date: Thu, 29 Apr 2010 12:24:46 -0600 Subject: add debug code for EXT-7011: crash at LLPluginClassMedia::idle [secondlife-bin llpluginclassmedia.cpp:158] --- indra/llplugin/llpluginclassmedia.cpp | 4 ++++ indra/llplugin/llpluginclassmedia.h | 8 ++++++++ 2 files changed, 12 insertions(+) (limited to 'indra/llplugin') diff --git a/indra/llplugin/llpluginclassmedia.cpp b/indra/llplugin/llpluginclassmedia.cpp index 0c9b325b68..41ace62964 100644 --- a/indra/llplugin/llpluginclassmedia.cpp +++ b/indra/llplugin/llpluginclassmedia.cpp @@ -57,11 +57,15 @@ LLPluginClassMedia::LLPluginClassMedia(LLPluginClassMediaOwner *owner) mOwner = owner; mPlugin = NULL; reset(); + + //debug use + mDeleteOK = true ; } LLPluginClassMedia::~LLPluginClassMedia() { + llassert_always(mDeleteOK) ; reset(); } diff --git a/indra/llplugin/llpluginclassmedia.h b/indra/llplugin/llpluginclassmedia.h index 8c7b00f45b..66853c9940 100644 --- a/indra/llplugin/llpluginclassmedia.h +++ b/indra/llplugin/llpluginclassmedia.h @@ -373,6 +373,14 @@ protected: F64 mCurrentRate; F64 mLoadedDuration; +//-------------------------------------- + //debug use only + // +private: + bool mDeleteOK ; +public: + void setDeleteOK(bool flag) { mDeleteOK = flag ;} +//-------------------------------------- }; #endif // LL_LLPLUGINCLASSMEDIA_H -- cgit v1.2.3 From d597a7e36a0a8c4cbee70b053c41aa01afeab6b5 Mon Sep 17 00:00:00 2001 From: Tofu Linden Date: Fri, 30 Apr 2010 08:27:56 +0100 Subject: Looks like the linux apr version is a bit too old to build 5458d58b4cd0. Use this workaround for now. --- indra/llplugin/llpluginprocessparent.cpp | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'indra/llplugin') diff --git a/indra/llplugin/llpluginprocessparent.cpp b/indra/llplugin/llpluginprocessparent.cpp index 9d510e737d..3589b22a77 100644 --- a/indra/llplugin/llpluginprocessparent.cpp +++ b/indra/llplugin/llpluginprocessparent.cpp @@ -670,12 +670,15 @@ void LLPluginProcessParent::updatePollset() { if(!sPollSet && (count > 0)) { +#ifdef APR_POLLSET_NOCOPY // The pollset doesn't exist yet. Create it now. apr_status_t status = apr_pollset_create(&sPollSet, count, gAPRPoolp, APR_POLLSET_NOCOPY); if(status != APR_SUCCESS) { +#endif // APR_POLLSET_NOCOPY LL_WARNS("PluginPoll") << "Couldn't create pollset. Falling back to non-pollset mode." << LL_ENDL; sPollSet = NULL; +#ifdef APR_POLLSET_NOCOPY } else { @@ -698,6 +701,7 @@ void LLPluginProcessParent::updatePollset() } } } +#endif // APR_POLLSET_NOCOPY } } } -- cgit v1.2.3
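Taken together, the plugin-side use of the blocking protocol from the first commit comes down to a few lines. In the sketch below, "blocking_request" and "blocking_response" are the real trigger keys described above; the message class, name, and payload are invented for illustration, and setValue() follows the LLPluginMessage accessors (hasValue(), getClass(), generate()) visible in the diffs.

    LLPluginMessage request("base", "example_query");   // hypothetical message
    request.setValue("blocking_request", "1");          // any value triggers blocking

    // sendMessage() will not return until the host replies with a message
    // carrying "blocking_response". That response is delivered re-entrantly
    // to the plugin's receive-message function _before_ this call returns,
    // so response data can be copied somewhere the blocked caller can see it.
    sendMessage(request);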