/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: sw=4 ts=4 et :
 */
/* ***** BEGIN LICENSE BLOCK *****
 * Version: MPL 1.1/GPL 2.0/LGPL 2.1
 *
 * The contents of this file are subject to the Mozilla Public License Version
 * 1.1 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * http://www.mozilla.org/MPL/
 *
 * Software distributed under the License is distributed on an "AS IS" basis,
 * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
 * for the specific language governing rights and limitations under the
 * License.
 *
 * The Original Code is Mozilla Plugin App.
 *
 * The Initial Developer of the Original Code is
 *   Chris Jones <jones.chris.g@gmail.com>
 * Portions created by the Initial Developer are Copyright (C) 2009
 * the Initial Developer. All Rights Reserved.
 *
 * Contributor(s):
 *
 * Alternatively, the contents of this file may be used under the terms of
 * either the GNU General Public License Version 2 or later (the "GPL"), or
 * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
 * in which case the provisions of the GPL or the LGPL are applicable instead
 * of those above. If you wish to allow use of your version of this file only
 * under the terms of either the GPL or the LGPL, and not to allow others to
 * use your version of this file under the terms of the MPL, indicate your
 * decision by deleting the provisions above and replace them with the notice
 * and other provisions required by the GPL or the LGPL. If you do not delete
 * the provisions above, a recipient may use your version of this file under
 * the terms of any one of the MPL, the GPL or the LGPL.
 *
 * ***** END LICENSE BLOCK ***** */
#include "mozilla/ipc/RPCChannel.h"
|
2010-01-27 06:41:32 +00:00
|
|
|
#include "mozilla/ipc/ProtocolUtils.h"
|
2009-06-29 18:38:29 +00:00
|
|
|
|
|
|
|
#include "nsDebug.h"
|
2009-11-12 22:46:29 +00:00
|
|
|
#include "nsTraceRefcnt.h"
|
2009-06-29 18:38:29 +00:00
|
|
|
|
2009-10-09 06:21:39 +00:00
|
|
|
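// Abort with channel diagnostics when |_cond| is false.  The optional
// varargs (a reason string, plus the message type and reply flag) are
// forwarded to DebugAbort() below.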
#define RPC_ASSERT(_cond, ...)                                      \
    do {                                                            \
        if (!(_cond))                                               \
            DebugAbort(__FILE__, __LINE__, #_cond,## __VA_ARGS__);  \
    } while (0)

using mozilla::MutexAutoLock;
using mozilla::MutexAutoUnlock;

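// RPCChannel isn't refcounted; these no-op traits let NewRunnableMethod()
// post tasks against it, relying on the channel outliving the posted tasks.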
template<>
struct RunnableMethodTraits<mozilla::ipc::RPCChannel>
{
    static void RetainCallee(mozilla::ipc::RPCChannel* obj) { }
    static void ReleaseCallee(mozilla::ipc::RPCChannel* obj) { }
};

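// Control messages used by BlockChild()/UnblockChild() on the parent side
// and BlockOnParent()/UnblockFromParent() on the child side.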
namespace
{

// Async (from the sending side's perspective)
class BlockChildMessage : public IPC::Message
{
public:
    enum { ID = BLOCK_CHILD_MESSAGE_TYPE };
    BlockChildMessage() :
        Message(MSG_ROUTING_NONE, ID, IPC::Message::PRIORITY_NORMAL)
    { }
};

// Async
class UnblockChildMessage : public IPC::Message
{
public:
    enum { ID = UNBLOCK_CHILD_MESSAGE_TYPE };
    UnblockChildMessage() :
        Message(MSG_ROUTING_NONE, ID, IPC::Message::PRIORITY_NORMAL)
    { }
};

} // namespace <anon>

namespace mozilla {
namespace ipc {

RPCChannel::RPCChannel(RPCListener* aListener,
                       RacyRPCPolicy aPolicy)
  : SyncChannel(aListener),
    mPending(),
    mStack(),
    mOutOfTurnReplies(),
    mDeferred(),
    mRemoteStackDepthGuess(0),
    mRacePolicy(aPolicy),
    mBlockedOnParent(false)
{
    MOZ_COUNT_CTOR(RPCChannel);
}

RPCChannel::~RPCChannel()
{
    MOZ_COUNT_DTOR(RPCChannel);
    // FIXME/cjones: impl
}

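// Only touched by the Windows-specific wait code; tracks how deeply the
// native ("inner") event loop is nested (see the WindowsEventLoop note
// in Incall()).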
#ifdef OS_WIN
// static
int RPCChannel::sInnerEventLoopDepth = 0;
#endif

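// True when the Call() wait loop has something to act on: the channel has
// disconnected, a message is pending, or the reply to the out-call on top
// of the RPC stack has already arrived out of turn.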
bool
RPCChannel::EventOccurred()
{
    AssertWorkerThread();
    mMutex.AssertCurrentThreadOwns();
    RPC_ASSERT(StackDepth() > 0, "not in wait loop");

    return (!Connected() ||
            !mPending.empty() ||
            (!mOutOfTurnReplies.empty() &&
             mOutOfTurnReplies.find(mStack.top().seqno())
             != mOutOfTurnReplies.end()));
}

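// Make a synchronous RPC out-call and wait for its reply, dispatching any
// async messages, sync messages, and nested in-calls that arrive in the
// meantime.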
bool
RPCChannel::Call(Message* msg, Message* reply)
{
    AssertWorkerThread();
    mMutex.AssertNotCurrentThreadOwns();
    RPC_ASSERT(!ProcessingSyncMessage(),
               "violation of sync handler invariant");
    RPC_ASSERT(msg->is_rpc(), "can only Call() RPC messages here");

    MutexAutoLock lock(mMutex);

    if (!Connected()) {
        ReportConnectionError("RPCChannel");
        return false;
    }

    msg->set_seqno(NextSeqno());
    msg->set_rpc_remote_stack_depth_guess(mRemoteStackDepthGuess);
    msg->set_rpc_local_stack_depth(1 + StackDepth());
    mStack.push(*msg);

    mIOLoop->PostTask(
        FROM_HERE,
        NewRunnableMethod(this, &RPCChannel::OnSend, msg));

    while (1) {
        // now might be the time to process a message deferred because
        // of race resolution
        MaybeProcessDeferredIncall();

        // here we're waiting for something to happen. see long
        // comment about the queue in RPCChannel.h
        while (!EventOccurred()) {
            bool maybeTimedOut = !RPCChannel::WaitForNotify();

            if (EventOccurred())
                break;

            if (maybeTimedOut && !ShouldContinueFromTimeout())
                return false;
        }

        if (!Connected()) {
            ReportConnectionError("RPCChannel");
            return false;
        }

        Message recvd;
        MessageMap::iterator it;
        if (!mOutOfTurnReplies.empty() &&
            ((it = mOutOfTurnReplies.find(mStack.top().seqno())) !=
             mOutOfTurnReplies.end())) {
            recvd = it->second;
            mOutOfTurnReplies.erase(it);
        }
        else {
            recvd = mPending.front();
            mPending.pop();
        }

        if (!recvd.is_sync() && !recvd.is_rpc()) {
            MutexAutoUnlock unlock(mMutex);
            AsyncChannel::OnDispatchMessage(recvd);
            continue;
        }

        if (recvd.is_sync()) {
            RPC_ASSERT(mPending.empty(),
                       "other side should have been blocked");
            MutexAutoUnlock unlock(mMutex);
            SyncChannel::OnDispatchMessage(recvd);
            continue;
        }

        RPC_ASSERT(recvd.is_rpc(), "wtf???");

        if (recvd.is_reply()) {
            RPC_ASSERT(0 < mStack.size(), "invalid RPC stack");

            const Message& outcall = mStack.top();

            if (recvd.seqno() < outcall.seqno()) {
                mOutOfTurnReplies[recvd.seqno()] = recvd;
                continue;
            }

            // FIXME/cjones: handle error
            RPC_ASSERT(
                recvd.is_reply_error() ||
                (recvd.type() == (outcall.type()+1) &&
                 recvd.seqno() == outcall.seqno()),
                "somebody's misbehavin'", "rpc", true);

            // we received a reply to our most recent outstanding
            // call. pop this frame and return the reply
            mStack.pop();

            bool isError = recvd.is_reply_error();
            if (!isError) {
                *reply = recvd;
            }

            if (0 == StackDepth()) {
                // we may have received new messages while waiting for
                // our reply. because we were awaiting a reply,
                // StackDepth > 0, and the IO thread didn't enqueue
                // OnMaybeDequeueOne() events for us. so to avoid
                // "losing" the new messages, we do that now.
                EnqueuePendingMessages();

                RPC_ASSERT(
                    mOutOfTurnReplies.empty(),
                    "still have pending replies with no pending out-calls",
                    "rpc", true);
            }

            // finished with this RPC stack frame
            return !isError;
        }

        // in-call. process in a new stack frame.

        // "snapshot" the current stack depth while we own the Mutex
        size_t stackDepth = StackDepth();
        {
            MutexAutoUnlock unlock(mMutex);
            // someone called in to us from the other side. handle the call
            Incall(recvd, stackDepth);
            // FIXME/cjones: error handling
        }
    }

    return true;
}

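// If an in-call was deferred during race resolution, dispatch it once our
// stack depth matches the depth at which the other side issued it.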
void
RPCChannel::MaybeProcessDeferredIncall()
{
    AssertWorkerThread();
    mMutex.AssertCurrentThreadOwns();

    if (mDeferred.empty())
        return;

    size_t stackDepth = StackDepth();

    // the other side can only *under*-estimate our actual stack depth
    RPC_ASSERT(mDeferred.top().rpc_remote_stack_depth_guess() <= stackDepth,
               "fatal logic error");

    if (mDeferred.top().rpc_remote_stack_depth_guess() < stackDepth)
        return;

    // time to process this message
    Message call = mDeferred.top();
    mDeferred.pop();

    // fix up fudge factor we added to account for race
    RPC_ASSERT(0 < mRemoteStackDepthGuess, "fatal logic error");
    --mRemoteStackDepthGuess;

    MutexAutoUnlock unlock(mMutex);
    fprintf(stderr, "  (processing deferred in-call)\n");
    Incall(call, stackDepth);
}

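// Post one OnMaybeDequeueOne() task per deferred or pending message so that
// each gets processed once the worker thread returns to its event loop.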
void
RPCChannel::EnqueuePendingMessages()
{
    AssertWorkerThread();
    mMutex.AssertCurrentThreadOwns();
    RPC_ASSERT(mDeferred.empty() || 1 == mDeferred.size(),
               "expected mDeferred to have 0 or 1 items");

    if (!mDeferred.empty())
        mWorkerLoop->PostTask(
            FROM_HERE,
            NewRunnableMethod(this, &RPCChannel::OnMaybeDequeueOne));

    // XXX performance tuning knob: could process all or k pending
    // messages here, rather than enqueuing for later processing

    for (size_t i = 0; i < mPending.size(); ++i)
        mWorkerLoop->PostTask(
            FROM_HERE,
            NewRunnableMethod(this, &RPCChannel::OnMaybeDequeueOne));
}

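// Worker-thread task: dispatch at most one deferred in-call or pending
// message.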
void
RPCChannel::OnMaybeDequeueOne()
{
    // XXX performance tuning knob: could process all or k pending
    // messages here

    AssertWorkerThread();
    mMutex.AssertNotCurrentThreadOwns();
    RPC_ASSERT(mDeferred.empty() || 1 == mDeferred.size(),
               "expected mDeferred to have 0 or 1 items");

    Message recvd;
    {
        MutexAutoLock lock(mMutex);

        if (!mDeferred.empty())
            return MaybeProcessDeferredIncall();

        if (mPending.empty())
            return;

        recvd = mPending.front();
        mPending.pop();
    }

    if (recvd.is_rpc())
        return Incall(recvd, 0);
    else if (recvd.is_sync())
        return SyncChannel::OnDispatchMessage(recvd);
    else
        return AsyncChannel::OnDispatchMessage(recvd);
}

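// Process an in-call from the other side.  If it raced with one of our own
// out-calls, resolve the race according to mRacePolicy before dispatching
// or deferring it.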
void
RPCChannel::Incall(const Message& call, size_t stackDepth)
{
    AssertWorkerThread();
    mMutex.AssertNotCurrentThreadOwns();
    RPC_ASSERT(call.is_rpc() && !call.is_reply(), "wrong message type");

    // Race detection: see the long comment near
    // mRemoteStackDepthGuess in RPCChannel.h. "Remote" stack depth
    // means our side, and "local" means other side.
    if (call.rpc_remote_stack_depth_guess() != stackDepth) {
        //NS_WARNING("RPC in-calls have raced!");
#ifndef OS_WIN
        RPC_ASSERT(call.rpc_remote_stack_depth_guess() < stackDepth,
                   "fatal logic error");
        RPC_ASSERT(1 == (stackDepth - call.rpc_remote_stack_depth_guess()),
                   "got more than 1 RPC message out of sync???");
        RPC_ASSERT(1 == (call.rpc_local_stack_depth() - mRemoteStackDepthGuess),
                   "RPC stacks unexpectedly not symmetric");
#else
        // See WindowsEventLoop; Windows can race heavily when modal UI
        // loops are displayed by plugins.
#endif
        // the "winner", if there is one, gets to defer processing of
        // the other side's in-call
        bool defer;
        const char* winner;
        switch (mRacePolicy) {
        case RRPChildWins:
            winner = "child";
            defer = mChild;
            break;
        case RRPParentWins:
            winner = "parent";
            defer = !mChild;
            break;
        case RRPError:
            NS_RUNTIMEABORT("NYI: 'Error' RPC race policy");
            return;
        default:
            NS_RUNTIMEABORT("not reached");
            return;
        }

        fprintf(stderr, "  (%s won, so we're%sdeferring)\n",
                winner, defer ? " " : " not ");

        if (defer) {
            // we now know the other side's stack has one more frame
            // than we thought
            ++mRemoteStackDepthGuess; // decremented in MaybeProcessDeferred()
            mDeferred.push(call);
            return;
        }

        // we "lost" and need to process the other side's in-call.
        // don't need to fix up mRemoteStackDepthGuess here, because
        // we're just about to increment it in DispatchIncall(),
        // which will make it correct again
    }

    DispatchIncall(call);
}

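// Hand the in-call to the listener and post its reply (or an error reply
// if the listener failed) back to the IO thread.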
void
RPCChannel::DispatchIncall(const Message& call)
{
    AssertWorkerThread();
    mMutex.AssertNotCurrentThreadOwns();
    RPC_ASSERT(call.is_rpc() && !call.is_reply(),
               "wrong message type");

    Message* reply = nsnull;

    ++mRemoteStackDepthGuess;
    Result rv =
        static_cast<RPCListener*>(mListener)->OnCallReceived(call, reply);
    --mRemoteStackDepthGuess;

    if (!MaybeHandleError(rv, "RPCChannel")) {
        delete reply;
        reply = new Message();
        reply->set_rpc();
        reply->set_reply();
        reply->set_reply_error();
    }

    reply->set_seqno(call.seqno());

    mIOLoop->PostTask(
        FROM_HERE,
        NewRunnableMethod(this, &RPCChannel::OnSend, reply));
}

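// Parent-only: send the control message that parks the child's worker
// thread in BlockOnParent() until UnblockChild() is called.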
bool
RPCChannel::BlockChild()
{
    AssertWorkerThread();

    if (mChild)
        NS_RUNTIMEABORT("child tried to block parent");
    SendSpecialMessage(new BlockChildMessage());
    return true;
}

bool
RPCChannel::UnblockChild()
{
    AssertWorkerThread();

    if (mChild)
        NS_RUNTIMEABORT("child tried to unblock parent");
    SendSpecialMessage(new UnblockChildMessage());
    return true;
}

bool
RPCChannel::OnSpecialMessage(uint16 id, const Message& msg)
{
    AssertWorkerThread();

    switch (id) {
    case BLOCK_CHILD_MESSAGE_TYPE:
        BlockOnParent();
        return true;

    case UNBLOCK_CHILD_MESSAGE_TYPE:
        UnblockFromParent();
        return true;

    default:
        return SyncChannel::OnSpecialMessage(id, msg);
    }
}

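// Child-only: dispatch loop entered when the parent sends
// BlockChildMessage.  Keeps servicing incoming messages until
// UnblockChildMessage arrives or the channel dies.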
void
RPCChannel::BlockOnParent()
{
    AssertWorkerThread();

    if (!mChild)
        NS_RUNTIMEABORT("only the child may block on the parent");

    MutexAutoLock lock(mMutex);

    if (mBlockedOnParent || AwaitingSyncReply() || 0 < StackDepth())
        NS_RUNTIMEABORT("attempt to block child when it's already blocked");

    mBlockedOnParent = true;
    do {
        // XXX this dispatch loop shares some similarities with the
        // one in Call(), but the logic is simpler and different
        // enough IMHO to warrant its own dispatch loop
        while (Connected() && mPending.empty() && mBlockedOnParent) {
            WaitForNotify();
        }

        if (!Connected()) {
            mBlockedOnParent = false;
            ReportConnectionError("RPCChannel");
            break;
        }

        if (!mPending.empty()) {
            Message recvd = mPending.front();
            mPending.pop();

            MutexAutoUnlock unlock(mMutex);
            if (recvd.is_rpc()) {
                // stack depth must be 0 here
                Incall(recvd, 0);
            }
            else if (recvd.is_sync()) {
                SyncChannel::OnDispatchMessage(recvd);
            }
            else {
                AsyncChannel::OnDispatchMessage(recvd);
            }
        }
    } while (mBlockedOnParent);

    EnqueuePendingMessages();
}

void
RPCChannel::UnblockFromParent()
{
    AssertWorkerThread();

    if (!mChild)
        NS_RUNTIMEABORT("only the child may be unblocked by the parent");
    MutexAutoLock lock(mMutex);
    mBlockedOnParent = false;
}

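// Print channel state to stderr and abort.  Called via RPC_ASSERT.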
void
RPCChannel::DebugAbort(const char* file, int line, const char* cond,
                       const char* why,
                       const char* type, bool reply)
{
    fprintf(stderr,
            "###!!! [RPCChannel][%s][%s:%d] "
            "Assertion (%s) failed.  %s (triggered by %s%s)\n",
            mChild ? "Child" : "Parent",
            file, line, cond,
            why,
            type, reply ? "reply" : "");
    // technically we need the mutex for this, but we're dying anyway
    fprintf(stderr, "  local RPC stack size: %lu\n",
            mStack.size());
    fprintf(stderr, "  remote RPC stack guess: %lu\n",
            mRemoteStackDepthGuess);
    fprintf(stderr, "  deferred stack size: %lu\n",
            mDeferred.size());
    fprintf(stderr, "  out-of-turn RPC replies stack size: %lu\n",
            mOutOfTurnReplies.size());
    fprintf(stderr, "  Pending queue size: %lu, front to back:\n",
            mPending.size());
    while (!mPending.empty()) {
        fprintf(stderr, "    [ %s%s ]\n",
                mPending.front().is_rpc() ? "rpc" :
                (mPending.front().is_sync() ? "sync" : "async"),
                mPending.front().is_reply() ? "reply" : "");
        mPending.pop();
    }

    NS_RUNTIMEABORT(why);
}

//
// The methods below run in the context of the IO thread, and can proxy
// back to the methods above
//

void
RPCChannel::OnMessageReceived(const Message& msg)
{
    AssertIOThread();
    MutexAutoLock lock(mMutex);

    // regardless of the RPC stack, if we're awaiting a sync reply, we
    // know that it needs to be immediately handled to unblock us.
    if (AwaitingSyncReply() && msg.is_sync()) {
        // wake up worker thread waiting at SyncChannel::Send
        mRecvd = msg;
        NotifyWorkerThread();
        return;
    }

    mPending.push(msg);

    if (0 == StackDepth() && !mBlockedOnParent)
        // the worker thread might be idle, make sure it wakes up
        mWorkerLoop->PostTask(
            FROM_HERE,
            NewRunnableMethod(this, &RPCChannel::OnMaybeDequeueOne));
    else if (!AwaitingSyncReply())
        NotifyWorkerThread();
}

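// If the worker thread is blocked in Call() or SyncChannel::Send(), wake
// it so that it notices the error and bails out.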
void
RPCChannel::OnChannelError()
{
    AssertIOThread();

    AsyncChannel::OnChannelError();

    // skip SyncChannel::OnError(); we subsume its duties
    MutexAutoLock lock(mMutex);
    if (AwaitingSyncReply()
        || 0 < StackDepth())
        NotifyWorkerThread();
}

} // namespace ipc
} // namespace mozilla