Automated merge with ssh://hg.lindenlab.com/monty/viewer-drano-http-3

Thu, 22 Aug 2013 19:53:46 -0400

author
Monty Brandenberg <monty@lindenlab.com>
date
Thu, 22 Aug 2013 19:53:46 -0400
changeset 40710
fe3e11b6b50c
parent 40709
f38b9792a8f3
parent 40708
88761ed1b6bc
child 40711
3eea7d35f3c0

Automated merge with ssh://hg.lindenlab.com/monty/viewer-drano-http-3

     1.1 --- a/indra/llcorehttp/_httplibcurl.cpp	Thu Aug 22 19:48:45 2013 -0400
     1.2 +++ b/indra/llcorehttp/_httplibcurl.cpp	Thu Aug 22 19:53:46 2013 -0400
     1.3 @@ -41,7 +41,8 @@
     1.4  HttpLibcurl::HttpLibcurl(HttpService * service)
     1.5  	: mService(service),
     1.6  	  mPolicyCount(0),
     1.7 -	  mMultiHandles(NULL)
     1.8 +	  mMultiHandles(NULL),
     1.9 +	  mActiveHandles(NULL)
    1.10  {}
    1.11  
    1.12  
    1.13 @@ -77,6 +78,9 @@
    1.14  
    1.15  		delete [] mMultiHandles;
    1.16  		mMultiHandles = NULL;
    1.17 +
    1.18 +		delete [] mActiveHandles;
    1.19 +		mActiveHandles = NULL;
    1.20  	}
    1.21  
    1.22  	mPolicyCount = 0;
    1.23 @@ -90,9 +94,12 @@
    1.24  	
    1.25  	mPolicyCount = policy_count;
    1.26  	mMultiHandles = new CURLM * [mPolicyCount];
    1.27 +	mActiveHandles = new int [mPolicyCount];
    1.28 +	
    1.29  	for (int policy_class(0); policy_class < mPolicyCount; ++policy_class)
    1.30  	{
    1.31  		mMultiHandles[policy_class] = curl_multi_init();
    1.32 +		mActiveHandles[policy_class] = 0;
    1.33  	}
    1.34  }
    1.35  
    1.36 @@ -110,8 +117,10 @@
    1.37  	// Give libcurl some cycles to do I/O & callbacks
    1.38  	for (int policy_class(0); policy_class < mPolicyCount; ++policy_class)
    1.39  	{
    1.40 -		if (! mMultiHandles[policy_class])
    1.41 +		if (! mActiveHandles[policy_class] || ! mMultiHandles[policy_class])
    1.42 +		{
    1.43  			continue;
    1.44 +		}
    1.45  		
    1.46  		int running(0);
    1.47  		CURLMcode status(CURLM_CALL_MULTI_PERFORM);
    1.48 @@ -191,6 +200,7 @@
    1.49  	
    1.50  	// On success, make operation active
    1.51  	mActiveOps.insert(op);
    1.52 +	++mActiveHandles[op->mReqPolicy];
    1.53  }
    1.54  
    1.55  
    1.56 @@ -212,6 +222,7 @@
    1.57  
    1.58  	// Drop references
    1.59  	mActiveOps.erase(it);
    1.60 +	--mActiveHandles[op->mReqPolicy];
    1.61  	op->release();
    1.62  
    1.63  	return true;
    1.64 @@ -273,6 +284,7 @@
    1.65  
    1.66  	// Deactivate request
    1.67  	mActiveOps.erase(it);
    1.68 +	--mActiveHandles[op->mReqPolicy];
    1.69  	op->mCurlActive = false;
    1.70  
    1.71  	// Set final status of request if it hasn't failed by other mechanisms yet
    1.72 @@ -334,19 +346,9 @@
    1.73  
    1.74  int HttpLibcurl::getActiveCountInClass(int policy_class) const
    1.75  {
    1.76 -	int count(0);
    1.77 -	
    1.78 -	for (active_set_t::const_iterator iter(mActiveOps.begin());
    1.79 -		 mActiveOps.end() != iter;
    1.80 -		 ++iter)
    1.81 -	{
    1.82 -		if ((*iter)->mReqPolicy == policy_class)
    1.83 -		{
    1.84 -			++count;
    1.85 -		}
    1.86 -	}
    1.87 -	
    1.88 -	return count;
    1.89 +	llassert_always(policy_class < mPolicyCount);
    1.90 +
    1.91 +	return mActiveHandles ? mActiveHandles[policy_class] : 0;
    1.92  }
    1.93  
    1.94  
     2.1 --- a/indra/llcorehttp/_httplibcurl.h	Thu Aug 22 19:48:45 2013 -0400
     2.2 +++ b/indra/llcorehttp/_httplibcurl.h	Thu Aug 22 19:53:46 2013 -0400
     2.3 @@ -133,6 +133,7 @@
     2.4  	active_set_t		mActiveOps;
     2.5  	int					mPolicyCount;
     2.6  	CURLM **			mMultiHandles;			// One handle per policy class
     2.7 +	int *				mActiveHandles;			// Active count per policy class
     2.8  }; // end class HttpLibcurl
     2.9  
    2.10  }  // end namespace LLCore
     3.1 --- a/indra/llcorehttp/_httppolicy.cpp	Thu Aug 22 19:48:45 2013 -0400
     3.2 +++ b/indra/llcorehttp/_httppolicy.cpp	Thu Aug 22 19:53:46 2013 -0400
     3.3 @@ -153,14 +153,16 @@
     3.4  		};
     3.5  	static const int delta_max(int(LL_ARRAY_SIZE(retry_deltas)) - 1);
     3.6  	static const HttpStatus error_503(503);
     3.7 -	
     3.8 +
     3.9  	const HttpTime now(totalTime());
    3.10  	const int policy_class(op->mReqPolicy);
    3.11  	HttpTime delta(retry_deltas[llclamp(op->mPolicyRetries, 0, delta_max)]);
    3.12 +	bool external_delta(false);
    3.13  
    3.14  	if (op->mReplyRetryAfter > 0 && op->mReplyRetryAfter < 30)
    3.15  	{
    3.16  		delta = op->mReplyRetryAfter * U64L(1000000);
    3.17 +		external_delta = true;
    3.18  	}
    3.19  	op->mPolicyRetryAt = now + delta;
    3.20  	++op->mPolicyRetries;
    3.21 @@ -171,7 +173,8 @@
    3.22  	LL_DEBUGS("CoreHttp") << "HTTP request " << static_cast<HttpHandle>(op)
    3.23  						  << " retry " << op->mPolicyRetries
    3.24  						  << " scheduled in " << (delta / HttpTime(1000))
    3.25 -						  << " mS.  Status:  " << op->mStatus.toHex()
    3.26 +						  << " mS (" << (external_delta ? "external" : "internal")
    3.27 +						  << ").  Status:  " << op->mStatus.toHex()
    3.28  						  << LL_ENDL;
    3.29  	if (op->mTracing > HTTP_TRACE_OFF)
    3.30  	{
    3.31 @@ -212,6 +215,14 @@
    3.32  	for (int policy_class(0); policy_class < mClasses.size(); ++policy_class)
    3.33  	{
    3.34  		ClassState & state(*mClasses[policy_class]);
    3.35 +		HttpRetryQueue & retryq(state.mRetryQueue);
    3.36 +		HttpReadyQueue & readyq(state.mReadyQueue);
    3.37 +
    3.38 +		if (retryq.empty() && readyq.empty())
    3.39 +		{
    3.40 +			continue;
    3.41 +		}
    3.42 +		
    3.43  		const bool throttle_enabled(state.mOptions.mThrottleRate > 0L);
    3.44  		const bool throttle_current(throttle_enabled && now < state.mThrottleEnd);
    3.45  
    3.46 @@ -225,9 +236,6 @@
    3.47  		int active(transport.getActiveCountInClass(policy_class));
    3.48  		int needed(state.mOptions.mConnectionLimit - active);		// Expect negatives here
    3.49  
    3.50 -		HttpRetryQueue & retryq(state.mRetryQueue);
    3.51 -		HttpReadyQueue & readyq(state.mReadyQueue);
    3.52 -
    3.53  		if (needed > 0)
    3.54  		{
    3.55  			// First see if we have any retries...
     4.1 --- a/indra/newview/llmeshrepository.cpp	Thu Aug 22 19:48:45 2013 -0400
     4.2 +++ b/indra/newview/llmeshrepository.cpp	Thu Aug 22 19:53:46 2013 -0400
     4.3 @@ -79,10 +79,6 @@
     4.4  #include <queue>
     4.5  
     4.6  
     4.7 -// [ Disclaimer:  this documentation isn't by one of the original authors
     4.8 -//   but by someone coming through later and extracting intent and function.
     4.9 -//   Some of this will be wrong so use judgement. ]
    4.10 -//
    4.11  // Purpose
    4.12  //
    4.13  //   The purpose of this module is to provide access between the viewer
    4.14 @@ -101,6 +97,7 @@
    4.15  //     * getMeshHeader (For structural details, see:
    4.16  //       http://wiki.secondlife.com/wiki/Mesh/Mesh_Asset_Format)
    4.17  //     * notifyLoadedMeshes
    4.18 +//     * getSkinInfo
    4.19  //
    4.20  // Threads
    4.21  //
    4.22 @@ -108,7 +105,54 @@
    4.23  //   repo     Overseeing worker thread associated with the LLMeshRepoThread class
    4.24  //   decom    Worker thread for mesh decomposition requests
    4.25  //   core     HTTP worker thread:  does the work but doesn't intrude here
    4.26 -//   uploadN  0-N temporary mesh upload threads
    4.27 +//   uploadN  0-N temporary mesh upload threads (0-1 in practice)
    4.28 +//
    4.29 +// Sequence of Operations
    4.30 +//
    4.31 +//   What follows is a description of the retrieval of one LOD for
    4.32 +//   a new mesh object.  Work is performed by a series of short, quick
    4.33 +//   actions distributed over a number of threads.  Each is meant
    4.34 +//   to proceed without stalling and the whole forms a deep request
    4.35 +//   pipeline to achieve throughput.  Ellipsis indicates a return
    4.36 +//   or break in processing which is resumed elsewhere.
    4.37 +//
    4.38 +//         main thread         repo thread (run() method)
    4.39 +//
    4.40 +//         loadMesh() invoked to request LOD
    4.41 +//           append LODRequest to mPendingRequests
    4.42 +//         ...
    4.43 +//         other mesh requests may be made
    4.44 +//         ...
    4.45 +//         notifyLoadedMeshes() invoked to stage work
    4.46 +//           append HeaderRequest to mHeaderReqQ
    4.47 +//         ...
    4.48 +//                             scan mHeaderReqQ
    4.49 +//                             issue 4096-byte GET for header
    4.50 +//                             ...
    4.51 +//                             onCompleted() invoked for GET
    4.52 +//                               data copied
    4.53 +//                               headerReceived() invoked
    4.54 +//                                 LLSD parsed
    4.55 +//                                 mMeshHeader, mMeshHeaderSize updated
    4.56 +//                                 scan mPendingLOD for LOD request
    4.57 +//                                 push LODRequest to mLODReqQ
    4.58 +//                             ...
    4.59 +//                             scan mLODReqQ
    4.60 +//                             fetchMeshLOD() invoked
    4.61 +//                               issue Byte-Range GET for LOD
    4.62 +//                             ...
    4.63 +//                             onCompleted() invoked for GET
    4.64 +//                               data copied
    4.65 +//                               lodReceived() invoked
    4.66 +//                                 unpack data into LLVolume
    4.67 +//                                 append LoadedMesh to mLoadedQ
    4.68 +//                             ...
    4.69 +//         notifyLoadedMeshes() invoked again
    4.70 +//           scan mLoadedQ
    4.71 +//           notifyMeshLoaded() for LOD
    4.72 +//             setMeshAssetLoaded() invoked for system volume
    4.73 +//             notifyMeshLoaded() invoked for each interested object
    4.74 +//         ...
    4.75  //
    4.76  // Mutexes
    4.77  //
    4.78 @@ -163,19 +207,19 @@
    4.79  //
    4.80  //   LLMeshRepository:
    4.81  //
    4.82 -//     sBytesReceived
    4.83 -//     sMeshRequestCount
    4.84 -//     sHTTPRequestCount
    4.85 -//     sHTTPLargeRequestCount
    4.86 -//     sHTTPRetryCount
    4.87 -//     sHTTPErrorCount
    4.88 -//     sLODPending
    4.89 -//     sLODProcessing
    4.90 -//     sCacheBytesRead
    4.91 -//     sCacheBytesWritten
    4.92 -//     sCacheReads
    4.93 -//     sCacheWrites
    4.94 -//     mLoadingMeshes                  none            rw.main.none, rw.main.mMeshMutex [4]
    4.95 +//     sBytesReceived                  none            rw.repo.none, ro.main.none [1]
    4.96 +//     sMeshRequestCount               "
    4.97 +//     sHTTPRequestCount               "
    4.98 +//     sHTTPLargeRequestCount          "
    4.99 +//     sHTTPRetryCount                 "
   4.100 +//     sHTTPErrorCount                 "
   4.101 +//     sLODPending                     mMeshMutex [4]  rw.main.mMeshMutex
   4.102 +//     sLODProcessing                  Repo::mMutex    rw.any.Repo::mMutex
   4.103 +//     sCacheBytesRead                 none            rw.repo.none, ro.main.none [1]
   4.104 +//     sCacheBytesWritten              "
   4.105 +//     sCacheReads                     "
   4.106 +//     sCacheWrites                    "
   4.107 +//     mLoadingMeshes                  mMeshMutex [4]  rw.main.none, rw.any.mMeshMutex
   4.108  //     mSkinMap                        none            rw.main.none
   4.109  //     mDecompositionMap               none            rw.main.none
   4.110  //     mPendingRequests                mMeshMutex [4]  rw.main.mMeshMutex
   4.111 @@ -199,25 +243,18 @@
   4.112  //     sMaxConcurrentRequests   mMutex        wo.main.none, ro.repo.none, ro.main.mMutex
   4.113  //     mMeshHeader              mHeaderMutex  rw.repo.mHeaderMutex, ro.main.mHeaderMutex, ro.main.none [0]
   4.114  //     mMeshHeaderSize          mHeaderMutex  rw.repo.mHeaderMutex
   4.115 -//     mSkinRequests            none          rw.repo.none, rw.main.none [0]
   4.116 -//     mSkinInfoQ               none          rw.repo.none, rw.main.none [0]
   4.117 -//     mDecompositionRequests   none          rw.repo.none, rw.main.none [0]
   4.118 -//     mPhysicsShapeRequests    none          rw.repo.none, rw.main.none [0]
   4.119 -//     mDecompositionQ          none          rw.repo.none, rw.main.none [0]
   4.120 -//     mHeaderReqQ              mMutex        ro.repo.none [3], rw.repo.mMutex, rw.any.mMutex
   4.121 -//     mLODReqQ                 mMutex        ro.repo.none [3], rw.repo.mMutex, rw.any.mMutex
   4.122 -//     mUnavailableQ            mMutex        rw.repo.none [0], ro.main.none [3], rw.main.mMutex
   4.123 -//     mLoadedQ                 mMutex        rw.repo.mMutex, ro.main.none [3], rw.main.mMutex
   4.124 +//     mSkinRequests            mMutex        rw.repo.mMutex, ro.repo.none [5]
   4.125 +//     mSkinInfoQ               none          rw.repo.none, rw.main.mMutex [0]
   4.126 +//     mDecompositionRequests   mMutex        rw.repo.mMutex, ro.repo.none [5]
   4.127 +//     mPhysicsShapeRequests    mMutex        rw.repo.mMutex, ro.repo.none [5]
   4.128 +//     mDecompositionQ          none          rw.repo.none, rw.main.mMutex [0]
   4.129 +//     mHeaderReqQ              mMutex        ro.repo.none [5], rw.repo.mMutex, rw.any.mMutex
   4.130 +//     mLODReqQ                 mMutex        ro.repo.none [5], rw.repo.mMutex, rw.any.mMutex
   4.131 +//     mUnavailableQ            mMutex        rw.repo.none [0], ro.main.none [5], rw.main.mMutex
   4.132 +//     mLoadedQ                 mMutex        rw.repo.mMutex, ro.main.none [5], rw.main.mMutex
   4.133  //     mPendingLOD              mMutex        rw.repo.mMutex, rw.any.mMutex
   4.134  //     mHttp*                   none          rw.repo.none
   4.135  //
   4.136 -//   LLPhysicsDecomp:
   4.137 -//    
   4.138 -//     mRequestQ
   4.139 -//     mCurRequest
   4.140 -//     mCompletedQ
   4.141 -//
   4.142 -//
   4.143  // QA/Development Testing
   4.144  //
   4.145  //   Debug variable 'MeshUploadFakeErrors' takes a mask of bits that will
   4.146 @@ -230,15 +267,27 @@
   4.147  //                   locally-generated 500 status.
   4.148  //   0x08            As with 0x04 but for the upload operation.
   4.149  //
   4.150 +// *TODO:  Work list for followup actions:
   4.151 +//   * Review anything marked as unsafe above, verify if there are real issues.
   4.152 +//   * See if we can put ::run() into a hard sleep.  May not actually perform better
   4.153 +//     than the current scheme so be prepared for disappointment.  You'll likely
   4.154 +//     need to introduce a condition variable class that references a mutex in
   4.155 +//     methods rather than derives from mutex which isn't correct.
   4.156 +//   * On upload failures, make more information available to the alerting
   4.157 +//     dialog.  Get the structured information going into the log into a
   4.158 +//     tree there.
   4.159 +//   * Header parse failures come without much explanation.  Elaborate.
   4.160 +//   * Need a final failure state for requests that are retried and just won't
   4.161 +//     complete.  We can fail a LOD request, others we don't.
   4.162  
   4.163  LLMeshRepository gMeshRepo;
   4.164  
   4.165  const S32 MESH_HEADER_SIZE = 4096;                      // Important:  assumption is that headers fit in this space
   4.166 -const S32 REQUEST_HIGH_WATER_MIN = 32;
   4.167 -const S32 REQUEST_HIGH_WATER_MAX = 200;
   4.168 +const S32 REQUEST_HIGH_WATER_MIN = 32;					// Limits for GetMesh regions
   4.169 +const S32 REQUEST_HIGH_WATER_MAX = 150;					// Should remain under 2X throttle
   4.170  const S32 REQUEST_LOW_WATER_MIN = 16;
   4.171 -const S32 REQUEST_LOW_WATER_MAX = 100;
   4.172 -const S32 REQUEST2_HIGH_WATER_MIN = 32;
   4.173 +const S32 REQUEST_LOW_WATER_MAX = 75;
   4.174 +const S32 REQUEST2_HIGH_WATER_MIN = 32;					// Limits for GetMesh2 regions
   4.175  const S32 REQUEST2_HIGH_WATER_MAX = 80;
   4.176  const S32 REQUEST2_LOW_WATER_MIN = 16;
   4.177  const S32 REQUEST2_LOW_WATER_MAX = 40;
   4.178 @@ -269,7 +318,7 @@
   4.179  U32 LLMeshRepository::sCacheWrites = 0;
   4.180  U32 LLMeshRepository::sMaxLockHoldoffs = 0;
   4.181  
   4.182 -LLDeadmanTimer LLMeshRepository::sQuiescentTimer(15.0, true);	// true -> gather cpu metrics
    4.183 +LLDeadmanTimer LLMeshRepository::sQuiescentTimer(15.0, false);	// false -> don't gather cpu metrics
   4.184  
   4.185  	
   4.186  static S32 dump_num = 0;
   4.187 @@ -703,7 +752,7 @@
   4.188  
   4.189  	while (!LLApp::isQuitting())
   4.190  	{
   4.191 -		// *TODO:  Revise sleep/wake strategy and try to move away'
   4.192 +		// *TODO:  Revise sleep/wake strategy and try to move away
   4.193  		// from polling operations in this thread.  We can sleep
   4.194  		// this thread hard when:
   4.195  		// * All Http requests are serviced
   4.196 @@ -714,7 +763,8 @@
   4.197  		// * Physics shape request queue empty
   4.198  		// We wake the thread when any of the above become untrue.
   4.199  		// Will likely need a correctly-implemented condition variable to do this.
   4.200 -
   4.201 +		// On the other hand, this may actually be an effective and efficient scheme...
   4.202 +		
   4.203  		mSignal->wait();
   4.204  
   4.205  		if (LLApp::isQuitting())
   4.206 @@ -810,7 +860,7 @@
   4.207  
   4.208  			// holding lock, try next list
   4.209  			// *TODO:  For UI/debug-oriented lists, we might drop the fine-
   4.210 -			// grained locking as there's lowered expectations of smoothness
   4.211 +			// grained locking as there's a lowered expectation of smoothness
   4.212  			// in these cases.
   4.213  			if (! mDecompositionRequests.empty() && mHttpRequestSet.size() < sRequestHighWater)
   4.214  			{
   4.215 @@ -2303,24 +2353,26 @@
   4.216  
   4.217  void LLMeshRepoThread::notifyLoadedMeshes()
   4.218  {
   4.219 +	bool update_metrics(false);
   4.220 +	
   4.221  	if (!mMutex)
   4.222  	{
   4.223  		return;
   4.224  	}
   4.225  
   4.226 -	if (!mLoadedQ.empty() || !mUnavailableQ.empty())
   4.227 -	{
   4.228 -		// Ping time-to-load metrics for mesh download operations.
   4.229 -		LLMeshRepository::metricsProgress(0);
   4.230 -	}
   4.231 -	
   4.232  	while (!mLoadedQ.empty())
   4.233  	{
   4.234  		mMutex->lock();
   4.235 +		if (mLoadedQ.empty())
   4.236 +		{
   4.237 +			mMutex->unlock();
   4.238 +			break;
   4.239 +		}
   4.240  		LoadedMesh mesh = mLoadedQ.front();
   4.241  		mLoadedQ.pop();
   4.242  		mMutex->unlock();
   4.243  		
   4.244 +		update_metrics = true;
   4.245  		if (mesh.mVolume && mesh.mVolume->getNumVolumeFaces() > 0)
   4.246  		{
   4.247  			gMeshRepo.notifyMeshLoaded(mesh.mMeshParams, mesh.mVolume);
   4.248 @@ -2335,10 +2387,17 @@
   4.249  	while (!mUnavailableQ.empty())
   4.250  	{
   4.251  		mMutex->lock();
   4.252 +		if (mUnavailableQ.empty())
   4.253 +		{
   4.254 +			mMutex->unlock();
   4.255 +			break;
   4.256 +		}
   4.257 +		
   4.258  		LODRequest req = mUnavailableQ.front();
   4.259  		mUnavailableQ.pop();
   4.260  		mMutex->unlock();
   4.261 -		
   4.262 +
   4.263 +		update_metrics = true;
   4.264  		gMeshRepo.notifyMeshUnavailable(req.mMeshParams, req.mLOD);
   4.265  	}
   4.266  
   4.267 @@ -2353,6 +2412,13 @@
   4.268  		gMeshRepo.notifyDecompositionReceived(mDecompositionQ.front());
   4.269  		mDecompositionQ.pop();
   4.270  	}
   4.271 +
   4.272 +	if (update_metrics)
   4.273 +	{
   4.274 +		// Ping time-to-load metrics for mesh download operations.
   4.275 +		LLMeshRepository::metricsProgress(0);
   4.276 +	}
   4.277 +	
   4.278  }
   4.279  
   4.280  S32 LLMeshRepoThread::getActualMeshLOD(const LLVolumeParams& mesh_params, S32 lod) 
   4.281 @@ -2461,6 +2527,12 @@
   4.282  		// speculative loads aren't done.
   4.283  		static const LLCore::HttpStatus par_status(HTTP_PARTIAL_CONTENT);
   4.284  
   4.285 +		if (par_status != status)
   4.286 +		{
   4.287 +			LL_WARNS_ONCE(LOG_MESH) << "Non-206 successful status received for fetch:  "
   4.288 +									<< status.toHex() << LL_ENDL;
   4.289 +		}
   4.290 +		
   4.291  		LLCore::BufferArray * body(response->getBody());
   4.292  		S32 data_size(body ? body->size() : 0);
   4.293  		U8 * data(NULL);
   4.294 @@ -2995,7 +3067,8 @@
   4.295  	}
   4.296  	else
   4.297  	{
   4.298 -		// GetMesh2 operation with keepalives, etc.
   4.299 +		// GetMesh2 operation with keepalives, etc.  With pipelining,
   4.300 +		// we'll increase this.
   4.301  		LLMeshRepoThread::sMaxConcurrentRequests = gSavedSettings.getU32("Mesh2MaxConcurrentRequests");
   4.302  		LLMeshRepoThread::sRequestHighWater = llclamp(5 * S32(LLMeshRepoThread::sMaxConcurrentRequests),
   4.303  													  REQUEST2_HIGH_WATER_MIN,
   4.304 @@ -3083,7 +3156,10 @@
   4.305  	mDecompThread->notifyCompleted();
   4.306  
   4.307  	// For major operations, attempt to get the required locks
   4.308 -	// without blocking and punt if they're not available.
   4.309 +	// without blocking and punt if they're not available.  The
   4.310 +	// longest run of holdoffs is kept in sMaxLockHoldoffs just
   4.311 +	// to collect the data.  In testing, I've never seen a value
   4.312 +	// greater than 2 (written to log on exit).
   4.313  	{
   4.314  		LLMutexTrylock lock1(mMeshMutex);
   4.315  		LLMutexTrylock lock2(mThread->mMutex);

mercurial