（文章很长，时间不多的同学可以直接看最后！）

## 三、调度机制

/// <summary>
/// Queues the specified work to run on the ThreadPool and returns a Task handle for that work.
/// </summary>
/// <param name="action">The work to execute asynchronously</param>
/// <returns>A Task that represents the work queued to execute in the ThreadPool.</returns>
/// <exception cref="T:System.ArgumentNullException">
/// The <paramref name="action"/> parameter was null.
/// </exception>
// NOTE(review): the method signature (presumably Task.Run(Action), judging by the
// XML doc above) and the entire method body were elided in this excerpt — only the
// braces remain. See the full reference source for the implementation.
{
}

// Implicitly converts action to object and handles the meat of the StartNew() logic.
// NOTE(review): the method signature is elided in this excerpt; the body below
// validates the scheduler, then schedules a task 't' whose construction (a `new Task(...)`
// call, per the comment further down) was also elided.
{
// Validate arguments.
if (scheduler == null)
{
ThrowHelper.ThrowArgumentNullException(ExceptionArgument.scheduler);
}
Contract.EndContractBlock();

// Create and schedule the task. This throws an InvalidOperationException if already shut down.
// Here we add the InternalTaskOptions.QueuedByRuntime to the internalOptions, so that TaskConstructorCore can skip the cancellation token registration
// NOTE(review): the construction of 't' referenced here was elided; 't' is undefined
// as the excerpt stands.

// Schedule without the started/canceled protection — see ScheduleAndStart's
// needsProtection parameter documentation below.
t.ScheduleAndStart(false);
return t;
}

/// <summary>
/// Schedules the task for execution.
/// </summary>
/// <param name="needsProtection">If true, TASK_STATE_STARTED bit is turned on in
/// an atomic fashion, making sure that TASK_STATE_CANCELED does not get set
/// underneath us.  If false, TASK_STATE_STARTED bit is OR-ed right in.  This
/// allows us to streamline things a bit for StartNew(), where competing cancellations
/// are not a problem.</param>
// Transitions the task to the started state and hands it to its scheduler.
internal void ScheduleAndStart(bool needsProtection)
{

if (needsProtection)
{
// Atomically set TASK_STATE_STARTED so a competing cancellation cannot win the race.
if (!MarkStarted())
{
// A cancel has snuck in before we could get started.  Quietly exit.
return;
}
}
else
{
// NOTE(review): body elided in this excerpt — presumably the non-atomic
// OR of the STARTED bit described in the <param> doc; confirm against the
// full reference source.
}

if (s_asyncDebuggingEnabled)
{
// NOTE(review): body elided — likely registers this task with the async
// debugging bookkeeping; confirm against the full reference source.
}

// NOTE(review): the guarding condition for this block (presumably a check that
// causality tracing is enabled) was elided, leaving an orphan brace pair.
{
AsyncCausalityTracer.TraceOperationCreation(CausalityTraceLevel.Required, this.Id, "Task: " + m_action.Method.Name, 0);
}

try
{
// Queue to the indicated scheduler.
// NOTE(review): the actual queueing call was elided in this excerpt.
}
// NOTE(review): orphan brace pair below — lines were lost between the try
// block and the catch clause when this excerpt was pasted.
{
}
catch (Exception e)
{
// a Faulted state.
// NOTE(review): the statements that wrap 'e' into a TaskSchedulerException
// ('tse') and record it on the task appear to have been elided; Finish(false)
// transitions the task to its terminal state.
Finish(false);

// Now we need to mark ourselves as "handled" to avoid crashing the finalizer thread if we are called from StartNew(),
// because the exception is either propagated outside directly, or added to an enclosing parent. However we won't do this for
// continuation tasks, because in that case we internally eat the exception and therefore we need to make sure the user does
// later observe it explicitly or see it on the finalizer.

// NOTE(review): the condition guarding this block (the continuation-task check
// described in the comment above) was elided.
{
Debug.Assert(
(m_contingentProperties != null) &&
(m_contingentProperties.m_exceptionsHolder != null) &&
(m_contingentProperties.m_exceptionsHolder.ContainsFaultList),
"Task.ScheduleAndStart(): Expected m_contingentProperties.m_exceptionsHolder to exist " +
"and to have faults recorded.");

m_contingentProperties.m_exceptionsHolder.MarkAsHandled(false);
}
// re-throw the exception wrapped as a TaskSchedulerException.
// NOTE(review): 'tse' is undefined as the excerpt stands — its declaration was elided above.
throw tse;
}
}

// Queues a Task to this scheduler (ThreadPoolTaskScheduler override).
protected internal override void QueueTask(Task task)
{
// NOTE(review): the condition for this if/else (presumably distinguishing
// long-running / fair tasks from ordinary ones) and both branch bodies were
// elided in this excerpt — confirm against the full reference source.
{
}
else
{
}
}

// Enqueues a work item, preferring the current thread's local work-stealing
// queue over the global queue unless forceGlobal is set.
public void Enqueue(IThreadPoolWorkItem callback, bool forceGlobal)
{
// NOTE(review): this if has no body in the excerpt — the logging call was elided.
if (loggingEnabled)

// NOTE(review): body elided — presumably assigns the current thread's
// thread-locals to 'tl' (which is otherwise undeclared here); when forceGlobal
// is true 'tl' stays null and the global queue is used below.
if (!forceGlobal)

if (null != tl)
{
// Local push: LIFO, serviced first by the owning thread (see Dequeue).
tl.workStealingQueue.LocalPush(callback);
}
else
{
// Global queue: FIFO, shared across all pool threads.
workItems.Enqueue(callback);
}

// NOTE(review): the trailing call that requests a worker thread (e.g.
// EnsureThreadRequested) appears to have been elided here.
}

// Requests a worker thread from the VM unless #procs requests are already outstanding.
internal void EnsureThreadRequested()
{
//
// If we have not yet requested #procs threads from the VM, then request a new thread.
// Note that there is a separate count in the VM which will also be incremented in this case,
// which is handled by RequestWorkerThread.
//
// NOTE(review): the loop header was elided in this excerpt — 'count' is never
// declared and 'break' implies an enclosing loop; presumably it reads
// numOutstandingThreadRequests and loops while count < #procs. Confirm
// against the full reference source.
{
// Classic CAS retry: bump the outstanding-request count by one.
int prev = Interlocked.CompareExchange(ref numOutstandingThreadRequests, count + 1, count);
if (prev == count)
{
// CAS succeeded; the (elided) call to actually request the thread follows.
break;
}
// Lost the race; retry with the freshly observed value.
count = prev;
}
}

// Demo: print the processor count, then queue 10 work items to observe how the
// pool ramps up threads (output shown below the snippet).
Console.WriteLine("CPU={0}", Environment.ProcessorCount);
for (var i = 0; i < 10; i++)
{
// NOTE(review): the call wrapping this lambda was elided in the excerpt — the
// trailing "}, i);" suggests ThreadPool.QueueUserWorkItem(s => { ... }, i).
// A blocking delay between the two WriteLines (≈2s, judging by the output
// timestamps) also appears to have been elided.
{
var n = (Int32)s;
Console.WriteLine("{0:HH:mm:ss.fff} th {1} start", DateTime.Now, n);
Console.WriteLine("{0:HH:mm:ss.fff} th {1} end", DateTime.Now, n);
}, i);
}

CPU=4
18:05:27.936 th 2 start
18:05:27.936 th 3 start
18:05:27.936 th 1 start
18:05:27.936 th 0 start
18:05:29.373 th 4 start
18:05:29.939 th 2 end
18:05:29.940 th 5 start
18:05:29.940 th 0 end
18:05:29.941 th 6 start
18:05:29.940 th 1 end
18:05:29.940 th 3 end
18:05:29.942 th 7 start
18:05:29.942 th 8 start
18:05:30.871 th 9 start
18:05:31.374 th 4 end
18:05:31.942 th 5 end
18:05:31.942 th 6 end
18:05:31.943 th 7 end
18:05:31.943 th 8 end
18:05:32.872 th 9 end

// Worker-thread dispatch loop: runs queued work items for one quantum, then
// returns to the VM. Returns true for a normal return, false when Hill Climbing
// asked for the thread back.
internal static bool Dispatch()
{
//
// The clock is ticking!  We have ThreadPoolGlobals.TP_QUANTUM milliseconds to get some work done, and then
//
// NOTE(review): this comment ends mid-sentence — the remainder ("...we need to
// return to the VM", presumably) was lost when the excerpt was pasted.
int quantumStartTime = Environment.TickCount;

//
// Update our records to indicate that an outstanding request for a thread has now been fulfilled.
// From this point on, we are responsible for requesting another thread if we stop working for any
// reason, and we believe there might still be work in the queue.
//
// Note that if this thread is aborted before we get a chance to request another one, the VM will
// record a thread request on our behalf.  So we don't need to worry about getting aborted right here.
//
// NOTE(review): the statement this comment describes (a MarkThreadRequestSatisfied
// call, by its description) was elided, as were the declarations of 'workQueue',
// 'tl', 'workItem', and the "need another thread" flag used below.

// Has the desire for logging changed since the last time we entered?

//
// Assume that we're going to need another thread if this one returns to the VM.  We'll set this to
// false later, but only if we're absolutely certain that the queue is empty.
//
try
{
//
// Set up our thread-local data
//

//
// Loop until our quantum expires.
//
while ((Environment.TickCount - quantumStartTime) < ThreadPoolGlobals.TP_QUANTUM)
{
bool missedSteal = false;
// Try local queue, then global queue, then steal (see Dequeue below).
workItem = workQueue.Dequeue(tl, ref missedSteal);

if (workItem == null)
{
//
// No work.  We're going to return to the VM once we leave this protected region.
// If we missed a steal, though, there may be more work in the queue.
// Instead of looping around and trying again, we'll just request another thread.  This way
// we won't starve other AppDomains while we spin trying to get locks, and hopefully the thread
// that owns the contended work-stealing queue will pick up its own workitems in the meantime,
// which will be more efficient than this thread doing it anyway.
//

// Tell the VM we're returning normally, not because Hill Climbing asked us to return.
return true;
}

// NOTE(review): this if has no body in the excerpt — the ETW logging call was elided.
if (workQueue.loggingEnabled)

//
// If we found work, there may be more work.  Ask for another thread so that the other work can be processed
// in parallel.  Note that this will only ask for a max of #procs threads, so it's safe to call it for every dequeue.
//

//
// Execute the workitem outside of any finally blocks, so that it can be aborted if needed.
//
// NOTE(review): the condition guarding this block (paired with the 'else' below,
// presumably a worker-status-tracking toggle) was elided.
{
bool reportedStatus = false;
try
{
reportedStatus = true;
workItem.ExecuteWorkItem();
}
finally
{
// NOTE(review): this if has no body in the excerpt — the statement that
// un-reports the worker status was elided.
if (reportedStatus)
}
}
else
{
workItem.ExecuteWorkItem();
}
workItem = null;

//
// Notify the VM that we executed this workitem.  This is also our opportunity to ask whether Hill Climbing wants
// us to return the thread to the pool or not.
//
// NOTE(review): per the comment above, this return is presumably conditional on
// the (elided) Hill Climbing notification call; as excerpted it is unconditional.
return false;
}

// If we get here, it's because our quantum expired.  Tell the VM we're returning normally.
return true;
}
// NOTE(review): the catch header was elided here — 'tae' used below indicates
// this block was 'catch (ThreadAbortException tae)'.
{
//
// This is here to catch the case where this thread is aborted between the time we exit the finally block in the dispatch
// loop, and the time we execute the work item.  QueueUserWorkItemCallback uses this to update its accounting of whether
// it was executed or not (in debug builds only).  Task uses this to communicate the ThreadAbortException to anyone
// who waits for the task to complete.
//
workItem?.MarkAborted(tae);

//
// In this case, the VM is going to request another thread on our behalf.  No need to do it twice.
//
// throw;  //no need to explicitly rethrow a ThreadAbortException, and doing so causes allocations on amd64.
}
finally
{
//
// If we are exiting for any reason other than that the queue is definitely empty, ask for another
// thread to pick up where we left off.
//
// NOTE(review): the conditional EnsureThreadRequested call described above was elided.
}

// we can never reach this point, but the C# compiler doesn't know that, because it doesn't know the ThreadAbortException will be reraised above.
Debug.Fail("Should never reach this point");
return true;
}
// Takes the next work item for the calling pool thread: first its own local
// work-stealing queue (LIFO), then the global queue (FIFO), and finally a
// random-start sweep that steals from other threads' local queues.
// Returns null if nothing was found; missedSteal is set by TrySteal when a
// steal was contended, so the caller knows more work may remain.
public IThreadPoolWorkItem Dequeue(ThreadPoolWorkQueueThreadLocals tl, ref bool missedSteal)
{
    WorkStealingQueue localWsq = tl.workStealingQueue;

    // FIX(review): 'callback' was used without a declaration in this excerpt;
    // restored the local so the method compiles.
    IThreadPoolWorkItem callback;

    if ((callback = localWsq.LocalPop()) == null && // first try the local queue
        !workItems.TryDequeue(out callback)) // then try the global queue
    {
        // finally try to steal from another thread's local queue
        WorkStealingQueue[] queues = WorkStealingQueueList.Queues;
        int c = queues.Length;
        Debug.Assert(c > 0, "There must at least be a queue for this thread.");
        int maxIndex = c - 1;
        // Start at a random index so concurrent stealers spread across victims.
        int i = tl.random.Next(c);
        while (c > 0)
        {
            i = (i < maxIndex) ? i + 1 : 0;
            WorkStealingQueue otherQueue = queues[i];
            if (otherQueue != localWsq && otherQueue.CanSteal)
            {
                callback = otherQueue.TrySteal(ref missedSteal);
                if (callback != null)
                {
                    break;
                }
            }
            c--;
        }
    }

    return callback;
}
// One of our outstanding thread requests has been honored by the VM: decrement
// numOutstandingThreadRequests via a CAS retry loop, never dropping below zero.
internal void MarkThreadRequestSatisfied()
{
    int observed = numOutstandingThreadRequests;
    while (observed > 0)
    {
        int witnessed = Interlocked.CompareExchange(ref numOutstandingThreadRequests, observed - 1, observed);
        if (witnessed == observed)
        {
            // Our swap took effect; done.
            return;
        }
        // Another thread changed the count concurrently; retry with the fresh value.
        observed = witnessed;
    }
}

2. 一次Dispatch处理多个任务，只要总耗时不超过30个滴答，这样可以减少线程切换
3. 每次从队列拿一个任务来处理，然后检查打开更多线程（如果不足CPU数）
4. 先从本地队列弹出任务，然后到全局队列，最后再从其它线程的本地队列随机偷一个
5. 本地队列是压栈弹栈FILO，也就是先进来的任务后执行

## 四、线程池增长

https://gist.github.com/JonCole/e65411214030f0d823cb

Once the number of existing (busy) threads hits the "minimum" number of threads, the ThreadPool will throttle the rate at which it injects new threads to one thread per 500 milliseconds. This means that if your system gets a burst of work needing an IOCP thread, it will process that work very quickly. However, if the burst of work is more than the configured "Minimum" setting, there will be some delay in processing some of the work as the ThreadPool waits for one of two things to happen: 1. An existing thread becomes free to process the work. 2. No existing thread becomes free for 500ms, so a new thread is created.

Given the above information, we strongly recommend that customers set the minimum configuration value for IOCP and WORKER threads to something larger than the default value. We can't give one-size-fits-all guidance on what this value should be because the right value for one application will be too high/low for another application. This setting can also impact the performance of other parts of complicated applications, so each customer needs to fine-tune this setting to their specific needs. A good starting place is 100, then test and tweak as needed.

## 五、死锁

Imagine a method in your code that needs to connect via socket with a Web server. A possible implementation is opening the connection asynchronously with the Socket class' BeginConnect method and wait for the connection to be established with the EndConnect method.

class ConnectionSocket
{
    /// <summary>
    /// Connects a TCP socket to the local host on port 80 using the
    /// asynchronous Begin/End pattern; EndConnect blocks until the
    /// connection is established (this is the deadlock-prone version
    /// discussed in the text).
    /// </summary>
    public void Connect()
    {
        IPHostEntry ipHostEntry = Dns.Resolve(Dns.GetHostName());
        // FIX(review): this declaration was garbled to "80);" in the excerpt;
        // restored from the synchronous version of the same class below.
        IPEndPoint ipEndPoint = new IPEndPoint(ipHostEntry.AddressList[0], 80);
        Socket s = new Socket(ipEndPoint.AddressFamily, SocketType.Stream,
            ProtocolType.Tcp);
        // BeginConnect runs on the thread pool; EndConnect blocks this thread
        // until the async operation completes.
        IAsyncResult ar = s.BeginConnect(ipEndPoint, null, null);
        s.EndConnect(ar);
    }
}

So far, so good—calling BeginConnect makes the asynchronous operation execute on the thread pool and EndConnect blocks waiting for the connection to be established.

What happens if we use this class from a function executed on the thread pool? Imagine that the size of the pool is just two threads and we launch two asynchronous functions that use our connection class. With both functions executing on the pool, there is no room for additional requests until the functions are finished. The problem is that these functions call our class' Connect method. This method again launches an asynchronous operation on the thread pool, but since the pool is full, the request is queued, waiting for a thread to become free. Unfortunately, this will never happen because the functions that are using the pool are waiting for the queued functions to finish. The conclusion: our application is blocked.

In general, a deadlock can appear whenever a pool thread waits for an asynchronous function to finish. If we change the code so that we use the synchronous version of Connect, the problem will disappear:

class ConnectionSocket
{
    /// <summary>
    /// Connects a TCP socket to the local host on port 80 synchronously —
    /// no thread-pool work is queued, so the deadlock described in the
    /// text cannot occur.
    /// </summary>
    public void Connect()
    {
        IPHostEntry hostEntry = Dns.Resolve(Dns.GetHostName());
        IPEndPoint endPoint = new IPEndPoint(hostEntry.AddressList[0], 80);
        Socket socket = new Socket(endPoint.AddressFamily, SocketType.Stream, ProtocolType.Tcp);
        socket.Connect(endPoint);
    }
}

If you want to avoid deadlocks in your applications, never block a thread running on the pool while it waits for another function that also runs on the pool. This seems easy, but keep in mind that this rule implies two more:

• Do not create any class whose synchronous methods wait for asynchronous functions, since this class could be called from a thread on the pool.
• Do not use any class inside an asynchronous function if the class blocks waiting for asynchronous functions.

If you want to detect a deadlock in your application, check the available number of threads on the thread pool when your system is hung. The lack of available threads and CPU utilization near 0% are clear symptoms of a deadlock. You should monitor your code to identify where a function executed on the pool is waiting for an asynchronous operation and remove it.

1. 不要在同步方法中等待异步方法，因为同步方法可能在线程池里执行
2. 不要在异步方法中阻塞等待异步方法

End.

posted @ 2018-07-05 18:40  大石头  阅读(...)  评论(...编辑  收藏