涛子 - 简单就是美

成单纯魁增,永继振国兴,克复宗清政,广开家必升

  博客园  :: 首页  :: 新随笔  :: 联系 :: 订阅 订阅  :: 管理

https://www.jianshu.com/p/e8b26dc7884a

package main

import (
    "fmt"
    "math/rand"
    "sync"
    "time"
)

// SimpleData stuct
type SimpleData struct {
    ID int
}

// Work define
func Work(allData []SimpleData) {
    start := time.Now()

    for i := range allData {
        Process(allData[i])
    }

    fmt.Printf("Took ===============> %s\n", time.Since(start))
}

// Process define
func Process(data SimpleData) {
    fmt.Printf("Start processing %d\n", data.ID)
    time.Sleep(100 * time.Millisecond)
    fmt.Printf("Finish processing %d\n", data.ID)
}

// NotPooledWork define
func NotPooledWork(allData []SimpleData) {
    start := time.Now()
    var wg sync.WaitGroup

    dataCh := make(chan SimpleData, 10)

    wg.Add(1)
    go func() {
        defer wg.Done()
        for data := range dataCh {
            wg.Add(1)
            go func(data SimpleData) {
                defer wg.Done()
                Process(data)
            }(data)
        }
    }()

    for i := range allData {
        dataCh <- allData[i]
    }

    close(dataCh)
    wg.Wait()
    fmt.Printf("Took ===============> %s\n", time.Since(start))
}

// PooledWork define
func PooledWork(allData []SimpleData) {
    start := time.Now()
    var wg sync.WaitGroup
    workerPoolSize := 10

    dataCh := make(chan SimpleData, workerPoolSize)

    for i := 0; i < workerPoolSize; i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()

            for data := range dataCh {
                Process(data)
            }
        }()
    }

    for i := range allData {
        dataCh <- allData[i]
    }

    close(dataCh)
    wg.Wait()
    fmt.Printf("Took ===============> %s\n", time.Since(start))
}

// PooledWorkError define
func PooledWorkError(allData []SimpleData) {
    start := time.Now()
    var wg sync.WaitGroup
    workerPoolSize := 10

    dataCh := make(chan SimpleData, workerPoolSize)
    errors := make(chan error, 1000)

    for i := 0; i < workerPoolSize; i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()

            for data := range dataCh {
                ProcessError(data, errors)
            }
        }()
    }

    for i := range allData {
        dataCh <- allData[i]
    }

    close(dataCh)

    wg.Add(1)
    go func() {
        defer wg.Done()
        for {
            select {
            case err := <-errors:
                fmt.Println("finished with error:", err.Error())
            case <-time.After(time.Second * 1):
                fmt.Println("Timeout: errors finished")
                return
            }
        }
    }()

    defer close(errors)
    wg.Wait()
    fmt.Printf("Took ===============> %s\n", time.Since(start))
}

// ProcessError define
func ProcessError(data SimpleData, errors chan<- error) {
    fmt.Printf("Start processing %d\n", data.ID)
    time.Sleep(100 * time.Millisecond)
    if data.ID%29 == 0 {
        errors <- fmt.Errorf("error on job %v", data.ID)
    } else {
        fmt.Printf("Finish processing %d\n", data.ID)
    }
}

// Task define
type Task struct {
    Err  error
    Data interface{}
    f    func(interface{}) error
}

// NewTask define
func NewTask(f func(interface{}) error, data interface{}) *Task {
    return &Task{f: f, Data: data}
}

func process(workerID int, task *Task) {
    fmt.Printf("Worker %d processes task %v\n", workerID, task.Data)
    task.Err = task.f(task.Data)
}

// Worker handles all the work
type Worker struct {
    ID       int
    taskChan chan *Task
    quit     chan bool
}

// NewWorker returns new instance of worker
func NewWorker(channel chan *Task, ID int) *Worker {
    return &Worker{
        ID:       ID,
        taskChan: channel,
        quit:     make(chan bool),
    }
}

// Start starts the worker
func (wr *Worker) Start(wg *sync.WaitGroup) {
    fmt.Printf("Starting worker %d\n", wr.ID)

    wg.Add(1)
    go func() {
        defer wg.Done()
        for task := range wr.taskChan {
            process(wr.ID, task)
        }
    }()
}

// StartBackground starts the worker in background waiting
func (wr *Worker) StartBackground() {
    fmt.Printf("Starting worker %d\n", wr.ID)

    for {
        select {
        case task := <-wr.taskChan:
            process(wr.ID, task)
        case <-wr.quit:
            return
        }
    }
}

// Stop quits the worker
func (wr *Worker) Stop() {
    fmt.Printf("Closing worker %d\n", wr.ID)
    go func() {
        wr.quit <- true
    }()
}

// Pool is the worker pool
type Pool struct {
    Tasks   []*Task
    Workers []*Worker

    concurrency   int
    collector     chan *Task
    runBackground chan bool
    wg            sync.WaitGroup
}

// NewPool initializes a new pool with the given tasks and at the given concurrency.
func NewPool(tasks []*Task, concurrency int) *Pool {
    return &Pool{
        Tasks:       tasks,
        concurrency: concurrency,
        collector:   make(chan *Task, 1000),
    }
}

// Run runs all work within the pool and blocks until it's finished.
func (p *Pool) Run() {
    for i := 1; i <= p.concurrency; i++ {
        worker := NewWorker(p.collector, i)
        worker.Start(&p.wg)
    }

    for i := range p.Tasks {
        p.collector <- p.Tasks[i]
    }
    close(p.collector)

    p.wg.Wait()
}

// AddTask adds a task to the pool
func (p *Pool) AddTask(task *Task) {
    p.collector <- task
}

// RunBackground runs the pool in background
func (p *Pool) RunBackground() {
    go func() {
        for {
            fmt.Print("⌛ Waiting for tasks to come in ...\n")
            time.Sleep(10 * time.Second)
        }
    }()

    for i := 1; i <= p.concurrency; i++ {
        worker := NewWorker(p.collector, i)
        p.Workers = append(p.Workers, worker)
        go worker.StartBackground()
    }

    for i := range p.Tasks {
        p.collector <- p.Tasks[i]
    }

    p.runBackground = make(chan bool)
    <-p.runBackground
}

// Stop stops background workers
func (p *Pool) Stop() {
    for i := range p.Workers {
        p.Workers[i].Stop()
    }
    p.runBackground <- true
}

func main() {
    // generate all data
    var allData []SimpleData
    for i := 0; i < 1000; i++ {
        data := SimpleData{ID: i}
        allData = append(allData, data)
    }

    // generate all task
    var allTask []*Task
    for i := 1; i <= 100; i++ {
        task := NewTask(func(data interface{}) error {
            taskID := data.(int)
            time.Sleep(100 * time.Millisecond)
            fmt.Printf("Task %d processed\n", taskID)
            return nil
        }, i)
        allTask = append(allTask, task)
    }

    pool := NewPool(allTask, 5)

    // run synchronously
    // Work(allData)

    // run without any pooling
    // NotPooledWork(allData)

    // run with pooling
    // PooledWork(allData)

    // run with pooling that handles errors
    // PooledWorkError(allData)

    // run robust worker pool
    // pool.Run()

    // run robust worker pool in background
    go func() {
        for {
            taskID := rand.Intn(100) + 20

            if taskID%7 == 0 {
                pool.Stop()
            }

            time.Sleep(time.Duration(rand.Intn(5)) * time.Second)
            task := NewTask(func(data interface{}) error {
                taskID := data.(int)
                time.Sleep(100 * time.Millisecond)
                fmt.Printf("Task %d processed\n", taskID)
                return nil
            }, taskID)
            pool.AddTask(task)
        }
    }()
    pool.RunBackground()
}
posted on 2023-01-29 11:15  北京涛子  阅读(26)  评论(0编辑  收藏  举报