
How to handle multiple goroutines with a channel in Go


I want to start 1000 goroutines at the same time in Go, using a for loop.
The problem is that I have to make sure every goroutine has executed.
Is it possible to use a channel to help me make sure of that?

The structure is something like this:

func main {
    for i ... {
        go ...
        ch?
    }
    ch?
}

As @Andy mentioned, you can use sync.WaitGroup to achieve this. Here is an example; hopefully the code is self-explanatory:

package main

import (
    "fmt"
    "sync"
    "time"
)

func dosomething(millisecs int64, wg *sync.WaitGroup) {
    defer wg.Done()
    duration := time.Duration(millisecs) * time.Millisecond
    time.Sleep(duration)
    fmt.Println("Function in background, duration:", duration)
}

func main() {
    arr := []int64{200, 400, 150, 600}
    var wg sync.WaitGroup
    for _, n := range arr {
        wg.Add(1) // register the goroutine before starting it
        go dosomething(n, &wg)
    }
    wg.Wait() // block until every goroutine has called Done
    fmt.Println("Done")
}

To make sure the goroutines have finished and to collect their results, try this example:

package main

import (
    "fmt"
)

const max = 1000

func main() {
    // sketch: a buffered channel lets main wait until every goroutine reports in
    done := make(chan bool, max)
    for i := 1; i <= max; i++ {
        go func(v int) {
            fmt.Println(v)
            done <- true // signal completion
        }(i)
    }
    for i := 1; i <= max; i++ {
        <-done // receiving max signals means every goroutine has run
    }
}

I would suggest following a pattern. Concurrency and channels are good, but used the wrong way they can make your program slower than expected. A simple way to handle multiple goroutines and channels is the worker pool pattern.

Take a careful look at the code below:

// In this example we'll look at how to implement
// a _worker pool_ using goroutines and channels.

package main

import "fmt"
import "time"

// Here's the worker, of which we'll run several
// concurrent instances. These workers will receive
// work on the `jobs` channel and send the corresponding
// results on `results`. We'll sleep a second per job to
// simulate an expensive task.
func worker(id int, jobs <-chan int, results chan<- int) {
    for j := range jobs {
        fmt.Println("worker", id, "started  job", j)
        time.Sleep(time.Second)
        fmt.Println("worker", id, "finished job", j)
        results <- j * 2
    }
}

func main() {

    // In order to use our pool of workers we need to send
    // them work and collect their results. We make 2
    // channels for this.
    jobs := make(chan int, 100)
    results := make(chan int, 100)

    // This starts up 3 workers, initially blocked
    // because there are no jobs yet.
    for w := 1; w <= 3; w++ {
        go worker(w, jobs, results)
    }

    // Here we send 5 `jobs` and then `close` that
    // channel to indicate that's all the work we have.
    for j := 1; j <= 5; j++ {
        jobs <- j
    }
    close(jobs)

    // Finally we collect all the results of the work.
    for a := 1; a <= 5; a++ {
        <-results
    }
}
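
In this example, 3 workers and 5 jobs are used purely for illustration. For the 1000 tasks in the question the same shape applies, only the counts change; below is a rough sketch of that adaptation, in which the per-job work (a short sleep, then j * 2) is just a placeholder:

package main

import (
    "fmt"
    "time"
)

func main() {
    const numJobs = 1000 // matches the scale in the question
    const numWorkers = 8 // arbitrary example size; tune it for the workload

    jobs := make(chan int, numJobs)
    results := make(chan int, numJobs)

    // a fixed pool of workers instead of 1000 separate goroutines
    for w := 0; w < numWorkers; w++ {
        go func() {
            for j := range jobs {
                time.Sleep(time.Millisecond) // stand-in for real work
                results <- j * 2
            }
        }()
    }

    for j := 1; j <= numJobs; j++ {
        jobs <- j
    }
    close(jobs)

    // receiving numJobs results guarantees every job has been processed
    for a := 0; a < numJobs; a++ {
        <-results
    }
    fmt.Println("all", numJobs, "jobs done")
}

The key property of the pattern is that the number of goroutines stays bounded by the worker count, no matter how many jobs are queued.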
Do you mean that you want to wait until the goroutines are done? If so, you can use sync.WaitGroup (see the example above).

Maybe I am not understanding the task. Do you have 1000 different goroutines? If not, and they all perform the same task, what difference does it make which of them executed?

Even though this works and satisfies the OP's constraints, it is not best practice. The problem with this particular approach is maintainability and scalability; WaitGroup was designed to solve exactly that problem.

@KshitijSaraogi: best practice depends very much on the problem at hand. For example, suppose you need 1000 goroutines to each perform a specific computation and then send an int result back to the main goroutine; then this is the best practice.

Perhaps defer wg.Done() should go at the beginning of dosomething().
package main

import (
    "fmt"
    "sync"
)

func main() {
    wg := &sync.WaitGroup{}

    for i := 0; i < 1000; i++ {
        wg.Add(1)
        go f(wg, i)
    }

    wg.Wait()
    fmt.Println("Done.")
}

func f(wg *sync.WaitGroup, n int) {
    defer wg.Done()
    fmt.Print(n, " ")
}
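
One of the comments above describes the case where each of 1000 goroutines performs a computation and then sends an int result back to the main goroutine. A minimal sketch of that channel-based variant (with n * 2 standing in for the real computation) might look like this:

package main

import "fmt"

func main() {
    results := make(chan int, 1000) // buffered, so senders never block

    for i := 0; i < 1000; i++ {
        go func(n int) {
            results <- n * 2 // placeholder for a real computation
        }(i)
    }

    // receiving exactly 1000 values guarantees every goroutine has finished
    sum := 0
    for i := 0; i < 1000; i++ {
        sum += <-results
    }
    fmt.Println("sum of results:", sum)
}

Receiving exactly as many values as goroutines were started is what gives the guarantee the question asks for: every goroutine has run to completion before main continues.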