处理大型csv文件并限制goroutines
我正试图找到读取csv文件(约1M行)的最佳有效方法。 每一行都包含一个指向我需要下载的图像的HTTP链接 这是我当前使用工作池的代码:处理大型csv文件并限制goroutines,csv,go,multiprocess,Csv,Go,Multiprocess,我正试图找到读取csv文件(约1M行)的最佳有效方法。 每一行都包含一个指向我需要下载的图像的HTTP链接 这是我当前使用工作池的代码: func worker(queue chan []string, worknumber int, done, ks chan bool) { for true { select { case url := <-queue: fmt.Println("doing work!", url, "w
func worker(queue chan []string, worknumber int, done, ks chan bool) {
for true {
select {
case url := <-queue:
fmt.Println("doing work!", url, "worknumber", worknumber)
processData(url) // HTTP download
done <- true
case <-ks:
fmt.Println("worker halted, number", worknumber)
return
}
}
}
func main() {
start := time.Now()
flag.Parse()
fmt.Print(strings.Join(flag.Args(), "\n"))
if *filename == "REQUIRED" {
return
}
csvfile, err := os.Open(*filename)
if err != nil {
fmt.Println(err)
return
}
count, _ := lineCounter(csvfile)
fmt.Printf("Total count: %d\n", count)
csvfile.Seek(0, 0)
defer csvfile.Close()
//bar := pb.StartNew(count)
bar := progressbar.NewOptions(count)
bar.RenderBlank()
reader := csv.NewReader(csvfile)
//channel for terminating the workers
killsignal := make(chan bool)
//queue of jobs
q := make(chan []string)
// done channel takes the result of the job
done := make(chan bool)
numberOfWorkers := *numChannels
for i := 0; i < numberOfWorkers; i++ {
go worker(q, i, done, killsignal)
}
i := 0
for {
record, err := reader.Read()
if err == io.EOF {
break
} else if err != nil {
fmt.Println(err)
return
}
i++
go func(r []string, i int) {
q <- r
bar.Add(1)
}(record, i)
}
// a deadlock occurs if c >= numberOfJobs
for c := 0; c < count; c++ {
<-done
}
fmt.Println("finished")
// cleaning workers
close(killsignal)
time.Sleep(2 * time.Second)
fmt.Printf("\n%2fs", time.Since(start).Seconds())
}
func worker(队列chan[]字符串,工作编号int,完成,ks chan bool){
真的{
挑选{
案例url:=您正在为文件中的每一行创建一个新的goroutine。这就是为什么。如果您已经有了所需的工作人员,那么没有理由这样做
简言之,改变这一点:
go func(r []string, i int) {
q <- r
bar.Add(1)
}(record, i)
go func(r[]字符串,i int){
q我去掉了进度条,因为我不想为它操心,但总的来说,这更接近于您所寻找的
它不能真正地处理错误,它们只是在致命状态下失败
我添加了上下文和取消支持
你可能想检查一下
作为一般建议,您需要学习golang模式及其用法
很明显,你工作得还不够,或者你正在学习
这根本不是最快的程序,但它确实做到了
这只是一个草案,让你回到一个更好的方向
package main
import (
"context"
"encoding/csv"
"flag"
"fmt"
"io"
"log"
"os"
"os/signal"
"sync"
"time"
)
func worker(ctx context.Context, dst chan string, src chan []string) {
for {
select {
case url, ok := <-src: // you must check for readable state of the channel.
if !ok {
return
}
dst <- fmt.Sprintf("out of %v", url) // do somethingg useful.
case <-ctx.Done(): // if the context is cancelled, quit.
return
}
}
}
func main() {
// create a context
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// that cancels at ctrl+C
go onSignal(os.Interrupt, cancel)
// parse command line arguments
var filename string
var numberOfWorkers int
flag.StringVar(&filename, "filename", "", "src file")
flag.IntVar(&numberOfWorkers, "c", 2, "concurrent workers")
flag.Parse()
// check arguments
if filename == "" {
log.Fatal("filename required")
}
start := time.Now()
csvfile, err := os.Open(filename)
if err != nil {
log.Fatal(err)
}
defer csvfile.Close()
reader := csv.NewReader(csvfile)
// create the pair of input/output channels for the controller=>workers com.
src := make(chan []string)
out := make(chan string)
// use a waitgroup to manage synchronization
var wg sync.WaitGroup
// declare the workers
for i := 0; i < numberOfWorkers; i++ {
wg.Add(1)
go func() {
defer wg.Done()
worker(ctx, out, src)
}()
}
// read the csv and write it to src
go func() {
for {
record, err := reader.Read()
if err == io.EOF {
break
} else if err != nil {
log.Fatal(err)
}
src <- record // you might select on ctx.Done().
}
close(src) // close src to signal workers that no more job are incoming.
}()
// wait for worker group to finish and close out
go func() {
wg.Wait() // wait for writers to quit.
close(out) // when you close(out) it breaks the below loop.
}()
// drain the output
for res := range out {
fmt.Println(res)
}
fmt.Printf("\n%2fs", time.Since(start).Seconds())
}
func onSignal(s os.Signal, h func()) {
c := make(chan os.Signal, 1)
signal.Notify(c, s)
<-c
h()
}
主程序包
进口(
“上下文”
“编码/csv”
“旗帜”
“fmt”
“io”
“日志”
“操作系统”
“操作系统/信号”
“同步”
“时间”
)
func worker(ctx context.context、dst chan字符串、src chan[]字符串){
为了{
挑选{
案例url,确定:=缓冲通道可以帮助您限制goroutine
var taskPipe=make(chan接口{},5)
func main(){
go func(){
taskPipe您是否考虑过bufio.NewScanner(文件)
它不会占用太多内存。您在cvs行上的循环应位于不同的例程中。主例程应负责排出辅助进程的输出,并等待完成或错误退出。我尝试过,但随后它似乎进入死锁。它开始处理numberOfWorkers
行数,然后卡住。发生这种情况的原因是,在您完全处理输入之前,不会读取工作程序的输出。您能帮助我了解有关您的注释的更多信息吗?您的工作程序会写入done
通道,但直到程序结束才会读取。这就是导致死锁的原因。
package main
import (
"context"
"encoding/csv"
"flag"
"fmt"
"io"
"log"
"os"
"os/signal"
"sync"
"time"
)
func worker(ctx context.Context, dst chan string, src chan []string) {
for {
select {
case url, ok := <-src: // you must check for readable state of the channel.
if !ok {
return
}
dst <- fmt.Sprintf("out of %v", url) // do somethingg useful.
case <-ctx.Done(): // if the context is cancelled, quit.
return
}
}
}
func main() {
// create a context
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// that cancels at ctrl+C
go onSignal(os.Interrupt, cancel)
// parse command line arguments
var filename string
var numberOfWorkers int
flag.StringVar(&filename, "filename", "", "src file")
flag.IntVar(&numberOfWorkers, "c", 2, "concurrent workers")
flag.Parse()
// check arguments
if filename == "" {
log.Fatal("filename required")
}
start := time.Now()
csvfile, err := os.Open(filename)
if err != nil {
log.Fatal(err)
}
defer csvfile.Close()
reader := csv.NewReader(csvfile)
// create the pair of input/output channels for the controller=>workers com.
src := make(chan []string)
out := make(chan string)
// use a waitgroup to manage synchronization
var wg sync.WaitGroup
// declare the workers
for i := 0; i < numberOfWorkers; i++ {
wg.Add(1)
go func() {
defer wg.Done()
worker(ctx, out, src)
}()
}
// read the csv and write it to src
go func() {
for {
record, err := reader.Read()
if err == io.EOF {
break
} else if err != nil {
log.Fatal(err)
}
src <- record // you might select on ctx.Done().
}
close(src) // close src to signal workers that no more job are incoming.
}()
// wait for worker group to finish and close out
go func() {
wg.Wait() // wait for writers to quit.
close(out) // when you close(out) it breaks the below loop.
}()
// drain the output
for res := range out {
fmt.Println(res)
}
fmt.Printf("\n%2fs", time.Since(start).Seconds())
}
func onSignal(s os.Signal, h func()) {
c := make(chan os.Signal, 1)
signal.Notify(c, s)
<-c
h()
}