同时从一个csv文件中写入多个csv文件,在Golang的分区列上拆分
我的目标是读取一个或多个共享公共格式的 csv 文件，并根据 csv 数据中的分区列（每行的最后一列）写入单独的文件。数据未排序，且同一个分区的行可能分布在多个输入文件中。一个文件的示例：
fsdio,abc,def,2017,11,06,01
1sdf9,abc,def,2017,11,06,04
22df9,abc,def,2017,11,06,03
1d243,abc,def,2017,11,06,02
如果这种方法闻起来像可怕的XY问题,我很乐意调整
到目前为止,我所尝试的:
- 读入数据集并迭代每一行
- 如果分区有
如图所示,衍生出一个新的工作程序例程(该例程将包含一个文件/csv
作者)。将该行发送到
chan[]字符串中
- 由于每个worker都是一个文件编写器,所以它应该只在其输入通道上接收一个分区的行
id字符串
,但不知道如何选择要发送到的工作者,如果我应该为每个工作者创建一个单独的chan[]string
,并使用选择
发送到该频道,或者,如果一个结构应该用某种池和路由功能容纳每个工人
TLDR;我不知道如何根据某个分类的字符串值有条件地将数据发送到给定的go例程或通道,其中unique的数量可以是任意的,但可能不超过24个unique分区值
我会警告你,我已经注意到像这样的问题,所以如果你觉得这是反建设性的或不完整的,足以否决投票,请评论为什么,这样我就可以避免重复冒犯
提前谢谢你的帮助
片段:
package main
import (
"encoding/csv"
"fmt"
"log"
"strings"
"time"
)
// main reads the whole CSV, then fans each record out to a per-partition
// worker. The partition key is the last column (hour, line[6]).
//
// Fix for the original defect: a single shared channel delivers a row to
// whichever worker happens to be idle, not to the worker for that row's
// partition. Routing instead uses one channel per partition value, so a
// worker only ever receives rows for its own partition.
func main() {
	// CSV
	r := csv.NewReader(csvFile1)
	lines, err := r.ReadAll()
	if err != nil {
		log.Fatalf("error reading all lines: %v", err)
	}
	// One channel per partition value; the map doubles as the
	// "seen partitions" tracker, replacing the linear slice scan.
	partChans := make(map[string]chan []string)
	for _, line := range lines {
		hour := line[6]
		c, ok := partChans[hour]
		if !ok {
			// First row for this partition: create its channel and worker.
			c = make(chan []string)
			partChans[hour] = c
			go worker(hour, c)
		}
		// Send the row to the worker that owns this partition.
		c <- line
	}
	// Closing each channel ends that worker's receive loop.
	// NOTE(review): like the original, main does not wait for workers to
	// drain before exiting — add a sync.WaitGroup for real use.
	for _, c := range partChans {
		close(c)
	}
}
// worker simulates a per-partition file writer: it logs every record
// received on lineChan and returns once the channel is closed.
func worker(id string, lineChan <-chan []string) {
	for {
		job, ok := <-lineChan
		if !ok {
			// Channel closed: no more rows for this partition.
			return
		}
		fmt.Println("worker", id, "started job", job)
		// Write to a new file here and wait for input over the channel
		time.Sleep(time.Second)
		fmt.Println("worker", id, "finished job", job)
	}
}
// stringInSlice reports whether str is an element of list.
func stringInSlice(str string, list []string) bool {
	for i := range list {
		if list[i] == str {
			return true
		}
	}
	return false
}
// DUMMY
// csvFile1 stands in for a real CSV input file: an io.Reader whose rows
// end with the partition column (hour). Rows are deliberately unsorted
// by partition to exercise the routing logic.
var csvFile1 = strings.NewReader(`
12fy3,abc,def,2017,11,06,04
fsdio,abc,def,2017,11,06,01
11213,abc,def,2017,11,06,02
1sdf9,abc,def,2017,11,06,01
2123r,abc,def,2017,11,06,03
1v2t3,abc,def,2017,11,06,01
1r2r3,abc,def,2017,11,06,02
g1253,abc,def,2017,11,06,02
d1e23,abc,def,2017,11,06,02
a1d23,abc,def,2017,11,06,02
12jj3,abc,def,2017,11,06,03
t1r23,abc,def,2017,11,06,03
22123,abc,def,2017,11,06,03
14d23,abc,def,2017,11,06,04
1d243,abc,def,2017,11,06,01
1da23,abc,def,2017,11,06,04
a1523,abc,def,2017,11,06,01
12453,abc,def,2017,11,06,04`)
主程序包
进口(
“编码/csv”
“fmt”
“日志”
“字符串”
“时间”
)
func main(){
//CSV
r:=csv.NewReader(csvFile1)
行,err:=r.ReadAll()
如果错误!=零{
log.Fatalf(“读取所有行时出错:%v”,错误)
}
//渠道
lineChan:=make(chan[]字符串)
//跟踪器
var seenPartitions[]字符串
对于u,行:=范围行{
小时:=行[6]
if !stringInSlice(hour, seenPartitions) {
seenPartitions=附加(seenPartitions,小时)
go worker(小时,线)
}
//如何发送到正确的工作人员/通道?
lineChan <- line
首先是同步版本（没有 Go 并发技巧，参见下面的并发版本）
和并发版本:
package main
import (
"encoding/csv"
"fmt"
"io"
"log"
"strings"
"sync"
)
var (
// workers maps a partition value to the channel feeding that partition's
// worker goroutine. It is read and written only from the main goroutine
// (synchronously), so no mutex is required.
workers = make(map[string]chan []string)
// wg is to make sure all workers are done before exiting main
wg = sync.WaitGroup{}
// mu used only for sequential printing to stdout, not relevant for
// program logic (independent output files would not need it)
mu = sync.Mutex{}
)
// main streams the CSV one record at a time, dispatching each record to
// its partition's worker via process. On EOF it signals shutdown with
// savePartitions and the deferred wg.Wait blocks until every worker has
// flushed its partition.
func main() {
	// wait for all workers to finish up before exit
	defer wg.Wait()
	r := csv.NewReader(csvFile1)
	for {
		rec, err := r.Read()
		if err == io.EOF {
			// Input exhausted: tell workers to stop, then return.
			savePartitions()
			return
		}
		if err != nil {
			log.Fatal(err) // sorry for the panic
		}
		process(rec)
	}
}
// process routes one CSV record to the worker that owns its partition
// (the record's last field), lazily creating the channel and goroutine
// the first time a partition value is seen.
func process(rec []string) {
	part := rec[len(rec)-1]
	c, ok := workers[part]
	if !ok {
		// First record for this partition: register a channel and
		// start its dedicated worker.
		c = make(chan []string)
		workers[part] = c
		go worker(c)
	}
	// Hand the record to the partition's worker.
	c <- rec
}
// worker accumulates every record sent on c for one partition and, once
// the channel is closed, dumps the partition to stdout. wg tracks the
// worker's lifetime; mu only keeps different partitions' output from
// interleaving.
func worker(c chan []string) {
	// wg.Done signals worker completion to main.
	wg.Add(1)
	defer wg.Done()
	var part [][]string
	// Collect until the channel is closed (EOF upstream). Instead of
	// accumulating in memory, each record could be written to a file
	// directly here.
	for rec := range c {
		part = append(part, rec)
	}
	// Channel closed: dump the partition. The lock ensures sequential
	// printing; it is not required for independent output files.
	mu.Lock()
	defer mu.Unlock()
	for _, p := range part {
		fmt.Printf("%+v\n", p)
	}
}
// simply signals to workers to stop
// savePartitions closes every worker's input channel; each worker then
// dumps (or would write out) its partition and exits.
func savePartitions() {
for _, c := range workers {
// signal to all workers to exit
close(c)
}
}
// DUMMY
// csvFile1 stands in for a real CSV input file: an io.Reader whose rows
// end with the partition column (hour). The rows happen to be grouped by
// partition here, but the program does not rely on that ordering.
var csvFile1 = strings.NewReader(`
fsdio,abc,def,2017,11,06,01
1sdf9,abc,def,2017,11,06,01
1d243,abc,def,2017,11,06,01
1v2t3,abc,def,2017,11,06,01
a1523,abc,def,2017,11,06,01
1r2r3,abc,def,2017,11,06,02
11213,abc,def,2017,11,06,02
g1253,abc,def,2017,11,06,02
d1e23,abc,def,2017,11,06,02
a1d23,abc,def,2017,11,06,02
12jj3,abc,def,2017,11,06,03
t1r23,abc,def,2017,11,06,03
2123r,abc,def,2017,11,06,03
22123,abc,def,2017,11,06,03
14d23,abc,def,2017,11,06,04
1da23,abc,def,2017,11,06,04
12fy3,abc,def,2017,11,06,04
12453,abc,def,2017,11,06,04`)
主程序包
进口(
“编码/csv”
“fmt”
“io”
“日志”
“字符串”
“同步”
)
变量(
//与工人沟通的渠道列表
//工人同步访问,无需互斥
workers=make(映射[string]chan[]string)
//工作组应确保所有工人在离开主管道前完成工作
wg=sync.WaitGroup{}
//mu仅用于顺序打印,与程序逻辑无关
mu=sync.Mutex{}
)
func main(){
//等待所有工人完成后再离开
defer wg.Wait()
r:=csv.NewReader(csvFile1)
为了{
rec,err:=r.Read()
如果错误!=零{
如果err==io.EOF{
savePartitions()
返回
}
log.Fatal(err)//很抱歉引起恐慌
}
过程(rec)
}
}
func进程(rec[]字符串){
l:=len(rec)
部分:=rec[l-1]
如果c,ok:=工人[零件];ok{
//向工人发送rec
创建一个映射[string](chan[]string)
。然后,给定一个分区键,您可以将该行发送到相应的通道。同意w/@zerkms。或者,为了在保持简单的同时获得更大的灵活性,请将每个worker设置为持有其ID的结构
类型的实例、用于发送行的通道、告诉它何时停止/刷新/关闭的退出通道以及它需要的任何其他内容,然后按住一个map[string]worker
并使用它将正确的行发送到正确的worker。@gpanda通道不适合充当未绑定的完全有序队列。相反,您可以创建一个包含锁(互斥锁)的结构内部实现为一个切片。然后在每个push
上向切片添加一个元素,在每个pop
上从头部提取一个元素。点很好,@zerkms。不过,使用这种方法我非常接近!在所有例程完成后获得死锁。我将处理此问题,并在尝试了mux解决方案后报告n、 @gpanda我认为它不是线程安全的:workerPool
访问也必须同步,因为您同时对它进行读写操作。此外,当您使它具有并发安全性时,您会发现您的实现中没有任何东西保证顺序:因此,行可能会被安排以任意顺序处理。我绝对不会我看一下我的用例是如何工作的,如果有任何问题,请反馈。谢谢!两种解决方案都很好,但第二种解决方案特别优雅。我特别欣赏用于关闭chan in worker()的ok语法以及解释性注释。我很好奇关于锁()如何工作的注释独立文件不需要。我想这是我将适应于为每个分区写入单独的csv的地方-您确定将每个分区写入文件时不需要锁定吗?@gpanda是的,我非常确定您不需要锁定。这是必需的,因为打印时我们只有一个共享资源std out我们打印到的位置。尝试注释mu.Lock和mu.Unlock
。除了行的顺序是随机的之外,不会发生任何不好的事情。使用此锁,每个工作人员都会说“看,大家闭嘴说话”。当您
package main
import (
"encoding/csv"
"fmt"
"io"
"log"
"strings"
"sync"
)
var (
// workers maps a partition value to the channel feeding that partition's
// worker goroutine. It is accessed only from the main goroutine
// (synchronously), so no mutex is required.
workers = make(map[string]chan []string)
// wg is to make sure all workers are done before exiting main
wg = sync.WaitGroup{}
// mu used only for sequential printing to stdout, not relevant for
// program logic
mu = sync.Mutex{}
)
// main streams the CSV one record at a time and hands each record to
// process, which routes it to a per-partition worker. On EOF it closes
// all worker channels (savePartitions); the deferred wg.Wait then blocks
// until every worker has flushed its partition.
func main() {
// wait for all workers to finish up before exit
defer wg.Wait()
r := csv.NewReader(csvFile1)
for {
rec, err := r.Read()
if err != nil {
if err == io.EOF {
// end of input: signal workers to stop, then return (defer waits)
savePartitions()
return
}
log.Fatal(err) // sorry for the panic
}
process(rec)
}
}
// process routes rec to the worker owning its partition (the record's
// last field). The workers map is touched only from the main goroutine,
// so no locking is needed here.
func process(rec []string) {
l := len(rec)
part := rec[l-1]
if c, ok := workers[part]; ok {
// send rec to worker
c <- rec
} else {
// if no worker for the partition
// make a chan
nc := make(chan []string)
workers[part] = nc
// start worker with this chan
go worker(nc)
// send rec to worker via chan
// (unbuffered: the send cannot complete until the worker is running)
nc <- rec
}
}
// worker collects every record sent on c for one partition and, when c
// is closed, prints the whole partition. The mutex only keeps different
// partitions' output from interleaving on stdout.
func worker(c chan []string) {
// wg.Done signals to main worker completion
// NOTE(review): wg.Add inside the goroutine races with wg.Wait in
// general; it appears safe here only because the unbuffered send in
// process() cannot complete before Add has run — confirm if refactoring.
wg.Add(1)
defer wg.Done()
part := [][]string{}
for {
// wait for a rec or close(chan)
rec, ok := <-c
if ok {
// save the rec
// instead of accumulation in memory
// this can be saved to file directly
part = append(part, rec)
} else {
// channel closed on EOF
// dump partition
// lock ensures sequential printing
// not required for independent files
mu.Lock()
for _, p := range part {
fmt.Printf("%+v\n", p)
}
mu.Unlock()
return
}
}
}
// savePartitions tells every worker to finish: closing a worker's input
// channel ends its receive loop, after which it dumps its partition.
func savePartitions() {
	for _, ch := range workers {
		close(ch)
	}
}
// DUMMY
// csvFile1 stands in for a real CSV input file: an io.Reader whose rows
// end with the partition column (hour). The rows happen to be grouped by
// partition here, but the program does not rely on that ordering.
var csvFile1 = strings.NewReader(`
fsdio,abc,def,2017,11,06,01
1sdf9,abc,def,2017,11,06,01
1d243,abc,def,2017,11,06,01
1v2t3,abc,def,2017,11,06,01
a1523,abc,def,2017,11,06,01
1r2r3,abc,def,2017,11,06,02
11213,abc,def,2017,11,06,02
g1253,abc,def,2017,11,06,02
d1e23,abc,def,2017,11,06,02
a1d23,abc,def,2017,11,06,02
12jj3,abc,def,2017,11,06,03
t1r23,abc,def,2017,11,06,03
2123r,abc,def,2017,11,06,03
22123,abc,def,2017,11,06,03
14d23,abc,def,2017,11,06,04
1da23,abc,def,2017,11,06,04
12fy3,abc,def,2017,11,06,04
12453,abc,def,2017,11,06,04`)