Concurrency Golang上传整个目录时报“too many open files”(打开的文件太多)错误
我正在尝试将整个目录上载到服务器。它适用于小目录,但当有100多张图片时,它会返回错误“到许多打开的文件”。我在读取文件后立即关闭该文件。你知道怎么解决这个问题吗 这是我的密码Concurrency Golang上传整个目录并返回多个打开的文件,concurrency,go,Concurrency,Go,我正在尝试将整个目录上载到服务器。它适用于小目录,但当有100多张图片时,它会返回错误“到许多打开的文件”。我在读取文件后立即关闭该文件。你知道怎么解决这个问题吗 这是我的密码 func uploadDir(path string) error { dir, err := os.Open(path) if err != nil { return err } files, err := dir.Readdirnames(-1) if
// uploadDir uploads every file in the directory at path to the server
// named by the DROPS_SERVER environment variable, one goroutine per file
// but with a bounded number running at once so the process never exceeds
// the OS open-file-descriptor limit ("too many open files").
// It returns the first error encountered, or nil when all uploads succeed.
func uploadDir(path string) error {
	dir, err := os.Open(path)
	if err != nil {
		return err
	}
	files, err := dir.Readdirnames(-1)
	// Close before the error check so the directory fd is never leaked.
	dir.Close()
	if err != nil {
		return err
	}
	// Buffered to len(files) so workers can always complete their send,
	// even after the collector below has returned early on an error.
	// This prevents the original goroutine leak.
	errChan := make(chan error, len(files))
	resChan := make(chan *client.PutResult, len(files))
	// Semaphore bounding concurrent uploads (and therefore open files).
	const maxOpen = 64
	sem := make(chan struct{}, maxOpen)
	for _, file := range files {
		file := file // capture loop variable (required before Go 1.22)
		go func() {
			sem <- struct{}{}
			defer func() { <-sem }()
			f, err := os.Open(path + "/" + file)
			if err != nil {
				errChan <- err
				return // must not continue with a nil *os.File
			}
			c := client.NewClient(os.Getenv("DROPS_SERVER"))
			res, err := c.Upload(client.NewUploadHandleFromReader(f))
			f.Close()
			if err != nil {
				errChan <- err
				return // original fell through and sent on resChan too
			}
			resChan <- res
		}()
	}
	// Exactly one outcome arrives per file; stop at the first error.
	for remaining := len(files); remaining > 0; remaining-- {
		select {
		case res := <-resChan:
			log.Println(res)
		case err := <-errChan:
			return err
		}
	}
	return nil
}
func uploadDir(路径字符串)错误{
dir,err:=os.Open(路径)
如果错误!=零{
返回错误
}
文件,错误:=dir.Readdirnames(-1)
如果错误!=零{
返回错误
}
目录关闭()
errChan:=make(chan错误)
resChan:=make(chan*client.PutResult)
剩余:=len(文件)
对于u,文件:=范围文件{
文件:=文件
go func(){
文件,错误:=os.Open(路径+“/”+文件)
如果错误!=零{
errChan为了发送目录,我只需在本地存档/压缩目录,然后上传它
但是,如果您确实想这样做,一个简单的技巧是设置最大上载限制(即最大打开文件限制)
在任何系统(osx/linux,不确定windows)上,您都有一个最大打开fd限制。您可以手动增加该数字以允许更多,但要注意内存消耗。
如果我没记错的话,默认限制是1024
主程序包
进口(
“日志”
“操作系统”
)
func uploadDir(路径字符串,maxOpen int)错误{
dir,err:=os.Open(路径)
如果错误!=零{
返回错误
}
文件,错误:=dir.Readdirnames(-1)
如果错误!=零{
返回错误
}
目录关闭()
errChan:=make(chan错误)
resChan:=make(chan*client.PutResult)
doneChan:=make(chan bool)
剩余:=len(文件)
限制:=make(chan结构{},maxOpen)
对于i:=0;i 限制原始代码不限制活动go例程的数量,因此不限制打开的文件描述符的数量。几个操作系统对打开的文件描述符的数量有限制。修复方法是创建固定数量的工作go例程
// uploadDir uploads every file found in the directory at path to the
// server named by the DROPS_SERVER environment variable. A small pool of
// worker goroutines (at most 8) drains a shared work queue, so only a
// bounded number of files is open at any moment. The first error seen
// is returned; on full success it returns nil.
func uploadDir(path string) error {
	dir, err := os.Open(path)
	if err != nil {
		return err
	}
	names, err := dir.Readdirnames(-1)
	if err != nil {
		return err
	}
	dir.Close()
	// Pre-load every name into a buffered channel and close it; each
	// worker's range loop ends naturally when the queue is drained.
	work := make(chan string, len(names))
	for _, entry := range names {
		work <- entry
	}
	close(work)
	// Never start more workers than there are files, capped at 8.
	poolSize := 8
	if len(names) < poolSize {
		poolSize = len(names)
	}
	// Capacity 1: only the first error matters; later ones are dropped.
	failure := make(chan error, 1)
	// Buffered to len(names) so workers never block delivering results.
	results := make(chan *client.PutResult, len(names))
	for w := 0; w < poolSize; w++ {
		go func() {
			for name := range work {
				f, err := os.Open(filepath.Join(path, name))
				if err != nil {
					// Non-blocking send: first error wins, rest discarded.
					select {
					case failure <- err:
					default:
					}
					return
				}
				cl := client.NewClient(os.Getenv("DROPS_SERVER"))
				res, err := cl.Upload(client.NewUploadHandleFromReader(f))
				f.Close()
				if err != nil {
					select {
					case failure <- err:
					default:
					}
					return
				}
				results <- res
			}
		}()
	}
	// Expect exactly one result per file unless an error arrives first.
	for range names {
		select {
		case res := <-results:
			log.Println(res)
		case err := <-failure:
			return err
		}
	}
	return nil
}
func uploadDir(路径字符串)错误{
//读取目录并关闭。
dir,err:=os.Open(路径)
如果错误!=零{
返回错误
}
名称,错误:=dir.Readdirnames(-1)
如果错误!=零{
返回错误
}
目录关闭()
//将名称复制到供工作人员使用的频道。关闭
//通道,以便工人在所有工作完成时停止工作。
namesChan:=make(chan字符串,len(名称))
对于u,名称:=范围名称{
namesChan我不喜欢那里的两个select语句,但它很有效。thx很多。在linux系统下,操作系统限制您打开一定数量的文件描述符,这个限制与go
无关。试试 `ulimit -Sn`,
看看是否是256。每个连接2个fd~100听起来正确。
// uploadDir uploads all files in the directory at path to the server
// named by the DROPS_SERVER environment variable. A fixed pool of at
// most 8 worker goroutines bounds the number of simultaneously open
// file descriptors, avoiding the OS "too many open files" error.
// It returns the first upload error encountered, or nil once every
// file has been uploaded and logged.
func uploadDir(path string) error {
// Read directory and close.
dir, err := os.Open(path)
if err != nil {
return err
}
names, err := dir.Readdirnames(-1)
if err != nil {
return err
}
dir.Close()
// Copy names to a channel for workers to consume. Close the
// channel so that workers stop when all work is complete.
// Buffered to len(names) so all sends below complete immediately.
namesChan := make(chan string, len(names))
for _, name := range names {
namesChan <- name
}
close(namesChan)
// Create a maximum of 8 workers
workers := 8
if len(names) < workers {
workers = len(names)
}
// errChan capacity 1: only the first error is reported to the caller.
// resChan is buffered to len(names) so workers never block on results,
// even if the collector loop below returns early on an error.
errChan := make(chan error, 1)
resChan := make(chan *client.PutResult, len(names))
// Run workers
for i := 0; i < workers; i++ {
go func() {
// Consume work from namesChan. Loop will end when no more work.
for name := range namesChan {
file, err := os.Open(filepath.Join(path, name))
if err != nil {
// Non-blocking send: drop the error if one is already pending.
select {
case errChan <- err:
// will break parent goroutine out of loop
default:
// don't care, first error wins
}
return
}
c := client.NewClient(os.Getenv("DROPS_SERVER"))
res, err := c.Upload(client.NewUploadHandleFromReader(file))
file.Close()
if err != nil {
select {
case errChan <- err:
// will break parent goroutine out of loop
default:
// don't care, first error wins
}
return
}
resChan <- res
}
}()
}
// Collect results from workers: exactly one result is expected per
// file, unless an error arrives first, in which case we return it.
for i := 0; i < len(names); i++ {
select {
case res := <-resChan:
log.Println(res)
case err := <-errChan:
return err
}
}
return nil
}