Python 在通过sys.stdout打印导出到同一文件后读取同一脚本中的文件时出现问题
我正在使用一个音频分析脚本,它输出一系列打印命令。在所有打印命令执行完毕、并通过脚本中的 sys.stdout 重定向写入外部文本文件之后,我希望在同一脚本中读取该文本文件。然而,此时文本文件在脚本中显示为空。我试过调用 f.flush() 和 f.seek(),但都没有用。脚本结束后再打开该文本文件时,所有打印内容都会按预期显示。似乎必须等脚本结束,sys.stdout 才会把内容写入外部文件。我能否确保这一写入步骤发生在脚本末尾的 f.read() 之前?
import sys
# Make the script's own directory importable for any local helpers.
sys.path.append(".")
#print(sys.path)
import time
import warnings
# Some librosa versions emit FutureWarnings on load; keep the log readable.
warnings.simplefilter(action='ignore', category=FutureWarning)
# Wall-clock start; elapsed time is printed after each track's analysis.
start_time = time.time()
import numba
import numpy as np
import librosa
def print_color(msg, color=32):
    """Print *msg*, wrapped in an ANSI color escape when stdout is a terminal.

    Parameters:
        msg: text to print.
        color: ANSI SGR color code (default 32, green).

    BUG FIX: later in this script sys.stdout is replaced by a Tee object,
    which has no isatty() method; calling sys.stdout.isatty() directly would
    then raise AttributeError. Probe for isatty defensively instead.
    """
    isatty = getattr(sys.stdout, "isatty", lambda: False)
    if isatty():
        print("\033[{color}m {msg} \033[0m".format(msg=msg, color=color))
    else:
        print(" *** {msg} ***".format(msg=msg))
# TODO: configure this via cmdline
SAMPLE_RATE = 44100 # Hz
# Peaks above this level in the bass band count as "deep bass".
ELF_THRESHOLD_DB = -22 #dB
#OLDBUCKET N_FFT = 16384
# FFT window length; at the nominal 44.1 kHz rate this gives ~3.4 Hz per bin.
N_FFT = 13000
# STFT bin indices bounding the deep-bass band (0..11) and the wider
# frequency range considered during analysis (0..64).
FIRST_BASS_BUCKET = 0
LAST_BASS_BUCKET = 11
LAST_ANALYSIS_BUCKET = 64
# When True, debug() prints the intermediate arrays below.
DEBUG_ENABLED = False
# The single required positional argument is the directory to scan.
if len(sys.argv) < 2:
    print("ElfTag: Extremely Low Frequency Audio Tagger")
    print("Usage: %(cmd)s </path/to/directory>" % {"cmd": sys.argv[0]})
    sys.exit(0)

directory = sys.argv[1]
print(directory)
def debug(msg):
    """Print *msg* only when the module-level DEBUG_ENABLED flag is set."""
    if DEBUG_ENABLED:
        print(msg)
# Discover every audio file under the target directory.
files = librosa.util.find_files(directory)
print("Total Tracks: ", len(files))
# Countdown of tracks remaining, decremented inside the processing loop.
queue = len(files)
class Tee:
    """File-like object that duplicates every write to two underlying streams.

    Assigned to sys.stdout below so that print() output goes both to a log
    file and to the real console.
    """

    def __init__(self, out1, out2):
        # out1: the log file handle; out2: the original sys.stdout.
        self.out1 = out1
        self.out2 = out2

    def write(self, *args, **kwargs):
        self.out1.write(*args, **kwargs)
        self.out2.write(*args, **kwargs)

    def flush(self):
        # BUG FIX: this was "pass", so buffered output never reached the log
        # file until interpreter exit -- which is exactly why reading the log
        # back within the same script saw an empty file. Propagate the flush
        # to both streams.
        self.out1.flush()
        self.out2.flush()

    def isatty(self):
        # Code that probes stdout (e.g. print_color) may call isatty();
        # a Tee is never an interactive terminal.
        return False
# NOTE: the duplicate "import sys" that stood here was removed; sys is
# already imported at the top of the file.
logfile = input("Enter Filename for Log File: ")
# From here on, everything printed goes to both the log file and the console.
sys.stdout = Tee(open(logfile, "w"), sys.stdout)
# Analyze each track: load audio, take an STFT, detect beats, and measure
# how much "deep bass" (energy in the lowest STFT bins above the threshold)
# the track contains.
for filename in files:
    try:
        queue = queue - 1
        print(queue, "Songs Remaining")
        print("Loading %(filename)s" % {"filename": filename})

        # Load at the file's native sample rate.
        y, sr = librosa.load(filename, sr=None)
        duration = librosa.core.get_duration(y=y, sr=sr)
        print("Detected sample rate: %(sr)d Hz, duration: %(duration)f seconds." % {"sr": sr, "duration": duration})

        bin_size_hz = float(sr) / N_FFT
        # Integer division: N_FFT / 2 + 1 yields a float under Python 3.
        num_bins = N_FFT // 2 + 1
        print("Using transform length of %(n_fft)d for FFT, which gives us %(num_bins)d bins at %(bin_size_hz)f Hz per bin." % {"n_fft": N_FFT, "num_bins": num_bins, "bin_size_hz": bin_size_hz})

        start_hz = bin_size_hz * FIRST_BASS_BUCKET
        end_hz = bin_size_hz * (LAST_BASS_BUCKET + 1)
        anal_hz = bin_size_hz * (LAST_ANALYSIS_BUCKET + 1)
        print("Detecting deep bass as peaks between %(start)f Hz and %(end)f Hz above %(db)d dB chosen from frequency range below %(anal)f Hz." % { "start" : start_hz, "end" : end_hz, "db" : ELF_THRESHOLD_DB, "anal" : anal_hz })

        #y = librosa.core.to_mono(y)
        D = librosa.stft(y, n_fft=N_FFT)
        tempo, beats = librosa.beat.beat_track(y=y, sr=sr, units='frames', hop_length=512)
        numBeats = beats.shape[0]
        print("Estimated tempo: %(tempo)f." % {"tempo" : tempo})
        print("Number of beats detected: %(beats)d." % {"beats" : numBeats})

        # Split into Harmonic and Percussive layers to aid with beat detection
        #H, P = librosa.decompose.hpss(D)
        P = D
        P = librosa.amplitude_to_db(P, ref=np.max)
        totalFrames = P.shape[1]
        print("Total frames: %(frames)d, about %(secPerFrame)f seconds per frame" % {"frames": totalFrames, "secPerFrame": (duration / totalFrames)})

        # Select significant bass frame rows; skip leading silence by finding
        # the first frame whose loudest analysis bin exceeds -80 dB.
        Pbass = P[FIRST_BASS_BUCKET:(LAST_ANALYSIS_BUCKET + 1)]
        firstFrame = np.argmax(Pbass.max(axis=0) > -80)
        debug("firstFrame")
        debug(firstFrame)
        Pbass = Pbass[:, firstFrame:]
        debug("Pbass")
        debug(Pbass)

        # Local maxima mark spectral peaks; restrict them to the bass band.
        localmaxBass = librosa.util.localmax(Pbass)
        debug("localmaxBass")
        debug(localmaxBass)
        maskBass = localmaxBass[FIRST_BASS_BUCKET:(LAST_BASS_BUCKET + 1)]
        debug("maskBass")
        debug(maskBass)
        ourBass = Pbass[FIRST_BASS_BUCKET:(LAST_BASS_BUCKET + 1)]
        debug("ourBass")
        debug(ourBass)

        # Bass bins loud enough to count, and those that are also peaks.
        filteredBass = (ourBass > ELF_THRESHOLD_DB)
        debug("filteredBass")
        debug(filteredBass)
        peakFilteredBass = np.multiply(filteredBass, maskBass)
        debug("peakFilteredBass")
        debug(peakFilteredBass)

        # Collapse per-bin hits to a per-frame "has deep bass" boolean.
        vertBassFrames = np.sum(filteredBass, axis=0)
        debug("vertBassFrames")
        debug(vertBassFrames)
        horizBassFrames = (vertBassFrames > 0)
        debug("horizBassFrames")
        debug(horizBassFrames)
        deepBassFrames = np.nonzero(horizBassFrames)[0]
        debug("deepBassFrames")
        debug(deepBassFrames.shape)
        debug(deepBassFrames)

        # Adjacent Deep Bass detector: frames whose two following frames also
        # contain deep bass (runs of at least three consecutive bass frames).
        shiftedHorizBassFrames = np.append(horizBassFrames[1:], [False])
        andedShiftedHorizBassFrames = np.logical_and(horizBassFrames, shiftedHorizBassFrames)
        adjacentHorizBassFrames = np.logical_and(andedShiftedHorizBassFrames, np.append(andedShiftedHorizBassFrames[1:], [False]))
        debug("adjacentHorizBassFrames")
        debug(adjacentHorizBassFrames)
        # /End Adjacent Deep Bass detector

        debug("beats")
        debug(beats.shape)
        debug(beats)
        # Beat frames that coincide with deep-bass frames.
        deepBassBeats = np.intersect1d(deepBassFrames, beats, assume_unique=True)
        debug("deepBassBeats")
        debug(deepBassBeats.shape)
        debug(deepBassBeats)

        numDeepBeats = deepBassBeats.shape[0]
        print("Number of deep beats: %(numDeepBeats)d" % {"numDeepBeats": numDeepBeats})
        deepBeatsPercentage = float(numDeepBeats) / numBeats
        print("Percentage of deep beats: %(deepBeatsPercentage)f" % {"deepBeatsPercentage": deepBeatsPercentage})

        numBassFrames = horizBassFrames.sum()
        print("Number of frames with deep bass: %(frames)d." % {"frames": numBassFrames})
        numAdjacentBassFrames = adjacentHorizBassFrames.sum()
        print("Number of adjacent frames with deep bass: %(frames)d." % {"frames": numAdjacentBassFrames})
        bassFramesPerBeat = float(numBassFrames) / numBeats
        print("Number of deep bass frames per beat: %(bassFramesPerBeat)f" % {"bassFramesPerBeat": bassFramesPerBeat})
        bassFramesPercentage = float(numBassFrames) / totalFrames
        print("Percentage of deep bass frames: %(bassFramesPercentage)f" % {"bassFramesPercentage": bassFramesPercentage})
        adjacentBassFramesPercentage = float(numAdjacentBassFrames) / totalFrames
        print("Percentage of adjacent deep bass frames: %(bassFramesPercentage)f" % {"bassFramesPercentage": adjacentBassFramesPercentage})
        # TODO: tag the track when adjacentBassFramesPercentage >= 0.30.
        print(("--- %s seconds ---" % (time.time() - start_time)))
    except Exception as exc:
        # BUG FIX: was a bare "except: continue", which silently swallowed
        # every error (including KeyboardInterrupt/SystemExit, making the
        # script hard to stop). Report the failure and move on.
        print("Skipping %(filename)s due to error: %(error)s" % {"filename": filename, "error": exc})
        continue
# BUG FIX: the original opened the log and called f.flush()/f.seek(0) on the
# READ handle, which does nothing for the still-buffered WRITE handle inside
# the Tee -- so the file read back empty. Flush the Tee (and, in case its
# flush is a no-op, the underlying log file handle directly) before reading.
sys.stdout.flush()
out1 = getattr(sys.stdout, "out1", None)
if out1 is not None:
    out1.flush()
with open(logfile, 'r') as f:
    fh = f.read()
print(fh.rstrip())
导入系统
sys.path.append(“.”)
#打印(系统路径)
导入时间
进口警告
warnings.simplefilter(action='ignore',category=futurewaning)
开始时间=time.time()
进口麻木
将numpy作为np导入
进口藏红花
def打印颜色(消息,颜色=32):
如果sys.stdout.isatty():
打印(“\033[{color}m{msg}\033[0m.”格式(msg=msg,color=color))
其他:
打印(“***{msg}***.”格式(msg=msg))
#TODO:通过cmdline配置此选项
采样率=44100赫兹
ELF_阈值_DB=-22#DB
#oldbuck N_FFT=16384
N_FFT=13000
第一个低音桶=0
最后一个低音桶=11
最后一次\u分析\u桶=64
DEBUG_ENABLED=False
如果len(系统argv)<2:
打印(“ElfTag:极低频音频标记器”)
打印(“用法:%(cmd)s“%{”cmd:sys.argv[0]}”)
系统出口(0)
#filename=sys.argv[1]
directory=sys.argv[1]
打印(目录)
#filename=librosa.util.example_audio_file()
#filename=“/Volumes/SDXC128GB/ElfTag/sp/02.Luma.mp3”
def调试(msg):
如果已启用调试单元:
打印(msg)
files=librosa.util.find_文件(目录)
打印(“总曲目:,len(文件))
队列=(len(文件))
三级发球台:
def写入(自身、*args、**kwargs):
self.out1.write(*args,**kwargs)
self.out2.write(*args,**kwargs)
定义初始值(自身、输出1、输出2):
self.out1=out1
self.out2=out2
def冲洗(自):
通过
导入系统
日志文件=输入(“输入日志文件的文件名:”)
sys.stdout=Tee(打开(日志文件,“w”),sys.stdout)
对于文件中的文件名:
尝试:
队列=队列-1
打印(队列,“剩余歌曲”)
打印(“正在加载%(文件名)s”%{“文件名”:文件名})
y、 sr=librosa.load(文件名,sr=None)
duration=librosa.core.get_duration(y=y,sr=sr)
打印(“检测到的采样率:%(sr)d Hz,持续时间:%(持续时间)f秒。”%{“sr”:sr,“持续时间”:持续时间})
bin\u size\u hz=浮点(sr)/N\u FFT
数量=N\u FFT/2+1
打印(“使用(n_fft)d的变换长度进行fft,这为我们提供了(num_bin)d个bin,每个bin的频率为%(bin_size_hz)f hz。”%{“n_fft”:n_fft,“num_bin”:num_bin,“bin_size_hz”:bin_size_hz})
起始频率=桶大小频率*第一个低音桶
end_hz=箱子尺寸_hz*(最后一个桶+1)
anal_hz=料仓尺寸_hz*(最后一次分析_桶+1)
打印(“从低于%(anal)f Hz的频率范围中选择%(开始)f Hz和高于%(结束)f Hz的%(db)d db之间的峰值检测深低音。”%{“开始”:开始?Hz,“结束”:结束?Hz,“db”:极低频阈值?db,“anal”:anal?Hz})
#y=librosa.core.to_mono(y)
D=librosa.stft(y,n_fft=n_fft)
节奏,节拍=librosa.beat.beat_曲目(y=y,sr=sr,units='frames',hop_length=512)
NumberAts=节拍。形状[0]
打印(“估计节拍:%(节拍)f.”%{“节拍”:节拍})
打印(“检测到的节拍数:%(节拍)d.“%{”节拍:numberats})
#分为和声层和打击层,以辅助节拍检测
#H、 P=librosa.decompose.hpss(D)
P=D
P=librosa.振幅到分贝(P,ref=np.最大值)
totalFrames=P.shape[1]
打印(“总帧数:%(帧数)d,每帧大约%(secPerFrame)f秒”%{“帧数”:总帧数,“secPerFrame”:(持续时间/总帧数)}
#选择重要的低音帧行
Pbass=P[第一个低音桶:(最后一个分析桶+1)]
firstFrame=np.argmax(Pbass.max(轴=0)>-80)
调试(“第一帧”)
调试(第一帧)
Pbass=Pbass[:,第一帧:]
调试(“Pbass”)
调试(Pbass)
localmaxBass=librosa.util.localmax(Pbass)
调试(“localmaxBass”)
调试(localmaxBass)
maskBass=localmaxBass[第一个低音桶:(最后一个低音桶+1)]
调试(“maskBass”)
调试(maskBass)
ourBass=Pbass[第一个低音桶:(最后一个低音桶+1)]
调试(“我们的低音”)
调试(ourBass)
filteredBass=(ourBass>ELF\u THRESHOLD\u DB)
调试(“过滤器数据库”)
调试(过滤器数据库)
peakFilteredBass=np.multiply(filteredBass,maskBass)
调试(“峰值过滤数据库”)
调试(峰值过滤数据库)
vertBassFrames=np.sum(过滤数据库,轴=0)
调试(“虚拟帧”)
调试(帧)
horizBassFrames=(vertBassFrames>0)
调试(“水平帧”)
调试(水平帧)
deepBassFrames=np.非零(水平BassFrames)[0]
调试(“deepBassFrames”)
调试(deepBassFrames.shape)
调试(帧)
#相邻深低音检波器
shiftedHorizBassFrames=np.append(horizBassFrames[1:],[False])
andedShiftedHorizBassFrames=np.logical_和(horizBassFrames,shiftedHorizBassFrames)
adjacentHorizBassFrames=np.logical_和(andedShiftedHorizBassFrames,np.append(andedShiftedHorizBassFrames[1:],[False]))
调试(“邻接OrizBassFrames”)
调试(邻接OrizBassFrames)
#/End相邻深低音检波器
调试(“节拍”)
调试(beats.shape)
调试(节拍)
deepBassBeats=np.intersect1d(deepBassFrames,beats,假设_unique=True)
调试(“deepBassBeats”)
调试(deepbass.shape)
调试(deepBassBeats)
numDeepBeats=深低音拍
def flush(self):
    """Flush both wrapped streams so pending output reaches them.

    This is the accepted fix for Tee: replacing its no-op flush() makes
    sys.stdout.flush() actually push buffered text into the log file.
    """
    self.out1.flush()
    self.out2.flush()