Python 将1个子文件夹的数据添加到第2个子文件夹

Python 将1个子文件夹的数据添加到第2个子文件夹,python,Python,我有一个物体识别的脚本。将输出写入磁盘时,1个子文件夹的输出会附加到第2个子文件夹的输出 该代码可以很好地用于对象识别,并完美地写入第一个子文件夹的数据,但在写入第二个子文件夹的输出时,第一个子文件夹的输出也会添加到第二个子文件夹的输出中 def recognize_object(model_name,ckpt_path,label_path,test_img_path,img_output): count=0 sys.path.append("..") MODEL_

我有一个物体识别的脚本。将输出写入磁盘时,1个子文件夹的输出会附加到第2个子文件夹的输出

该代码可以很好地用于对象识别,并完美地写入第一个子文件夹的数据,但在写入第二个子文件夹的输出时,第一个子文件夹的输出也会添加到第二个子文件夹的输出中

def recognize_object(model_name,ckpt_path,label_path,test_img_path,img_output):

    count=0
    sys.path.append("..")

    MODEL_NAME = model_name

    PATH_TO_CKPT = ckpt_path

    PATH_TO_LABELS = label_path

    if not os.path.exists(img_output):
        os.makedirs(img_output,exist_ok=True)

    folders = glob(test_img_path)
    print(folders)
    img_list=[]

    for folder in folders:
        folder_name=os.path.basename(folder)
        print(folder_name)
        out=img_output+"\\"+folder_name
        os.makedirs(out,exist_ok=True)
        print(out)

        for f in glob(folder+"/*.jpg"):
            img_list.append(f)

        for x in range(len(img_list)):
            PATH_TO_IMAGE = img_list[x]
            v1=os.path.basename(img_list[x])
            img_name = os.path.splitext(v1)[0]

            NUM_CLASSES = 3

            label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
            categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
            category_index = label_map_util.create_category_index(categories)

            detection_graph = tf.Graph()

            with detection_graph.as_default():
                od_graph_def = tf.GraphDef()
                with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
                    serialized_graph = fid.read()
                    od_graph_def.ParseFromString(serialized_graph)
                    tf.import_graph_def(od_graph_def, name='')

                sess = tf.Session(graph=detection_graph)

            image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')

            detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
            detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
            detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
            num_detections = detection_graph.get_tensor_by_name('num_detections:0')


            image = cv2.imread(PATH_TO_IMAGE)
            image_expanded = np.expand_dims(image, axis=0)

            (boxes, scores, classes, num) = sess.run([detection_boxes, detection_scores, detection_classes, num_detections],feed_dict={image_tensor: image_expanded})


            vis_util.visualize_boxes_and_labels_on_image_array(
            image,
            np.squeeze(boxes),
            np.squeeze(classes).astype(np.int32),
            np.squeeze(scores),
            category_index,
            use_normalized_coordinates=True,
            line_thickness=4,
            min_score_thresh=0.80,
            skip_scores=True)


            coordinates=vis_util.return_coordinates(
            image,
            np.squeeze(boxes),
            np.squeeze(classes).astype(np.int32),
            np.squeeze(scores),
            category_index,
            use_normalized_coordinates=True,
            line_thickness=4,
            min_score_thresh=0.80)

            threshold=0.80

            cv2.imwrite(out+"\\{}.jpg".format(img_name),image)
            cv2.waitKey(0)
            cv2.destroyAllWindows()




            objects = []
            with open(out+'/metadata.csv','a') as csv_file:
                writer = csv.writer(csv_file)
                for index, value in enumerate(classes[0]):
                    object_dict = {}
                    if scores[0, index] > threshold:
                        object_dict[(category_index.get(value)).get('name').encode('utf8')] = scores[0, index]
                        objects.append(object_dict)
                writer.writerow(objects)
                print (objects)



            filename_string='coordinates_data'

            textfile = open("json/"+filename_string+".json", "a")
            textfile.write(json.dumps(coordinates))
            textfile.write("\n")

            textfile = open("json/"+"img_names"+".json", "a")
            textfile.write(json.dumps(PATH_TO_IMAGE))
            textfile.write("\n")
    img_list=[]


model_name='inference_graph'
ckpt_path=("C:\\new_multi_cat\\models\\research\\object_detection\\inference_graph\\frozen_inference_graph.pb")
label_path=("C:\\new_multi_cat\\models\\research\\object_detection\\training\\labelmap.pbtxt")
test_img_path=("C:\\Python35\\target_non_target\\Target_images_new\\*")
img_output=("C:\\new_multi_cat\\models\\research\\object_detection\\my_imgs")

recognize = recognize_object(model_name,ckpt_path,label_path,test_img_path,img_output)

假设有一个包含子文件夹C和D的文件夹Y。我希望将数据写入它们各自的文件夹中。目前子文件夹C的数据写得很好,但在写子文件夹D的数据时,文件夹C的数据也被附加到了D。这个问题与缩进或其他问题有关吗?

再次缩进第二个
img\u列表=[]
,它在文件夹循环之外。

有效。再次感谢。很抱歉为这些小问题打扰您:)@Ankit没问题,如果您再次陷入困境,请告诉我。我已解决了以前的问题,但在同一脚本中有一个新问题。我正在使用这些对象列表将数据写入csv文件。它将第二个文件夹的csv数据附加到第一个文件夹,第二个csv返回为空。又是缩进的东西还是别的?我是否应该将csv数据写入代码块转移到其他位置?第二个csv最终是什么?它只有第二个数据还是不存在?我将objects=[]再次放在print(objects)下面。这使得第一个csv正确写入,第二个csv也正确写入,但结果是空白