通过使用带有张量流的二元神经网络,Acc始终在0.5左右 将熊猫作为pd导入 将numpy作为np导入 进口PIL 导入PIL.Image 进口cv2 导入tensorflow作为tf 读取图像数据 文件_dirs=[“/”正“,“/”负”] def读取图

通过使用带有张量流的二元神经网络,Acc始终在0.5左右 将熊猫作为pd导入 将numpy作为np导入 进口PIL 导入PIL.Image 进口cv2 导入tensorflow作为tf 读取图像数据 文件_dirs=[“/”正“,“/”负”] def读取图,tensorflow,neural-network,Tensorflow,Neural Network,通过使用带有张量流的二元神经网络,Acc始终在0.5左右 将熊猫作为pd导入 将numpy作为np导入 进口PIL 导入PIL.Image 进口cv2 导入tensorflow作为tf 读取图像数据 文件_dirs=[“/”正“,“/”负”] def读取图像数据(文件目录): 文件=[] 对于范围内的i(20000): img_no=“%05d”%(i+1) img_dir=file_dir+“/”+img_no+“.jpg” img=cv2.imread(img\u dir,cv2.imrea

通过使用带有张量流的二元神经网络,Acc始终在0.5左右
将熊猫作为pd导入
将numpy作为np导入
进口PIL
导入PIL.Image
进口cv2
导入tensorflow作为tf
读取图像数据
文件_dirs=[“/”正“,“/”负”]
def读取图像数据(文件目录):
文件=[]
对于范围内的i(20000):
img_no=“%05d”%(i+1)
img_dir=file_dir+“/”+img_no+“.jpg”
img=cv2.imread(img\u dir,cv2.imread\u灰度)
如果img不是无:
files.append(img)
持续
img_dir=file_dir+“/”+img_no+“\u 1”+“.jpg”
img=cv2.imread(img\u dir,cv2.imread\u灰度)
files.append(img)
返回np.array(文件)
pos=读取图像数据(文件目录[0])
neg=读取图像数据(文件目录[1])
打印(类型(位置[0])#
打印(位置形状[0])#20000
打印(位置[0]。形状)#227
pos_label=np.array([np.float32(1),np.float32(0)])
neg_label=np.array([np.float32(0),np.float32(1)])
pos_labels=np.数组([pos_label for i in range(20000)])#[1,0],[1,0]。。。
neg_labels=np.数组([neg_label表示范围(20000)中的i)])#[0,1],[0,1]。。。
#将两个pos和neg组合到一个数据集
x=np.连接((正,负))
y=np.连接((正反标签))
#预处理
#扩大阴影
x=np。展开尺寸(x,轴=3)
打印(x.shape)#(40000227227,1)
def shuffle(数据集、标签、数量样本):
np.随机种子(10)
随机列表=np.arange(样本数)
np.random.shuffle(随机列表)
返回数据集[随机列表]、标签[随机列表]、随机列表
#比率=数据集中培训数据的百分比
def数据分割(x、y、速率、样本数):
速率=整数(速率*样本数)
x_列车=x[:费率]
x_测试=x[速率:]
y_列车=y[:费率]
y_试验=y[速率:]
返回x_列,x_测试,y_列,y_测试
#洗牌并拆分为训练和验证数据集
x_shuffled,y_shuffled,random_list=shuffle(x,y,x.shape[0])
x_序列,x_测试,y_序列,y_测试=数据分割(x_无序化,y_无序化,0.8,x.shape[0])
#规范化
x_列,x_测试,y_列,y_测试=x_列/255.0,x_测试/255.0,y_列,y_测试
def get_模型(IMG_宽度、IMG_高度、数量类别):
model=tf.keras.models.Sequential()
model.add(tf.keras.layers.Conv2D(filters=16,kernel\u size=(3,3),padding='valid',input\u shape=(IMG\u WIDTH,IMG\u HEIGHT,1),activation=“relu”))
添加(tf.keras.layers.AvgPool2D(池大小=(2,2)))
add(tf.keras.layers.Conv2D(过滤器=32,内核大小=(3,3),padding='valid',activation='tanh'))
添加(tf.keras.layers.AvgPool2D(池大小=(2,2)))
模型添加(tf.keras.layers.Dropout(0.2))
add(tf.keras.layers.Conv2D(过滤器=64,内核大小=(3,3),padding='valid',activation='tanh'))
添加(tf.keras.layers.AvgPool2D(池大小=(2,2)))
模型添加(tf.keras.layers.Dropout(0.2))
add(tf.keras.layers.Conv2D(过滤器=64,内核大小=(3,3),padding='valid',activation='sigmoid'))
添加(tf.keras.layers.AvgPool2D(池大小=(2,2)))
添加(tf.keras.layers.flatte())
model.add(tf.keras.layers.Dense(NUM_CATEGORIES*16,activation='relu'))
model.add(tf.keras.layers.Dense(NUM_CATEGORIES*8,activation='relu'))
添加(tf.keras.layers.Dense(NUM_CATEGORIES,activation='softmax'))
opt=tf.keras.optimizers.Adam(学习率=0.1)
compile(优化器=opt,loss='binary\u crossentropy',metrics=['accurity'])
回归模型
模型=get_模型(227227,2)
model.summary()
型号:“顺序_5”
_________________________________________________________________
层(类型)输出形状参数
=================================================================
conv2d_20(conv2d)(无、225、225、16)448
_________________________________________________________________
平均池2D池20(平均(无、112、112、16)0
_________________________________________________________________
conv2d_21(conv2d)(无、110、110、32)4640
_________________________________________________________________
平均池2D_21(平均值(无、55、55、32)0
_________________________________________________________________
辍学10(辍学)(无、55、55、32)0
_________________________________________________________________
conv2d_22(conv2d)(无、53、53、64)18496
_________________________________________________________________
平均池2D_22(平均(无、26、26、64)0
_________________________________________________________________
辍学11(辍学)(无、26、26、64)0
_________________________________________________________________
conv2d_23(conv2d)(无、24、24、64)36928
_________________________________________________________________
平均池2D_23(平均(无、12、12、64)0
_________________________________________________________________
展平5(展平)(无,9216)0
_________________________________________________________________
致密(致密)(无,32)294944
_________________________________________________________________
致密(致密)(无,16)528
_________________________________________________________________
密集型_17(密集型)(无,2)34
=================================================================
总参数:356018
可培训参数:356018
不可训练参数:0
_________________________________________________________________
模型拟合(x_序列,y_序列,历次=5)
评估模型(x_检验,y_检验,verbose=2)
纪元1/5
1000/1000[======================================================16秒/步-损耗:3.0167-精度:0.5013
纪元2/5
1000/1000[======================================================15秒/步-损耗:0.6963-精度:0.4942
纪元3/5
1000/1000[=======================================]-15秒/步-损耗:
import pandas as pd
import numpy as np
import PIL
import PIL.Image
import cv2
import tensorflow as tf
Reading Image Data
file_dirs = ["./Positive", "./Negative"]

def read_image_data(file_dir):
    files = []
    for i in range(20000):
        img_no = "%05d"% (i+1) 
        img_dir = file_dir + "/" + img_no + ".jpg"
        img = cv2.imread(img_dir,cv2.IMREAD_GRAYSCALE)
        if img is not None:
            files.append(img)
            continue
        img_dir = file_dir + "/" + img_no + "_1" + ".jpg"
        img = cv2.imread(img_dir,cv2.IMREAD_GRAYSCALE)
        files.append(img)
    return np.array(files)
pos = read_image_data(file_dirs[0])
neg = read_image_data(file_dirs[1])

print(type(pos[0])) # <class 'numpy.ndarray'>
print(pos.shape[0]) # 20000
print(pos[0].shape) # 227,227

pos_label = np.array([np.float32(1),np.float32(0)])
neg_label = np.array([np.float32(0),np.float32(1)])
pos_labels = np.array([pos_label for i in range(20000)]) # [1,0],[1,0], ...
neg_labels = np.array([neg_label for i in range(20000)]) # [0,1],[0,1], ...

# combine two pos and neg to one dataset
x = np.concatenate((pos,neg))
y = np.concatenate((pos_labels,neg_labels))
  
# Preprocessing
# expand dims
x = np.expand_dims(x, axis=3)
print(x.shape) # (40000,227,227,1)

def shuffle(dataset, labels, NUM_SAMPLES):
    np.random.seed(10)
    random_list = np.arange(NUM_SAMPLES)
    np.random.shuffle(random_list)
    return dataset[random_list], labels[random_list], random_list

# rate = percentage of training data in the dataset
def data_split(x, y, rate, NUM_SAMPLES):
    rate = int(rate*NUM_SAMPLES)
    x_train = x[:rate]
    x_test = x[rate:]
    y_train = y[:rate]
    y_test = y[rate:]
    return x_train, x_test, y_train, y_test

# shuffle and split into train and validation datasets
x_shuffled, y_shuffled, random_list = shuffle(x, y, x.shape[0])
x_train, x_test, y_train, y_test = data_split(x_shuffled, y_shuffled, 0.8, x.shape[0])

# normalization
x_train, x_test, y_train, y_test = x_train/255.0, x_test/255.0, y_train, y_test 

def get_model(IMG_WIDTH, IMG_HEIGHT, NUM_CATEGORIES):
    model = tf.keras.models.Sequential()
    model.add(tf.keras.layers.Conv2D(filters=16, kernel_size=(3,3), padding='valid', input_shape=(IMG_WIDTH, IMG_HEIGHT,1), activation="relu"))
    model.add(tf.keras.layers.AvgPool2D(pool_size=(2,2)))
    model.add(tf.keras.layers.Conv2D(filters=32, kernel_size=(3,3), padding='valid', activation="tanh"))
    model.add(tf.keras.layers.AvgPool2D(pool_size=(2,2)))
    model.add(tf.keras.layers.Dropout(0.2))
    model.add(tf.keras.layers.Conv2D(filters=64, kernel_size=(3,3), padding='valid', activation="tanh"))
    model.add(tf.keras.layers.AvgPool2D(pool_size=(2,2)))
    model.add(tf.keras.layers.Dropout(0.2))
    model.add(tf.keras.layers.Conv2D(filters=64, kernel_size=(3,3), padding='valid', activation="sigmoid"))
    model.add(tf.keras.layers.AvgPool2D(pool_size=(2,2)))
    model.add(tf.keras.layers.Flatten())
    model.add(tf.keras.layers.Dense(NUM_CATEGORIES*16, activation='relu'))
    model.add(tf.keras.layers.Dense(NUM_CATEGORIES*8, activation='relu'))
    model.add(tf.keras.layers.Dense(NUM_CATEGORIES, activation='softmax'))
    
    opt = tf.keras.optimizers.Adam(learning_rate=0.1)
    model.compile(optimizer=opt, loss='binary_crossentropy', metrics=['accuracy'])
    return model

model = get_model(227,227,2)

model.summary()

Model: "sequential_5"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv2d_20 (Conv2D)           (None, 225, 225, 16)      448       
_________________________________________________________________
average_pooling2d_20 (Averag (None, 112, 112, 16)      0         
_________________________________________________________________
conv2d_21 (Conv2D)           (None, 110, 110, 32)      4640      
_________________________________________________________________
average_pooling2d_21 (Averag (None, 55, 55, 32)        0         
_________________________________________________________________
dropout_10 (Dropout)         (None, 55, 55, 32)        0         
_________________________________________________________________
conv2d_22 (Conv2D)           (None, 53, 53, 64)        18496     
_________________________________________________________________
average_pooling2d_22 (Averag (None, 26, 26, 64)        0         
_________________________________________________________________
dropout_11 (Dropout)         (None, 26, 26, 64)        0         
_________________________________________________________________
conv2d_23 (Conv2D)           (None, 24, 24, 64)        36928     
_________________________________________________________________
average_pooling2d_23 (Averag (None, 12, 12, 64)        0         
_________________________________________________________________
flatten_5 (Flatten)          (None, 9216)              0         
_________________________________________________________________
dense_15 (Dense)             (None, 32)                294944    
_________________________________________________________________
dense_16 (Dense)             (None, 16)                528       
_________________________________________________________________
dense_17 (Dense)             (None, 2)                 34        
=================================================================
Total params: 356,018
Trainable params: 356,018
Non-trainable params: 0
_________________________________________________________________
model.fit(x_train, y_train, epochs=5)
model.evaluate(x_test, y_test, verbose=2)
Epoch 1/5
1000/1000 [==============================] - 16s 16ms/step - loss: 3.0167 - accuracy: 0.5013
Epoch 2/5
1000/1000 [==============================] - 15s 15ms/step - loss: 0.6963 - accuracy: 0.4942
Epoch 3/5
1000/1000 [==============================] - 15s 15ms/step - loss: 0.6953 - accuracy: 0.5016
Epoch 4/5
1000/1000 [==============================] - 15s 15ms/step - loss: 0.6957 - accuracy: 0.4999
Epoch 5/5
 679/1000 [===================>..........] - ETA: 5s - loss: 0.6947 - accuracy: 0.4983