Python: is it possible for top-5 accuracy to be lower than top-1 accuracy?


In my training results:

My code is:

import keras
from keras import datasets
from keras import layers
from keras import models
print(f'keras版本={keras.__version__}')
# Load the CIFAR-10 dataset and automatically split it into a training set and a test set
(X_train, y_train),(X_test, y_test)= datasets.cifar10.load_data()
# Build the FNN model
model = models.Sequential()
# Build the input layer
# Without a Flatten layer this example raises an error (the number after dense_ varies between runs, the rest is the same):
#ValueError: Error when checking target: expected dense_30 to have 3 dimensions, but got array with shape (60000, 1)
model.add(layers.Flatten())
# Build the hidden layer (here it has only 1 neuron)
# A more complete way to write it: model.add(layers.Dense(input_dim=28*28,units=2))
# The legacy name for units is output_dim: model.add(layers.Dense(input_dim=28*28,output_dim=2))
model.add(layers.Dense(units=1))
# Build the output layer (at least 10 output neurons)
model.add(layers.Dense(units=10))
import functools
# Changing k=5 to k=3 computes Top-3 instead
top5_acc = functools.partial(keras.metrics.top_k_categorical_accuracy, k=5)
# The name is customizable; it is what gets displayed during training
top5_acc.__name__ = 'top5_acc'
# Optimization settings
# If loss is set to categorical_crossentropy it raises:
#ValueError: Error when checking target: expected dense_2 to have shape (10,) but got array with shape (1,)
model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd', metrics=['accuracy',top5_acc])
# Start training
model.fit(
    x=X_train,
    y=y_train,
    epochs=1,
    verbose=1,
    validation_split=0.1
)
# If we do not call
#input_shape = [28,28,1]
#build(input_shape)
# then summary() can only be called after fit(),
# otherwise it raises:
#This model has not yet been built. Build the model first by calling build() or calling fit() with some data.
#Or specify input_shape or batch_input_shape in the first layer for automatic build.
#model.build(input_shape)
model.summary()
# Start testing
test_loss,top_1_accuracy, top_5_accuracy = model.evaluate(X_test, y_test)
print(f'test_loss={test_loss}')
print(f'top_1_accuracy={top_1_accuracy}')
print(f'top_5_accuracy={top_5_accuracy}')
Is there any configuration error or conceptual error here?

Thanks, everyone~
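One thing I suspect, but have not confirmed: since the loss is sparse_categorical_crossentropy, the labels are integer class indices, while top_k_categorical_accuracy seems to expect one-hot labels. Below is a minimal sketch of the metric setup I would try instead, using the sparse metric variants (this is my assumption, not a verified fix):

# Sketch only (assumption): with integer labels, use the sparse metric
# variants so that the metrics and the loss agree on the label format.
from keras.metrics import sparse_categorical_accuracy, sparse_top_k_categorical_accuracy

model.compile(loss='sparse_categorical_crossentropy',
              optimizer='sgd',
              metrics=[sparse_categorical_accuracy,
                       sparse_top_k_categorical_accuracy])  # default k=5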

=====================================

After changing accuracy to categorical_accuracy:

import keras
from keras import datasets
from keras import layers
from keras import models
print(f'keras版本={keras.__version__}')
# Load the CIFAR-10 dataset and automatically split it into a training set and a test set
(X_train, y_train),(X_test, y_test)= datasets.cifar10.load_data()
# Build the FNN model
model = models.Sequential()
# Build the input layer
# Without a Flatten layer this example raises an error (the number after dense_ varies between runs, the rest is the same):
#ValueError: Error when checking target: expected dense_30 to have 3 dimensions, but got array with shape (60000, 1)
model.add(layers.Flatten())
# Build the hidden layer (here it has only 1 neuron)
# A more complete way to write it: model.add(layers.Dense(input_dim=28*28,units=2))
# The legacy name for units is output_dim: model.add(layers.Dense(input_dim=28*28,output_dim=2))
model.add(layers.Dense(units=1))
# Build the output layer (at least 10 output neurons)
model.add(layers.Dense(units=10))
import functools
# Changing k=5 to k=3 computes Top-3 instead
top5_acc = functools.partial(keras.metrics.top_k_categorical_accuracy, k=5)
# The name is customizable; it is what gets displayed during training
top5_acc.__name__ = 'top5_acc'
# Optimization settings
# If loss is set to categorical_crossentropy it raises:
#ValueError: Error when checking target: expected dense_2 to have shape (10,) but got array with shape (1,)
model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd', metrics=['categorical_accuracy',top5_acc])
# Start training
model.fit(
    x=X_train,
    y=y_train,
    epochs=1,
    verbose=1,
    validation_split=0.1
)
# If we do not call
#input_shape = [28,28,1]
#build(input_shape)
# then summary() can only be called after fit(),
# otherwise it raises:
#This model has not yet been built. Build the model first by calling build() or calling fit() with some data.
#Or specify input_shape or batch_input_shape in the first layer for automatic build.
#model.build(input_shape)
model.summary()
# Start testing
test_loss,top_1_accuracy, top_5_accuracy = model.evaluate(X_test, y_test)
print(f'test_loss={test_loss}')
print(f'top_1_accuracy={top_1_accuracy}')
print(f'top_5_accuracy={top_5_accuracy}')
I get:
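My guess about why categorical_accuracy behaves strangely here (my own understanding, not taken from the Keras docs): it compares argmax(y_true) with argmax(y_pred) along the last axis, so with integer labels of shape (N, 1) the argmax of y_true is always 0. A small numpy illustration of that assumption:

# Illustration only (assumes categorical_accuracy is an argmax comparison).
import numpy as np

y_true = np.array([[3], [7]])    # integer labels, shape (2, 1)
y_pred = np.random.rand(2, 10)   # 10-class scores

# argmax over a (N, 1) integer label array is always 0, so the comparison
# no longer means "predicted class == true class".
top1_like = np.mean(np.argmax(y_true, axis=-1) == np.argmax(y_pred, axis=-1))
print(top1_like)  # effectively "how often is class 0 the argmax prediction"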

=== After I added softmax ===

import keras
from keras import datasets
from keras import layers
from keras import models
print(f'keras版本={keras.__version__}')
# Load the CIFAR-10 dataset and automatically split it into a training set and a test set
(X_train, y_train),(X_test, y_test)= datasets.cifar10.load_data()
# Build the FNN model
model = models.Sequential()
# Build the input layer
# Without a Flatten layer this example raises an error (the number after dense_ varies between runs, the rest is the same):
#ValueError: Error when checking target: expected dense_30 to have 3 dimensions, but got array with shape (60000, 1)
model.add(layers.Flatten())
# Build the hidden layer (here it has only 1 neuron)
# A more complete way to write it: model.add(layers.Dense(input_dim=28*28,units=2))
# The legacy name for units is output_dim: model.add(layers.Dense(input_dim=28*28,output_dim=2))
model.add(layers.Dense(units=1))
# Build the output layer (at least 10 output neurons)
model.add(layers.Dense(units=10,activation='softmax'))
import functools
# Changing k=5 to k=3 computes Top-3 instead
top5_acc = functools.partial(keras.metrics.top_k_categorical_accuracy, k=5)
# The name is customizable; it is what gets displayed during training
top5_acc.__name__ = 'top5_acc'
# Optimization settings
# If loss is set to categorical_crossentropy it raises:
#ValueError: Error when checking target: expected dense_2 to have shape (10,) but got array with shape (1,)
model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd', metrics=['categorical_accuracy',top5_acc])
# Start training
model.fit(
    x=X_train,
    y=y_train,
    epochs=1,
    verbose=1,
    validation_split=0.1
)
# If we do not call
#input_shape = [28,28,1]
#build(input_shape)
# then summary() can only be called after fit(),
# otherwise it raises:
#This model has not yet been built. Build the model first by calling build() or calling fit() with some data.
#Or specify input_shape or batch_input_shape in the first layer for automatic build.
#model.build(input_shape)
model.summary()
# Start testing
test_loss,top_1_accuracy, top_5_accuracy = model.evaluate(X_test, y_test)
print(f'test_loss={test_loss}')
print(f'top_1_accuracy={top_1_accuracy}')
print(f'top_5_accuracy={top_5_accuracy}')
I get:
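As a sanity check (my own sketch, reusing model and X_test from the code above): top-k accuracy computed directly from the predicted probabilities can never be below top-1 by definition, so if the reported metrics violate that, the metric configuration is what I should suspect rather than the model itself.

# Manual top-1 / top-5 check with numpy (sketch, not part of the original code).
import numpy as np

probs = model.predict(X_test)                 # shape (N, 10)
labels = y_test.reshape(-1)                   # integer labels, shape (N,)

top1 = np.mean(np.argmax(probs, axis=1) == labels)
top5_sets = np.argsort(probs, axis=1)[:, -5:] # indices of the 5 largest scores
top5 = np.mean([labels[i] in top5_sets[i] for i in range(len(labels))])
print(top1, top5)                             # here top5 >= top1 always holds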

=================================================

Thanks to @Thomas Schillaci's suggestion, I found a version that runs without any errors:

import keras
from keras import datasets
from keras import layers
from keras import models
print(f'keras版本={keras.__version__}')
# Load the CIFAR-10 dataset and automatically split it into a training set and a test set
(X_train, y_train),(X_test, y_test)= datasets.cifar10.load_data()
# Build the FNN model
model = models.Sequential()
# Build the input layer
# Without a Flatten layer this example raises an error (the number after dense_ varies between runs, the rest is the same):
#ValueError: Error when checking target: expected dense_30 to have 3 dimensions, but got array with shape (60000, 1)
model.add(layers.Flatten())
# Build the hidden layer (here it has only 1 neuron)
# A more complete way to write it: model.add(layers.Dense(input_dim=28*28,units=2))
# The legacy name for units is output_dim: model.add(layers.Dense(input_dim=28*28,output_dim=2))
model.add(layers.Dense(units=1))
# Build the output layer (at least 10 output neurons)
model.add(layers.Dense(units=10,activation='softmax'))
import functools

# The setup below gives odd results, e.g. top1=99.64, top-5=0.0
# Changing k=5 to k=3 computes Top-3 instead
#top5_acc = functools.partial(keras.metrics.top_k_categorical_accuracy, k=5)
# The name is customizable; it is what gets displayed during training
#top5_acc.__name__ = 'top5_acc'
# Optimization settings
# If loss is set to categorical_crossentropy it raises:
#ValueError: Error when checking target: expected dense_2 to have shape (10,) but got array with shape (1,)
#model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd', metrics=['categorical_accuracy',top5_acc])

# The setup below raises: ValueError: '<lambda>' is not a valid scope name
#from keras import metrics
#top3 = lambda x,y: metrics.top_k_categorical_accuracy(x,y,3)
#top5 = lambda x,y: metrics.top_k_categorical_accuracy(x,y,5)
#model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd', metrics=['accuracy',top3,top5])

# The setup below raises: ValueError: None values not supported.
#def top3(x,y):
#    metrics.top_k_categorical_accuracy(x,y,3)
#def top5(x,y):
#    metrics.top_k_categorical_accuracy(x,y,5)
#model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd', metrics=['accuracy',top3,top5])

# The setup below looks fine during training: accuracy: 0.1371 - top_k_categorical_accuracy: 0.4191 - acc_top3: 0.2147
# but the testing results are wrong:
# top_1_accuracy=0.15530000627040863
# top_3_accuracy=0.0
# top_5_accuracy=0.0
# and the training log shows top_k_categorical_accuracy instead of a top-5 name
#45000/45000 [==============================] - 2s 52us/step - loss: 7.9220
#- accuracy: 0.1136 - top_k_categorical_accuracy: 0.4338 - acc_top3: 0.3794 - val_loss: 2.2809
# - val_accuracy: 0.1470 - val_top_k_categorical_accuracy: 0.2550 - val_acc_top3: 0.2412
# def acc_top3(y_true, y_pred):
    # return metrics.top_k_categorical_accuracy(y_true, y_pred, k=3)
# model.compile(loss='sparse_categorical_crossentropy',
              # optimizer='adam',
              # metrics=['accuracy', metrics.top_k_categorical_accuracy, acc_top3])

# The setup below looks fine during training: accuracy: 0.1332 - top5_acc: 0.5029 - acc_top3: 0.3971
# but the testing results are wrong:
# top_1_accuracy=0.16220000386238098
# top_3_accuracy=0.08720000088214874
# top_5_accuracy=0.3935000002384186
#top5_acc = functools.partial(keras.metrics.top_k_categorical_accuracy, k=5)
# The name is customizable; it is what gets displayed during training
# top5_acc.__name__ = 'top5_acc'
# def acc_top3(y_true, y_pred):
   # return metrics.top_k_categorical_accuracy(y_true, y_pred, k=3)
# model.compile(loss='sparse_categorical_crossentropy',
             # optimizer='adam',
             # metrics=['accuracy', top5_acc, acc_top3])

# The setup below looks fine during training: acc: 0.1314 - acc_top2: 0.4392 - acc_top3: 0.4797 - acc_top4: 0.5172 - acc_top5: 0.5492
# but the testing results are wrong:
# top_1_accuracy=0.13410000503063202
# top_2_accuracy=0.03440000116825104
# top_3_accuracy=0.034699998795986176
# top_4_accuracy=0.03669999912381172
# top_5_accuracy=0.042399998754262924
# from keras.metrics import top_k_categorical_accuracy
# def acc_top2(y_true, y_pred):
    # return top_k_categorical_accuracy(y_true, y_pred, k=2)
    
# def acc_top3(y_true, y_pred):
    # return top_k_categorical_accuracy(y_true, y_pred, k=3)

# def acc_top4(y_true, y_pred):
    # return top_k_categorical_accuracy(y_true, y_pred, k=4)

# def acc_top5(y_true, y_pred):
    # return top_k_categorical_accuracy(y_true, y_pred, k=5)
# model.compile(loss='sparse_categorical_crossentropy',
              # optimizer='adam',
              # metrics=['acc', acc_top2, acc_top3, acc_top4, acc_top5])

# The setup below looks fine during training: acc: 0.1314 - acc_top2: 0.4392 - acc_top3: 0.4797 - acc_top4: 0.5172 - acc_top5: 0.5492
# but the testing results are wrong:
# top_1_accuracy=0.15760000050067902
# top_2_accuracy=0.0
# top_3_accuracy=0.125900000333786
# top_4_accuracy=0.48910000920295715
# top_5_accuracy=0.6032000184059143
# from keras.metrics import top_k_categorical_accuracy
# def acc_top2(y_true, y_pred):
    # return top_k_categorical_accuracy(y_true, y_pred, k=2)
    
# def acc_top3(y_true, y_pred):
    # return top_k_categorical_accuracy(y_true, y_pred, k=3)

# def acc_top4(y_true, y_pred):
    # return top_k_categorical_accuracy(y_true, y_pred, k=4)

# def acc_top5(y_true, y_pred):
    # return top_k_categorical_accuracy(y_true, y_pred, k=5)
# model.compile(loss='sparse_categorical_crossentropy',
              # optimizer='adam',
              # metrics=['accuracy', acc_top2, acc_top3, acc_top4, acc_top5])
             
from keras.metrics import top_k_categorical_accuracy
def acc_top2(y_true, y_pred):
    return top_k_categorical_accuracy(y_true, y_pred, k=2)
    
def acc_top3(y_true, y_pred):
    return top_k_categorical_accuracy(y_true, y_pred, k=3)

def acc_top4(y_true, y_pred):
    return top_k_categorical_accuracy(y_true, y_pred, k=4)

def acc_top5(y_true, y_pred):
    return top_k_categorical_accuracy(y_true, y_pred, k=5)
model.compile(loss='sparse_categorical_crossentropy',
              optimizer='adam',
              metrics=['categorical_accuracy', acc_top2, acc_top3, acc_top4, acc_top5])
# The setup below looks correct, but it failed once (all other runs were correct) and I could not reproduce that failure
# model.compile(loss='sparse_categorical_crossentropy',
              # optimizer='adam',
              # metrics=['accuracy', metrics.top_k_categorical_accuracy])
# Start training
model.fit(
    x=X_train,
    y=y_train,
    epochs=1,
    verbose=1,
    validation_split=0.1
)
# If we do not call
#input_shape = [28,28,1]
#build(input_shape)
# then summary() can only be called after fit(),
# otherwise it raises:
#This model has not yet been built. Build the model first by calling build() or calling fit() with some data.
#Or specify input_shape or batch_input_shape in the first layer for automatic build.
#model.build(input_shape)
model.summary()
# Start testing
#test_loss,top_1_accuracy, top_5_accuracy, top_3_accuracy = model.evaluate(X_test, y_test)
#test_loss,top_1_accuracy, top_5_accuracy = model.evaluate(X_test, y_test)
test_loss,top_1_accuracy, top_2_accuracy, top_3_accuracy, top_4_accuracy, top_5_accuracy = model.evaluate(X_test, y_test)
print(model.metrics_names)
print(f'test_loss={test_loss}')
print(f'top_1_accuracy={top_1_accuracy}')
print(f'top_2_accuracy={top_2_accuracy}')
print(f'top_3_accuracy={top_3_accuracy}')
print(f'top_4_accuracy={top_4_accuracy}')
print(f'top_5_accuracy={top_5_accuracy}')
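For comparison, a variant I have been considering but have not fully verified: convert the integer labels to one-hot with keras.utils.to_categorical, so that categorical_crossentropy, categorical_accuracy and top_k_categorical_accuracy all see the label format they expect. This reuses the acc_top2..acc_top5 helpers defined above and is only a sketch, not a confirmed fix:

# Sketch (assumption): one-hot labels + categorical_crossentropy, so the
# top-k metrics and the loss agree on the label format.
from keras.utils import to_categorical

y_train_oh = to_categorical(y_train, num_classes=10)
y_test_oh = to_categorical(y_test, num_classes=10)

model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['categorical_accuracy', acc_top2, acc_top3, acc_top4, acc_top5])
model.fit(x=X_train, y=y_train_oh, epochs=1, verbose=1, validation_split=0.1)
print(model.evaluate(X_test, y_test_oh))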