Python: In TensorFlow 2.0, how do I feed TFRecord data to a Keras model?

Tags: python, tensorflow, keras, tfrecord

I am trying to solve a classification problem with a deep neural network (DNN); the input data has 32 features and 16 labels.

They look like this:

# input data
shape=(32,), dtype=float32,
np.array([-0.9349509 ,  0.24052018, -0.29364416,  1.2375807 , -0.15996791,
        0.32468656,  0.43856472,  0.00573635, -0.48105922,  0.09342893,
        0.63206947,  0.44424117,  0.31256443,  0.09699771,  0.31004518,
        0.8685253 ,  0.74120486,  0.65404135, -0.4084895 ,  0.07065713,
        0.33964285, -0.20022233, -0.29489437,  0.40699714,  0.27028704,
        0.74895304, -0.4846958 ,  0.22371463,  0.3422047 , -0.24379562,
        0.38614622,  0.01282159])

# label (one-hot category)
shape=(16,), dtype=int64,
np.array([0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
I want to train my NN with more than 100 million samples (a very large dataset), so it is impossible to load the whole dataset into a Python array. After some googling, I found that tf.TFRecord could solve this capacity problem.

I wrote a TFRecord file by following the official TensorFlow site, but I could not find how to load the TFRecord into a Keras model.

TFRecord writer

I use a writer to write the dataset into a TFRecord file:

writer = tf.io.TFRecordWriter(filename)
for i in range(number_of_samples):
    ...
    writer.write(serialize_example(input_data, label))
writer.close()
with the following serialize_example method:

def serialize_example(input_data, label):
    """
    Creates a tf.Example message ready to be written to a file.
    """
    # Create a dictionary mapping the feature name to the tf.Example-compatible data type.
    feature = {
        'feature': tf.train.Feature(float_list=tf.train.FloatList(value=input_data)),
        'label': tf.train.Feature(int64_list=tf.train.Int64List(value=label))
    }
    # Create a Features message using tf.train.Example.
    example_proto = tf.train.Example(features=tf.train.Features(feature=feature))
    return example_proto.SerializeToString()
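
For reference, a self-contained sketch of how the writer above can be exercised; number_of_samples, filename, and the random data here are made up for illustration:

import numpy as np
import tensorflow as tf

number_of_samples = 100      # hypothetical; the real dataset is far larger
filename = 'train.tfrecord'  # hypothetical path

with tf.io.TFRecordWriter(filename) as writer:
    for i in range(number_of_samples):
        # Fake one sample: 32 float features and a 16-way one-hot label.
        input_data = np.random.randn(32).astype(np.float32)
        label = np.zeros(16, dtype=np.int64)
        label[np.random.randint(16)] = 1
        writer.write(serialize_example(input_data, label))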
TFRecord reader

After writing the TFRecord, I figured out how to read the file back by parsing its serialized strings:

dataset = tf.data.TFRecordDataset(filenames=[filenames])
parsed_dataset = dataset.map(_parse_function, num_parallel_calls=8)
final_dataset = parsed_dataset.shuffle(buffer_size=number_of_sample).batch(10)

print(parsed_dataset)
# <ParallelMapDataset shapes: {feature: (32,), label: (16,)}, types: {feature: tf.float32, label: tf.int64}>

for parsed_record in parsed_dataset.take(1):
    print(repr(parsed_record))
'''
{'feature': <tf.Tensor: ... shape=(32,), dtype=float32, ...>,
 'label': <tf.Tensor: ... shape=(16,), dtype=int64, ...>}
'''
and the following _parse_function method:

# Create a description of the features.
feature_description = {
    'feature': tf.io.FixedLenFeature([32], tf.float32),
    'label': tf.io.FixedLenFeature([16], tf.int64)
}

def _parse_function(example_proto):
    # Parse the input `tf.Example` proto using the dictionary above.
    return tf.io.parse_single_example(example_proto, feature_description)
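
As a quick sanity check (not part of the original question), the structure this pipeline yields can be inspected through the dataset's element_spec property, available on tf.data datasets in TF 2.x; note that every element is a single dict rather than an (inputs, targets) pair:

print(final_dataset.element_spec)
# Something like:
# {'feature': TensorSpec(shape=(None, 32), dtype=tf.float32, name=None),
#  'label': TensorSpec(shape=(None, 16), dtype=tf.int64, name=None)}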
Feeding the record (training)

So far everything seemed to run smoothly, but when I tried to feed this dataset to the Keras model, it produced an error.

Model definition and running the training:

inputs = keras.Input(shape=(32, ), name='feature')
x = layers.Dense(1024, activation='linear', name='dense_input')(inputs)
outputs = layers.Dense(expected_output, activation='softmax', name='label')(x)

model = keras.Model(inputs=inputs, outputs = outputs)
model.compile(optimizer=tf.keras.optimizers.Adam(0.001),
              loss='categorical_crossentropy',
              metrics=['accuracy', 'categorical_crossentropy'])

model.fit(final_dataset)
The output is:

     1/Unknown - 0s 15ms/step
---------------------------------------------------------------------------
IndexError                                Traceback (most recent call last)
<ipython-input-41-bb3547f32c4a> in <module>()
      8 loss='categorical_crossentropy', metrics=['accuracy','categorical_crossentropy'])
      9 
---> 10 model.fit(final_dataset )

/home/dbadmin/anaconda3/lib/python3.7/site-packages/tensorflow_core/python/keras/engine/training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_freq, max_queue_size, workers, use_multiprocessing, **kwargs)
    726         max_queue_size=max_queue_size,
    727         workers=workers,
--> 728         use_multiprocessing=use_multiprocessing)
    729 
    730   def evaluate(self,

/home/dbadmin/anaconda3/lib/python3.7/site-packages/tensorflow_core/python/keras/engine/training_v2.py in fit(self, model, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_freq, **kwargs)
    322                 mode=ModeKeys.TRAIN,
    323                 training_context=training_context,
--> 324                 total_epochs=epochs)
    325             cbks.make_logs(model, epoch_logs, training_result, ModeKeys.TRAIN)
    326 

/home/dbadmin/anaconda3/lib/python3.7/site-packages/tensorflow_core/python/keras/engine/training_v2.py in run_one_epoch(model, iterator, execution_function, dataset_size, batch_size, strategy, steps_per_epoch, num_samples, mode, training_context, total_epochs)
    121         step=step, mode=mode, size=current_batch_size) as batch_logs:
    122       try:
--> 123         batch_outs = execution_function(iterator)
    124       except (StopIteration, errors.OutOfRangeError):
    125         # TODO(kaftan): File bug about tf function and errors.OutOfRangeError?

/home/dbadmin/anaconda3/lib/python3.7/site-packages/tensorflow_core/python/keras/engine/training_v2_utils.py in execution_function(input_fn)
     84     # `numpy` translates Tensors to values in Eager mode.
     85     return nest.map_structure(_non_none_constant_value,
---> 86                               distributed_function(input_fn))
     87 
     88   return execution_function

/home/dbadmin/anaconda3/lib/python3.7/site-packages/tensorflow_core/python/eager/def_function.py in __call__(self, *args, **kwds)
    455 
    456     tracing_count = self._get_tracing_count()
--> 457     result = self._call(*args, **kwds)
    458     if tracing_count == self._get_tracing_count():
    459       self._call_counter.called_without_tracing()

/home/dbadmin/anaconda3/lib/python3.7/site-packages/tensorflow_core/python/eager/def_function.py in _call(self, *args, **kwds)
    501       # This is the first call of __call__, so we have to initialize.
    502       initializer_map = object_identity.ObjectIdentityDictionary()
--> 503       self._initialize(args, kwds, add_initializers_to=initializer_map)
    504     finally:
    505       # At this point we know that the initialization is complete (or less

/home/dbadmin/anaconda3/lib/python3.7/site-packages/tensorflow_core/python/eager/def_function.py in _initialize(self, args, kwds, add_initializers_to)
    406     self._concrete_stateful_fn = (
    407         self._stateful_fn._get_concrete_function_internal_garbage_collected(  # pylint: disable=protected-access
--> 408             *args, **kwds))
    409 
    410     def invalid_creator_scope(*unused_args, **unused_kwds):

/home/dbadmin/anaconda3/lib/python3.7/site-packages/tensorflow_core/python/eager/function.py in _get_concrete_function_internal_garbage_collected(self, *args, **kwargs)
   1846     if self.input_signature:
   1847       args, kwargs = None, None
-> 1848     graph_function, _, _ = self._maybe_define_function(args, kwargs)
   1849     return graph_function
   1850 

/home/dbadmin/anaconda3/lib/python3.7/site-packages/tensorflow_core/python/eager/function.py in _maybe_define_function(self, args, kwargs)
   2148         graph_function = self._function_cache.primary.get(cache_key, None)
   2149         if graph_function is None:
-> 2150           graph_function = self._create_graph_function(args, kwargs)
   2151           self._function_cache.primary[cache_key] = graph_function
   2152         return graph_function, args, kwargs

/home/dbadmin/anaconda3/lib/python3.7/site-packages/tensorflow_core/python/eager/function.py in _create_graph_function(self, args, kwargs, override_flat_arg_shapes)
   2039             arg_names=arg_names,
   2040             override_flat_arg_shapes=override_flat_arg_shapes,
-> 2041             capture_by_value=self._capture_by_value),
   2042         self._function_attributes,
   2043         # Tell the ConcreteFunction to clean up its graph once it goes out of

/home/dbadmin/anaconda3/lib/python3.7/site-packages/tensorflow_core/python/framework/func_graph.py in func_graph_from_py_func(name, python_func, args, kwargs, signature, func_graph, autograph, autograph_options, add_control_dependencies, arg_names, op_return_value, collections, capture_by_value, override_flat_arg_shapes)
    913                                           converted_func)
    914 
--> 915       func_outputs = python_func(*func_args, **func_kwargs)
    916 
    917       # invariant: `func_outputs` contains only Tensors, CompositeTensors,

/home/dbadmin/anaconda3/lib/python3.7/site-packages/tensorflow_core/python/eager/def_function.py in wrapped_fn(*args, **kwds)
    356         # __wrapped__ allows AutoGraph to swap in a converted function. We give
    357         # the function a weak reference to itself to avoid a reference cycle.
--> 358         return weak_wrapped_fn().__wrapped__(*args, **kwds)
    359     weak_wrapped_fn = weakref.ref(wrapped_fn)
    360 

/home/dbadmin/anaconda3/lib/python3.7/site-packages/tensorflow_core/python/keras/engine/training_v2_utils.py in distributed_function(input_iterator)
     71     strategy = distribution_strategy_context.get_strategy()
     72     outputs = strategy.experimental_run_v2(
---> 73         per_replica_function, args=(model, x, y, sample_weights))
     74     # Out of PerReplica outputs reduce or pick values to return.
     75     all_outputs = dist_utils.unwrap_output_dict(

/home/dbadmin/anaconda3/lib/python3.7/site-packages/tensorflow_core/python/distribute/distribute_lib.py in experimental_run_v2(self, fn, args, kwargs)
    758       fn = autograph.tf_convert(fn, ag_ctx.control_status_ctx(),
    759                                 convert_by_default=False)
--> 760       return self._extended.call_for_each_replica(fn, args=args, kwargs=kwargs)
    761 
    762   def reduce(self, reduce_op, value, axis):

/home/dbadmin/anaconda3/lib/python3.7/site-packages/tensorflow_core/python/distribute/distribute_lib.py in call_for_each_replica(self, fn, args, kwargs)
   1785       kwargs = {}
   1786     with self._container_strategy().scope():
-> 1787       return self._call_for_each_replica(fn, args, kwargs)
   1788 
   1789   def _call_for_each_replica(self, fn, args, kwargs):

/home/dbadmin/anaconda3/lib/python3.7/site-packages/tensorflow_core/python/distribute/distribute_lib.py in _call_for_each_replica(self, fn, args, kwargs)
   2130         self._container_strategy(),
   2131         replica_id_in_sync_group=constant_op.constant(0, dtypes.int32)):
-> 2132       return fn(*args, **kwargs)
   2133 
   2134   def _reduce_to(self, reduce_op, value, destinations):

/home/dbadmin/anaconda3/lib/python3.7/site-packages/tensorflow_core/python/autograph/impl/api.py in wrapper(*args, **kwargs)
    290   def wrapper(*args, **kwargs):
    291     with ag_ctx.ControlStatusCtx(status=ag_ctx.Status.DISABLED):
--> 292       return func(*args, **kwargs)
    293 
    294   if inspect.isfunction(func) or inspect.ismethod(func):

/home/dbadmin/anaconda3/lib/python3.7/site-packages/tensorflow_core/python/keras/engine/training_v2_utils.py in train_on_batch(model, x, y, sample_weight, class_weight, reset_metrics)
    262       y,
    263       sample_weights=sample_weights,
--> 264       output_loss_metrics=model._output_loss_metrics)
    265 
    266   if reset_metrics:

/home/dbadmin/anaconda3/lib/python3.7/site-packages/tensorflow_core/python/keras/engine/training_eager.py in train_on_batch(model, inputs, targets, sample_weights, output_loss_metrics)
    309           sample_weights=sample_weights,
    310           training=True,
--> 311           output_loss_metrics=output_loss_metrics))
    312   if not isinstance(outs, list):
    313     outs = [outs]

/home/dbadmin/anaconda3/lib/python3.7/site-packages/tensorflow_core/python/keras/engine/training_eager.py in _process_single_batch(model, inputs, targets, output_loss_metrics, sample_weights, training)
    250               output_loss_metrics=output_loss_metrics,
    251               sample_weights=sample_weights,
--> 252               training=training))
    253       if total_loss is None:
    254         raise ValueError('The model cannot be run '

/home/dbadmin/anaconda3/lib/python3.7/site-packages/tensorflow_core/python/keras/engine/training_eager.py in _model_loss(model, inputs, targets, output_loss_metrics, sample_weights, training)
    164 
    165         if hasattr(loss_fn, 'reduction'):
--> 166           per_sample_losses = loss_fn.call(targets[i], outs[i])
    167           weighted_losses = losses_utils.compute_weighted_loss(
    168               per_sample_losses,

IndexError: list index out of range
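
The failing frame, per_sample_losses = loss_fn.call(targets[i], outs[i]), points at the cause: the dataset yields one dict per element, so Keras finds no separate targets list to index. A minimal sketch of the usual fix (the name _parse_function_tuple is made up here) is to have the map return an (inputs, targets) tuple instead of a dict:

def _parse_function_tuple(example_proto):
    # Parse as before, then split the dict into an (inputs, targets) pair.
    parsed = tf.io.parse_single_example(example_proto, feature_description)
    return parsed['feature'], parsed['label']

dataset = tf.data.TFRecordDataset(filenames=[filenames])
final_dataset = (dataset
                 .map(_parse_function_tuple, num_parallel_calls=8)
                 .shuffle(buffer_size=number_of_sample)
                 .batch(10))
model.fit(final_dataset)  # Keras now receives (features, labels) batches

The answers below follow the same idea.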

One answer binds the dataset's tensors to the model directly, TF 1.x-style, using a one-shot iterator and target_tensors:

dataset = tf.data.TFRecordDataset(filenames=[filenames])
parsed_dataset = dataset.map(_parse_function, num_parallel_calls=8)
final_dataset = parsed_dataset.shuffle(buffer_size=number_of_sample).batch(10)

# Iterate the parsed, batched dataset (the raw `dataset` holds unparsed records).
iterator = final_dataset.make_one_shot_iterator()
parsed_record = iterator.get_next()
feature, label = parsed_record['feature'], parsed_record['label']

# Keras: pass the input tensor directly into the Input layer
inputs = keras.Input(shape=(32,), name='feature', tensor=feature)
# ... same model definition as above ...
model.compile(optimizer=tf.keras.optimizers.Adam(0.001),
              loss='categorical_crossentropy',
              metrics=['accuracy', 'categorical_crossentropy'],
              target_tensors=[label])

model.fit(epochs=30,
          steps_per_epoch=your_datasetsize / batch)
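
Note that make_one_shot_iterator and target_tensors are TF 1.x-style APIs; in TF 2.0 the iterator is only reachable as tf.compat.v1.data.make_one_shot_iterator(final_dataset). With TF 2.0's Keras it is usually simpler to pass a dataset of (features, labels) tuples straight to model.fit, as sketched after the traceback above.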
Related fragments of the same suggestion extract the tensors from a parsed record and chain shuffle/batch/repeat so the dataset can be read for several epochs:

feature, label = parsed_record['feature'], parsed_record['label']
ds = ds.shuffle(buffer_size=number_of_sample).batch(batch_size).repeat()
Another answer parses each record directly into a (features, labels) tuple, which is the structure Keras expects (the feature size of 4 and hero_vocab come from that answerer's own dataset):

def _parse_function_same_side(example_proto):
    """Extracts features and labels.

    Args:
      example_proto: a tf.Example protocol buffer message.

    Returns:
      A `tuple` `(features, labels)`:
        features: a tensor with the features.
        labels: a tensor with the corresponding labels.
    """
    feature_description = {
        "features": tf.io.FixedLenFeature(4, tf.int64),
        "label": tf.io.FixedLenFeature(1, tf.int64)
    }

    parsed_features = tf.io.parse_single_example(example_proto, feature_description)

    features = parsed_features['features']
    # hero_vocab is the answerer's label vocabulary; depth = number of classes.
    labels = tf.one_hot(parsed_features['label'], depth=len(hero_vocab))
    return features, labels
together with an input function that wires up the whole pipeline:

def _input_fn(input_filenames, num_epochs=None,
              shuffle=True, batch_size=50, compression_type=""):

    ds = tf.data.TFRecordDataset(input_filenames, compression_type=compression_type)
    # Use the tuple-returning parser defined above.
    ds = ds.map(_parse_function_same_side)

    # Only shuffle when the shuffle flag is set.
    if shuffle:
        ds = ds.shuffle(10000)

    # Group the examples into batches of `batch_size`.
    ds = ds.batch(batch_size)

    # Repeat so the TFRecord can be read for multiple epochs.
    ds = ds.repeat()

    # Return the dataset.
    return ds
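
For completeness, a minimal usage sketch; the file name is a placeholder, and the parser inside _input_fn would need the question's 32-feature/16-label feature_description:

train_ds = _input_fn(['train.tfrecord'], batch_size=10)

model.fit(train_ds,
          epochs=30,
          steps_per_epoch=number_of_sample // 10)  # required because the dataset repeats indefinitely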