How to use CosineDecayRestarts in TF 2.2 (Python)

Here is my code:

def make_model(nh, lr_scheduler):
    z = L.Input((nh,), name="Patient")
    x = L.Dense(100, activation="relu", name="d1")(z)
    x = L.Dense(100, activation="relu", name="d2")(x)
    #x = L.Dense(100, activation="relu", name="d3")(x)
    p1 = L.Dense(3, activation="linear", name="p1")(x)
    p2 = L.Dense(3, activation="relu", name="p2")(x)
    preds = L.Lambda(lambda x: x[0] + tf.cumsum(x[1], axis=1),
                     name="preds")([p1, p2])

    model = M.Model(z, preds, name="CNN")
    #model.compile(loss=qloss, optimizer="adam", metrics=[score])
    model.compile(loss=mloss(0.8),
                  optimizer=tf.keras.optimizers.Adam(
                      lr=tf.keras.experimental.CosineDecayRestarts(
                          0.1, iters/4, t_mul=2.0, m_mul=1.0, alpha=0.0, name=None),
                      beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.01, amsgrad=False),
                  metrics=[score])
    return model
I just want to try the CosineDecayRestarts learning-rate scheduler. I checked the TF 2.2 API and I think this is the right way to use it, but it gives an error:


Can anyone help?
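
For reference, TF 2.2 documents tf.keras.experimental.CosineDecayRestarts as a LearningRateSchedule object that is passed directly as the optimizer's learning_rate. A minimal sketch, assuming a placeholder first_decay_steps (the legacy lr and decay arguments predate schedules, so they are left at their defaults here):

import tensorflow as tf

# Placeholder: length of the first decay cycle, in optimizer steps.
first_decay_steps = 1000

lr_schedule = tf.keras.experimental.CosineDecayRestarts(
    initial_learning_rate=0.1,
    first_decay_steps=first_decay_steps,
    t_mul=2.0, m_mul=1.0, alpha=0.0)

# A LearningRateSchedule is passed as learning_rate and is
# re-evaluated at every optimizer step.
optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule)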

I had the same problem, so I wrote my own.

It is used as a callback (like the other schedulers) and it seems to work well; there is a usage sketch after the class. Hope you find it useful.

import math

import tensorflow as tf
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops


class CosineDecayRestarts(tf.keras.callbacks.Callback):
    """Cosine decay with warm restarts (SGDR), applied as a Keras callback."""

    def __init__(self, initial_learning_rate, first_decay_steps, alpha=0.0, t_mul=2.0, m_mul=1.0):
        super(CosineDecayRestarts, self).__init__()
        self.initial_learning_rate = initial_learning_rate
        self.first_decay_steps = first_decay_steps
        self.alpha = alpha        # floor of the learning rate, as a fraction of the initial rate
        self.t_mul = t_mul        # growth factor for the length of each successive cycle
        self.m_mul = m_mul        # decay factor for the peak learning rate at each restart
        self.batch_step = 0       # global batch counter (the `batch` argument resets every epoch)

    def on_train_batch_begin(self, batch, logs=None):
        if not hasattr(self.model.optimizer, "lr"):
            raise ValueError('Optimizer must have a "lr" attribute.')
        # Get the current learning rate from the model's optimizer.
        lr = float(tf.keras.backend.get_value(self.model.optimizer.learning_rate))
        # Call the schedule function to get the scheduled learning rate.
        scheduled_lr = self.schedule(self.batch_step, lr)
        # Set the value back on the optimizer before this batch runs.
        tf.keras.backend.set_value(self.model.optimizer.lr, scheduled_lr)
        self.batch_step += 1

    def schedule(self, step, lr):
        def compute_step(completed_fraction, geometric=False):
            """Map overall progress to (restart index, fraction within the current cycle)."""
            if geometric:
                # Cycle lengths grow geometrically by t_mul; invert the geometric
                # series to find which restart we are in.
                i_restart = math_ops.floor(
                    math_ops.log(1.0 - completed_fraction * (1.0 - self.t_mul)) /
                    math_ops.log(self.t_mul))

                sum_r = (1.0 - self.t_mul**i_restart) / (1.0 - self.t_mul)
                completed_fraction = (completed_fraction - sum_r) / self.t_mul**i_restart
            else:
                # All cycles have equal length when t_mul == 1.
                i_restart = math_ops.floor(completed_fraction)
                completed_fraction -= i_restart

            return i_restart, completed_fraction

        completed_fraction = step / self.first_decay_steps

        i_restart, completed_fraction = control_flow_ops.cond(
            math_ops.equal(self.t_mul, 1.0),
            lambda: compute_step(completed_fraction, geometric=False),
            lambda: compute_step(completed_fraction, geometric=True))

        # Each restart lowers the peak learning rate by a factor of m_mul.
        m_fac = self.m_mul**i_restart
        cosine_decayed = 0.5 * m_fac * (1.0 + math_ops.cos(
            constant_op.constant(math.pi) * completed_fraction))
        # alpha sets the floor the decay settles to, as a fraction of the initial rate.
        decayed = (1 - self.alpha) * cosine_decayed + self.alpha

        return math_ops.multiply(self.initial_learning_rate, decayed)
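
A minimal usage sketch (the model and data below are placeholders invented for illustration; any optimizer that exposes an lr attribute should work):

import numpy as np
import tensorflow as tf

# Placeholder toy model and data.
model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
model.compile(loss="mse", optimizer=tf.keras.optimizers.Adam())

x = np.random.rand(256, 4).astype("float32")
y = np.random.rand(256, 1).astype("float32")

# First cycle lasts 100 batches; each later cycle is twice as long (t_mul=2.0).
lr_callback = CosineDecayRestarts(initial_learning_rate=0.1, first_decay_steps=100)

model.fit(x, y, batch_size=32, epochs=5, callbacks=[lr_callback])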