生活随笔
收集整理的這篇文章主要介紹了
TensorFlow 2.0 - 自定义模型、训练过程
小編覺得挺不錯的,現在分享給大家,幫大家做個參考。
文章目錄
學習于:簡單粗暴 TensorFlow 2
1. 自定義模型
- 重載 call() 方法,pytorch 是重載 forward() 方法
import tensorflow as tf

# Toy regression data: two samples, three features each, one scalar target per sample.
X = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
y = tf.constant([[10.0], [20.0]])


class Linear(tf.keras.Model):
    """A linear regression model: a single Dense unit with no activation.

    Subclassing `tf.keras.Model` and overriding `call()` is the TF2
    counterpart of overriding `forward()` in PyTorch.
    """

    def __init__(self):
        super().__init__()
        # One output unit; kernel and bias both start at zero so the
        # first prediction is exactly 0.
        self.dense = tf.keras.layers.Dense(
            units=1,
            activation=None,
            kernel_initializer=tf.zeros_initializer(),
            bias_initializer=tf.zeros_initializer(),
        )

    def call(self, inputs):
        """Forward pass: returns X @ w + b."""
        return self.dense(inputs)


model = Linear()
optimizer = tf.keras.optimizers.SGD(learning_rate=0.001)
for i in range(100):
    with tf.GradientTape() as tape:
        y_pred = model(X)
        # Mean squared error over the batch.
        loss = tf.reduce_mean(tf.square(y_pred - y))
    # Differentiate the loss w.r.t. all trainable variables and step.
    grads = tape.gradient(loss, model.variables)
    optimizer.apply_gradients(grads_and_vars=zip(grads, model.variables))
2. 學習流程
- 加載手寫數字數據集
import numpy as np
import tensorflow as tf


class MNistLoader():
    """Loads MNIST via tf.keras.datasets and serves random training batches."""

    def __init__(self):
        data = tf.keras.datasets.mnist
        (self.train_data, self.train_label), (self.test_data, self.test_label) = data.load_data()
        # Scale pixels to [0, 1] and append a channel axis:
        # (N, 28, 28) uint8 -> (N, 28, 28, 1) float32.
        self.train_data = np.expand_dims(self.train_data.astype(np.float32) / 255.0, axis=-1)
        self.test_data = np.expand_dims(self.test_data.astype(np.float32) / 255.0, axis=-1)
        # Integer class labels for sparse categorical losses/metrics.
        self.train_label = self.train_label.astype(np.int32)
        self.test_label = self.test_label.astype(np.int32)
        self.num_train_data, self.num_test_data = self.train_data.shape[0], self.test_data.shape[0]

    def get_batch(self, batch_size):
        """Return a random batch (sampled with replacement) of images and labels."""
        idx = np.random.randint(0, self.num_train_data, batch_size)
        return self.train_data[idx, :], self.train_label[idx]
class MLPmodel(tf.keras.Model):
    """Two-layer MLP classifier for MNIST.

    Architecture: Flatten -> Dense(100, relu) -> Dense(10) -> softmax,
    so `call()` returns per-class probabilities, not logits.
    """

    def __init__(self):
        super().__init__()
        self.flatten = tf.keras.layers.Flatten()
        self.dense1 = tf.keras.layers.Dense(units=100, activation='relu')
        # Final layer has no activation; softmax is applied in call().
        self.dense2 = tf.keras.layers.Dense(units=10)

    def call(self, inputs):
        """Forward pass: (batch, 28, 28, 1) images -> (batch, 10) probabilities."""
        x = self.flatten(inputs)
        x = self.dense1(x)
        x = self.dense2(x)
        # Convert logits to class probabilities.
        return tf.nn.softmax(x)
# Hyperparameters for MNIST training.
num_epochs = 5
batch_size = 50
learning_rate = 1e-4

mymodel = MLPmodel()
data_loader = MNistLoader()
optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)
# Total optimization steps: batches per epoch times number of epochs.
num_batches = int(data_loader.num_train_data // batch_size * num_epochs)

for idx in range(num_batches):
    X, y = data_loader.get_batch(batch_size)
    with tf.GradientTape() as tape:
        y_pred = mymodel(X)
        # Sparse CE accepts integer labels directly; y_pred is already
        # softmax probabilities, so the default from_logits=False is correct.
        loss = tf.keras.losses.sparse_categorical_crossentropy(y_true=y, y_pred=y_pred)
        loss = tf.reduce_mean(loss)
    print("batch {}, loss {}".format(idx, loss.numpy()))
    grads = tape.gradient(loss, mymodel.variables)
    optimizer.apply_gradients(grads_and_vars=zip(grads, mymodel.variables))
# Evaluate on the test set, accumulating accuracy batch by batch.
sparse_categorical_accuracy = tf.keras.metrics.SparseCategoricalAccuracy()
num_batches = int(data_loader.num_test_data // batch_size)
for idx in range(num_batches):
    start, end = idx * batch_size, (idx + 1) * batch_size
    y_pred = mymodel.predict(data_loader.test_data[start:end])
    # The metric aggregates across update_state() calls.
    sparse_categorical_accuracy.update_state(
        y_true=data_loader.test_label[start:end], y_pred=y_pred)
print("test 準確率:{}".format(sparse_categorical_accuracy.result()))
總結(jié)
以上是生活随笔為你收集整理的TensorFlow 2.0 - 自定义模型、训练过程的全部內容,希望文章能夠幫你解決所遇到的問題。
如果覺得生活随笔網站內容還不錯,歡迎將生活随笔推薦給好友。