GAN Generative Adversarial Networks: DCGAN Principles and a Basic Implementation (Deep Convolutional GAN, Part 03)
What is DCGAN

DCGAN (Deep Convolutional GAN, Radford et al., 2015) is a GAN in which both the generator and the discriminator are built from convolutional layers rather than fully connected ones. Its main design guidelines are: replace pooling with strided convolutions in the discriminator and transposed (fractionally strided) convolutions in the generator, use batch normalization in both networks, drop fully connected hidden layers, and give the generator a tanh output layer. The implementation below follows this recipe to generate MNIST handwritten digits.
Implementation Code
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import matplotlib.pyplot as plt
%matplotlib inline  # Jupyter notebook magic
import numpy as np
import glob
import os

# Let GPU memory grow on demand instead of being allocated all at once.
gpus = tf.config.experimental.list_physical_devices(device_type='GPU')
for gpu in gpus:
    tf.config.experimental.set_memory_growth(gpu, True)
gpu_ok = tf.test.is_gpu_available()
print("tf version:", tf.__version__)
print("use GPU", gpu_ok)

# Load MNIST and scale the images from [0, 255] to [-1, 1] to match the generator's tanh output.
(train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.mnist.load_data()
train_images = train_images.reshape(train_images.shape[0], 28, 28, 1).astype("float32")
train_images = (train_images - 127.5) / 127.5

BATCH_SIZE = 256
BUFFER_SIZE = 60000
datasets = tf.data.Dataset.from_tensor_slices(train_images)
datasets = datasets.shuffle(BUFFER_SIZE).batch(BATCH_SIZE)
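As a quick sanity check (my addition, not part of the original post), you can pull one batch from the pipeline and confirm its shape and value range:

# One batch should be (256, 28, 28, 1) with values roughly in [-1, 1].
sample_batch = next(iter(datasets))
print(sample_batch.shape)
print(float(tf.reduce_min(sample_batch)), float(tf.reduce_max(sample_batch)))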
Building the Models
def generator_model():
    model = keras.Sequential()
    # Project the 100-dimensional noise vector to a 7*7*256 feature map.
    model.add(layers.Dense(7 * 7 * 256, input_shape=(100,), use_bias=False))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())

    model.add(layers.Reshape((7, 7, 256)))

    # Upsample: (7, 7, 256) -> (7, 7, 128) -> (14, 14, 64) -> (28, 28, 1).
    model.add(layers.Conv2DTranspose(128, (5, 5), strides=(1, 1), padding="same", use_bias=False))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())

    model.add(layers.Conv2DTranspose(64, (5, 5), strides=(2, 2), padding="same", use_bias=False))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())

    # tanh keeps the output in [-1, 1], matching the preprocessed training images.
    model.add(layers.Conv2DTranspose(1, (5, 5), strides=(2, 2), padding="same", use_bias=False, activation="tanh"))
    return model
def discriminator_model():
    model = keras.Sequential()
    # Downsample with strided convolutions: 28x28 -> 14x14 -> 7x7 -> 4x4.
    model.add(layers.Conv2D(64, (5, 5), strides=(2, 2), padding="same", input_shape=(28, 28, 1)))
    model.add(layers.LeakyReLU())
    model.add(layers.Dropout(0.3))

    model.add(layers.Conv2D(128, (5, 5), strides=(2, 2), padding="same"))
    model.add(layers.LeakyReLU())
    model.add(layers.Dropout(0.3))

    model.add(layers.Conv2D(256, (5, 5), strides=(2, 2), padding="same"))
    model.add(layers.LeakyReLU())

    # A single unbounded logit per image; the sigmoid is applied inside the loss.
    model.add(layers.Flatten())
    model.add(layers.Dense(1))
    return model
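The discriminator mirrors the generator: three stride-2 convolutions shrink the image down to a 4x4 feature map, and Dense(1) outputs one raw score per image. A quick check, again my own sketch:

# One random 28x28 image in, one logit out: the output shape should be (1, 1).
d = discriminator_model()
print(d(tf.random.normal([1, 28, 28, 1])).shape)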
cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)

def discriminator_loss(real_out, fake_out):
    # Real images should be classified as 1, generated images as 0.
    real_loss = cross_entropy(tf.ones_like(real_out), real_out)
    fake_loss = cross_entropy(tf.zeros_like(fake_out), fake_out)
    return real_loss + fake_loss

def generator_loss(fake_out):
    # The generator wants its fakes to be classified as real (label 1).
    return cross_entropy(tf.ones_like(fake_out), fake_out)
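Because from_logits=True, the raw Dense(1) outputs go straight into the loss and the sigmoid is applied internally. A tiny numeric illustration (my addition, values are approximate):

# A confident "real" logit scored against label 1 gives a small loss (about 0.018),
# while the same logit scored against label 0 gives a large loss (about 4.02).
logit = tf.constant([[4.0]])
print(float(cross_entropy(tf.ones_like(logit), logit)))
print(float(cross_entropy(tf.zeros_like(logit), logit)))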
generator_opt = tf.keras.optimizers.Adam(1e-4)
discriminator_opt = tf.keras.optimizers.Adam(1e-4)

EPOCHS = 100
noise_dim = 100
num_exp_to_generate = 16
# A fixed noise batch so the same 16 samples are visualised after every epoch.
seed = tf.random.normal([num_exp_to_generate, noise_dim])
generator = generator_model()
discriminator = discriminator_model()
def train_step(images):
    noise = tf.random.normal([BATCH_SIZE, noise_dim])
    with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
        real_out = discriminator(images, training=True)
        gen_image = generator(noise, training=True)
        fake_out = discriminator(gen_image, training=True)
        gen_loss = generator_loss(fake_out)
        disc_loss = discriminator_loss(real_out, fake_out)
    # Compute and apply the gradients for each network separately.
    gradient_gen = gen_tape.gradient(gen_loss, generator.trainable_variables)
    gradient_disc = disc_tape.gradient(disc_loss, discriminator.trainable_variables)
    generator_opt.apply_gradients(zip(gradient_gen, generator.trainable_variables))
    discriminator_opt.apply_gradients(zip(gradient_disc, discriminator.trainable_variables))
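As written, train_step runs eagerly. If you want to speed training up, TensorFlow's tf.function can compile it into a graph; this is optional and not part of the original post:

# Optional: graph-compile the step (behaviour is unchanged, only execution speed).
train_step = tf.function(train_step)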
def genrate_plot_image(gen_model, test_noise):
    pre_images = gen_model(test_noise, training=False)
    fig = plt.figure(figsize=(4, 4))
    for i in range(pre_images.shape[0]):
        plt.subplot(4, 4, i + 1)
        # Rescale from [-1, 1] back to [0, 1] for display.
        plt.imshow((pre_images[i, :, :, 0] + 1) / 2, cmap="gray")
        plt.axis("off")
    plt.show()
def train(dataset, epochs):
    for epoch in range(epochs):
        for image_batch in dataset:
            train_step(image_batch)
            print(".", end="")
        # Preview the same fixed noise batch at the end of each epoch.
        genrate_plot_image(generator, seed)

train(datasets, EPOCHS)
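After training, the same helper can be reused to sample brand-new digits from fresh noise (my addition, not from the article):

# Draw a new batch of noise and plot 16 newly generated digits.
genrate_plot_image(generator, tf.random.normal([num_exp_to_generate, noise_dim]))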
Summary

The code above builds a DCGAN in TensorFlow 2 / Keras: a generator that upsamples 100-dimensional noise into 28x28 digit images with transposed convolutions, and a convolutional discriminator, trained adversarially on MNIST with binary cross-entropy losses and separate Adam optimizers.