生活随笔
收集整理的這篇文章主要介紹了
GAN生成对抗网络-CGAN原理与基本实现-条件生成对抗网络04
小編覺得挺不錯,現在分享給大家,供大家參考。
CGAN - 條件GAN
原始GAN的缺點
代碼實現
# Environment setup: TensorFlow/Keras, plotting, and the MNIST input pipeline.
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import matplotlib.pyplot as plt
# NOTE(review): the original contained the IPython magic `%matplotlib inline`,
# which is a SyntaxError in a plain .py file; it belongs only in a notebook.
import numpy as np
import glob

# Enable GPU memory growth only when a GPU is actually present, so the script
# also runs on CPU-only machines instead of crashing on `gpu[0]`.
gpu = tf.config.experimental.list_physical_devices(device_type='GPU')
if gpu:
    tf.config.experimental.set_memory_growth(gpu[0], True)

import tensorflow.keras.datasets.mnist as mnist

# Load MNIST and rescale pixels from [0, 255] to [-1, 1] to match the
# generator's tanh output range.
(train_image, train_label), (_, _) = mnist.load_data()
train_image = train_image / 127.5 - 1
train_image = np.expand_dims(train_image, -1)  # -> (60000, 28, 28, 1)
train_image.shape

dataset = tf.data.Dataset.from_tensor_slices((train_image, train_label))
AUTOTUNE = tf.data.experimental.AUTOTUNE
BATCH_SIZE = 256
image_count = train_image.shape[0]
noise_dim = 50
# Shuffle over the whole dataset, then batch (last batch may be smaller).
dataset = dataset.shuffle(image_count).batch(BATCH_SIZE)
def generator_model():
    """Build the conditional generator.

    Inputs: a (noise_dim,) latent vector and a scalar digit label (0-9).
    Output: a 28x28x1 image with tanh-scaled pixels in [-1, 1].
    """
    noise_in = layers.Input(shape=(noise_dim,))
    label_in = layers.Input(shape=())

    # Embed the class label and concatenate it with the noise vector.
    emb = layers.Embedding(10, 50, input_length=1)(label_in)
    emb = layers.Flatten()(emb)
    h = layers.concatenate([noise_in, emb])

    # Project to a 3x3x128 feature map, then upsample 3 -> 7 -> 14 -> 28.
    h = layers.Dense(3 * 3 * 128, use_bias=False)(h)
    h = layers.Reshape((3, 3, 128))(h)
    h = layers.BatchNormalization()(h)
    h = layers.ReLU()(h)

    # First transpose conv uses 'valid' padding: (3-1)*2 + 3 = 7.
    h = layers.Conv2DTranspose(64, (3, 3), strides=(2, 2), use_bias=False)(h)
    h = layers.BatchNormalization()(h)
    h = layers.ReLU()(h)

    h = layers.Conv2DTranspose(32, (3, 3), strides=(2, 2), padding='same',
                               use_bias=False)(h)
    h = layers.BatchNormalization()(h)
    h = layers.ReLU()(h)

    h = layers.Conv2DTranspose(1, (3, 3), strides=(2, 2), padding='same',
                               use_bias=False)(h)
    out = layers.Activation('tanh')(h)

    return tf.keras.Model(inputs=[noise_in, label_in], outputs=out)
def discriminator_model():
    """Build the conditional discriminator.

    Inputs: a 28x28x1 image and its scalar digit label (0-9).
    Output: one unnormalized logit per example (real vs. fake).
    """
    image_in = tf.keras.Input(shape=(28, 28, 1))
    label_in = tf.keras.Input(shape=())

    # Embed the label into a 28x28 map and stack it onto the image as an
    # extra channel, so every conv layer sees the conditioning signal.
    cond = layers.Embedding(10, 28 * 28, input_length=1)(label_in)
    cond = layers.Reshape((28, 28, 1))(cond)
    h = layers.concatenate([image_in, cond])

    # Three strided conv blocks, 32/64/128 filters: 28 -> 14 -> 7 -> 4.
    for filters in (32, 32 * 2, 32 * 4):
        h = layers.Conv2D(filters, (3, 3), strides=(2, 2), padding='same',
                          use_bias=False)(h)
        h = layers.BatchNormalization()(h)
        h = layers.LeakyReLU()(h)
        h = layers.Dropout(0.5)(h)

    h = layers.Flatten()(h)
    logit = layers.Dense(1)(h)

    return tf.keras.Model(inputs=[image_in, label_in], outputs=logit)
# Instantiate both networks once at module level.
generator = generator_model()
discriminator = discriminator_model()

# Both losses operate on raw logits (from_logits=True): the discriminator's
# final Dense(1) has no activation.
binary_cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)
# NOTE(review): defined but never used below — presumably left over from an
# ACGAN variant with an auxiliary classification head.
category_cross_entropy = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
def discriminator_loss(real_output, fake_output):
    """Total discriminator loss: real logits pushed toward 1, fake toward 0."""
    loss_on_real = binary_cross_entropy(tf.ones_like(real_output), real_output)
    loss_on_fake = binary_cross_entropy(tf.zeros_like(fake_output), fake_output)
    return loss_on_real + loss_on_fake
def generator_loss(fake_output):
    """Generator loss: reward fakes that the discriminator scores as real (1)."""
    return binary_cross_entropy(tf.ones_like(fake_output), fake_output)
# Separate Adam optimizers so the two networks keep independent moment
# estimates; a small learning rate (1e-5) for stable adversarial training.
generator_optimizer = tf.keras.optimizers.Adam(1e-5)
discriminator_optimizer = tf.keras.optimizers.Adam(1e-5)
@tf.function
def train_step(images, labels):
    """One simultaneous generator/discriminator update on a single batch.

    Args:
        images: real images, shape (batch, 28, 28, 1), values in [-1, 1].
        labels: matching integer digit labels, shape (batch,).
    """
    # Use the dynamic tensor shape rather than the static `labels.shape[0]`:
    # the dataset is batched without drop_remainder, so under @tf.function the
    # static batch dimension can be unknown (None) for the smaller final
    # batch, which would break tf.random.normal.
    batchsize = tf.shape(labels)[0]
    noise = tf.random.normal([batchsize, noise_dim])

    with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
        generated_images = generator((noise, labels), training=True)
        real_output = discriminator((images, labels), training=True)
        fake_output = discriminator((generated_images, labels), training=True)
        gen_loss = generator_loss(fake_output)
        disc_loss = discriminator_loss(real_output, fake_output)

    gradients_of_generator = gen_tape.gradient(
        gen_loss, generator.trainable_variables)
    gradients_of_discriminator = disc_tape.gradient(
        disc_loss, discriminator.trainable_variables)
    generator_optimizer.apply_gradients(
        zip(gradients_of_generator, generator.trainable_variables))
    discriminator_optimizer.apply_gradients(
        zip(gradients_of_discriminator, discriminator.trainable_variables))
# Fixed evaluation inputs so progress images are comparable across epochs.
noise_dim = 50  # NOTE(review): re-assigns the same value defined earlier
num = 10
noise_seed = tf.random.normal([num, noise_dim])
cat_seed = np.random.randint(0, 10, size=(num, 1))
print(cat_seed.T)  # show which digit each of the 10 columns will render
def generate_images(model, test_noise_input, test_cat_input, epoch):
    """Plot one row of digits produced by `model` for the fixed seed inputs.

    Pixels are mapped from the generator's [-1, 1] range back to [0, 1]
    for display.
    """
    print('Epoch:', epoch + 1)
    predictions = model((test_noise_input, test_cat_input), training=False)
    predictions = tf.squeeze(predictions)  # (num, 28, 28, 1) -> (num, 28, 28)
    fig = plt.figure(figsize=(10, 1))
    for idx in range(predictions.shape[0]):
        plt.subplot(1, 10, idx + 1)
        plt.imshow((predictions[idx, :, :] + 1) / 2)
        plt.axis('off')
    plt.show()
def train(dataset, epochs):
    """Run the full training loop, previewing generated digits periodically.

    NOTE(review): the indentation of the two `generate_images` calls was
    ambiguous in the original source; this reading previews every 10th epoch
    and once more after training finishes — confirm against the notebook.
    """
    for epoch in range(epochs):
        for image_batch, label_batch in dataset:
            train_step(image_batch, label_batch)
        if epoch % 10 == 0:
            generate_images(generator, noise_seed, cat_seed, epoch)
    generate_images(generator, noise_seed, cat_seed, epoch)
# Kick off the full training run.
EPOCHS = 200
train(dataset, EPOCHS)
總結
以上是生活随笔 為你收集整理的GAN生成对抗网络-CGAN原理与基本实现-条件生成对抗网络04 的全部內容,希望文章能夠幫你解決所遇到的問題。
如果覺得生活随笔 網站內容還不錯,歡迎將生活随笔 推薦給好友。