from absl import logging
import imageio
import PIL.Image
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf

# Fix the global seed so latent-vector sampling is reproducible across runs.
tf.random.set_seed(0)

import tensorflow_hub as hub
from tensorflow_docs.vis import embed
import time

try:
    from google.colab import files
except ImportError:
    # Not running in Colab; the file-download helpers are simply unavailable.
    pass

from IPython import display
from skimage import transform

# We could retrieve this value from module.get_input_shapes() if we didn't know
# beforehand which module we will be using.
latent_dim = 512


def interpolate_hypersphere(v1, v2, num_steps):
    """Interpolate between two vectors along the hypersphere of radius |v1|.

    `v2` is first rescaled to have the same norm as `v1`; each chord point is
    then projected back onto that sphere, so the path follows the sphere
    rather than the straight line between the endpoints. The inputs must be
    non-zero and must not both lie on a line through the origin.

    Args:
      v1: Non-zero start vector (tensor-like).
      v2: Non-zero end vector.
      num_steps: Number of interpolation points (>= 1), endpoints included.

    Returns:
      A tensor of shape (num_steps, ...) stacking the interpolated vectors.
    """
    v1_norm = tf.norm(v1)
    v2_norm = tf.norm(v2)
    v2_normalized = v2 * (v1_norm / v2_norm)

    # Edge case: with a single step the interpolation formula below would
    # divide by zero; the only sensible result is the start vector itself.
    if num_steps == 1:
        return tf.stack([v1])

    vectors = []
    for step in range(num_steps):
        interpolated = v1 + (v2_normalized - v1) * step / (num_steps - 1)
        # Project the chord point back onto the sphere of radius |v1|.
        interpolated_norm = tf.norm(interpolated)
        interpolated_normalized = interpolated * (v1_norm / interpolated_norm)
        vectors.append(interpolated_normalized)
    return tf.stack(vectors)


def display_image(image, fn='image.jpeg'):
    """Save `image` to `fn` as uint8 and embed the file for notebook display."""
    image = tf.constant(image)
    image = tf.image.convert_image_dtype(image, tf.uint8)
    PIL.Image.fromarray(image.numpy()).save(fn)
    return embed.embed_file(fn)


def animate(images, fn='animation.gif'):
    """Write a sequence of [0, 1]-valued float images to a GIF and embed it."""
    images = np.array(images)
    converted_images = np.clip(images * 255, 0, 255).astype(np.uint8)
    imageio.mimsave(fn, converted_images)
    return embed.embed_file(fn)


# Silence absl / TF Hub INFO and WARNING chatter.
logging.set_verbosity(logging.ERROR)
## 1. Latent space interpolation
Latent space interpolation between two randomly initialized vectors. We will use the TF Hub module `progan-128`, which contains a pre-trained Progressive GAN.
After defining a loss function between the target image and the image generated by a latent space variable, we can use gradient descent to find variable values that minimize the loss.
def find_closest_latent_vector(initial_vector, num_optimization_steps,
                               steps_per_image):
    """Gradient-descend in latent space toward the target image.

    Starting from `initial_vector`, repeatedly generates an image with the
    module-level `progan` generator and uses Adam to minimize the summed
    absolute pixel difference to the module-level `target_image`, plus a
    regularizer that keeps the latent vector's norm near sqrt(latent_dim),
    the expected norm under the normal distribution the generator was
    trained on.

    Args:
      initial_vector: Starting latent vector compatible with `progan`.
      num_optimization_steps: Total number of Adam update steps.
      steps_per_image: Snapshot the generated image every this many steps.

    Returns:
      A tuple (images, losses): the list of snapshot images as numpy arrays
      and the per-step loss values.
    """
    images = []
    losses = []

    vector = tf.Variable(initial_vector)
    optimizer = tf.optimizers.Adam(learning_rate=0.01)
    loss_fn = tf.losses.MeanAbsoluteError(reduction="sum")

    for step in range(num_optimization_steps):
        # Progress indicator: one dot per step, newline every 100 steps.
        if (step % 100) == 0:
            print()
        print('.', end='')

        with tf.GradientTape() as tape:
            image = progan(vector.read_value())['default'][0]
            if (step % steps_per_image) == 0:
                images.append(image.numpy())
            # Drop a possible alpha channel from the target before comparing.
            target_image_difference = loss_fn(image, target_image[:, :, :3])
            # The latent vectors were sampled from a normal distribution. We
            # can get more realistic images if we regularize the length of the
            # latent vector to the average length of vectors from this
            # distribution.
            regularizer = tf.abs(tf.norm(vector) - np.sqrt(latent_dim))
            loss = target_image_difference + regularizer
            losses.append(loss.numpy())

        grads = tape.gradient(loss, [vector])
        optimizer.apply_gradients(zip(grads, [vector]))

    return images, losses


num_optimization_steps = 200
steps_per_image = 5
images, loss = find_closest_latent_vector(
    initial_vector, num_optimization_steps, steps_per_image)