TensorFlow.org에서 보기 | Google Colab에서 실행 | GitHub에서 소스 보기 | 노트북 다운로드 | TF Hub 모델 보기 |
이 노트북은 TF Hub에서 사용할 수 있는 BigGAN 이미지 생성기의 데모입니다.
이러한 모델에 대한 자세한 내용은 arXiv에 관한 BigGAN 논문 [1]을 참조하세요.
런타임에 연결한 후 다음 지침에 따라 시작합니다.
- (선택 사항) 다른 이미지 해상도에 대한 BigGAN 생성기를 로드하려면 아래 첫 번째 코드 셀에서 선택한
module_path
를 업데이트합니다. - Runtime > Run all을 클릭하여 각 셀을 순서대로 실행합니다.
- 나중에 슬라이더와 드롭다운 메뉴를 사용하여 설정을 수정하면 대화형 시각화가 자동으로 업데이트됩니다.
- 그렇지 않으면 셀 옆의 Play 버튼을 눌러 출력을 수동으로 다시 렌더링합니다.
참고: 문제가 발생하는 경우, Runtime > Restart and run all...을 클릭하여 런타임을 다시 시작하고 모든 셀을 처음부터 다시 실행하면 도움이 될 수 있습니다.
[1] Andrew Brock, Jeff Donahue, and Karen Simonyan. Large Scale GAN Training for High Fidelity Natural Image Synthesis.
먼저 모듈 경로를 설정합니다. 기본적으로 <a href="https://tfhub.dev/deepmind/biggan-deep-256/1">https://tfhub.dev/deepmind/biggan-deep-256/1</a>
에서 256x256 이미지의 BigGAN-deep 생성기를 로드합니다. 128x128 또는 512x512 이미지를 생성하거나 원래 BigGAN 생성기를 사용하려면 활성 module_path
설정을 주석 처리하고 다른 설정에 대한 주석 처리를 제거합니다.
# Select the BigGAN generator to load from TF Hub: uncomment exactly one
# module_path (and comment out the rest) to choose model family and resolution.
# BigGAN-deep models
# module_path = 'https://tfhub.dev/deepmind/biggan-deep-128/1'  # 128x128 BigGAN-deep
module_path = 'https://tfhub.dev/deepmind/biggan-deep-256/1'  # 256x256 BigGAN-deep
# module_path = 'https://tfhub.dev/deepmind/biggan-deep-512/1'  # 512x512 BigGAN-deep
# BigGAN (original) models
# module_path = 'https://tfhub.dev/deepmind/biggan-128/2'  # 128x128 BigGAN
# module_path = 'https://tfhub.dev/deepmind/biggan-256/2'  # 256x256 BigGAN
# module_path = 'https://tfhub.dev/deepmind/biggan-512/2'  # 512x512 BigGAN
설정
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import os
import io
import IPython.display
import numpy as np
import PIL.Image
from scipy.stats import truncnorm
import tensorflow_hub as hub
2022-12-14 22:14:16.695225: W tensorflow/compiler/xla/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libnvinfer.so.7'; dlerror: libnvinfer.so.7: cannot open shared object file: No such file or directory 2022-12-14 22:14:16.695341: W tensorflow/compiler/xla/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libnvinfer_plugin.so.7'; dlerror: libnvinfer_plugin.so.7: cannot open shared object file: No such file or directory 2022-12-14 22:14:16.695353: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Cannot dlopen some TensorRT libraries. If you would like to use Nvidia GPU with TensorRT, please make sure the missing libraries mentioned above are installed properly. WARNING:tensorflow:From /tmpfs/src/tf_docs_env/lib/python3.9/site-packages/tensorflow/python/compat/v2_compat.py:107: disable_resource_variables (from tensorflow.python.ops.variable_scope) is deprecated and will be removed in a future version. Instructions for updating: non-resource variables are not supported in the long term
TF Hub에서 BigGAN 생성기 모듈 로드하기
tf.reset_default_graph()
print('Loading BigGAN module from:', module_path)
module = hub.Module(module_path)
inputs = {k: tf.placeholder(v.dtype, v.get_shape().as_list(), k)
for k, v in module.get_input_info_dict().items()}
output = module(inputs)
print()
print('Inputs:\n', '\n'.join(
' {}: {}'.format(*kv) for kv in inputs.items()))
print()
print('Output:', output)
Loading BigGAN module from: https://tfhub.dev/deepmind/biggan-deep-256/1 INFO:tensorflow:Saver not created because there are no variables in the graph to restore INFO:tensorflow:Saver not created because there are no variables in the graph to restore Inputs: z: Tensor("z:0", shape=(?, 128), dtype=float32) y: Tensor("y:0", shape=(?, 1000), dtype=float32) truncation: Tensor("truncation:0", shape=(), dtype=float32) Output: Tensor("module_apply_default/G_trunc_output:0", shape=(?, 256, 256, 3), dtype=float32)
BigGAN 이미지 샘플링 및 표시를 위한 몇 가지 함수 정의하기
# Convenience handles to the generator's three inputs and their static sizes.
input_z = inputs['z']  # latent noise, shape [batch, dim_z]
input_y = inputs['y']  # class label (one-hot), shape [batch, vocab_size]
input_trunc = inputs['truncation']  # scalar truncation in (0, 1]
dim_z = input_z.shape.as_list()[1]  # latent dimensionality (128 per the log above)
vocab_size = input_y.shape.as_list()[1]  # number of ImageNet classes (1000)
def truncated_z_sample(batch_size, truncation=1., seed=None):
    """Draw a [batch_size, dim_z] latent batch from a truncated normal.

    Samples are drawn from N(0, 1) truncated to [-2, 2] and then scaled by
    `truncation`. A non-None `seed` makes the draw reproducible.
    """
    rng = np.random.RandomState(seed) if seed is not None else None
    draws = truncnorm.rvs(-2, 2, size=(batch_size, dim_z), random_state=rng)
    return truncation * draws
def one_hot(index, vocab_size=vocab_size):
    """Encode integer label(s) as a [num, vocab_size] one-hot float32 array."""
    index = np.atleast_1d(np.asarray(index))
    assert index.ndim == 1
    encoded = np.zeros((index.size, vocab_size), dtype=np.float32)
    encoded[np.arange(index.size), index] = 1
    return encoded
def one_hot_if_needed(label, vocab_size=vocab_size):
    """Return `label` as a 2-D one-hot array, encoding it first if needed."""
    arr = np.asarray(label)
    if arr.ndim <= 1:  # scalar or 1-D integer labels -> one-hot encode
        arr = one_hot(arr, vocab_size)
    assert arr.ndim == 2
    return arr
def sample(sess, noise, label, truncation=1., batch_size=8,
           vocab_size=vocab_size):
    """Generate uint8 images from the BigGAN module.

    Runs the generator in mini-batches of `batch_size` over `noise`
    ([num, dim_z]) and `label` (scalar, 1-D ints, or 2-D one-hot), then maps
    the [-1, 1] float output to uint8 pixels.
    """
    noise = np.asarray(noise)
    label = np.asarray(label)
    num = noise.shape[0]
    if label.ndim == 0:  # broadcast a single class over the whole batch
        label = np.asarray([label] * num)
    if label.shape[0] != num:
        raise ValueError('Got # noise samples ({}) != # label samples ({})'
                         .format(noise.shape[0], label.shape[0]))
    label = one_hot_if_needed(label, vocab_size)
    batches = []
    for start in range(0, num, batch_size):
        stop = min(num, start + batch_size)
        feed_dict = {input_z: noise[start:stop],
                     input_y: label[start:stop],
                     input_trunc: truncation}
        batches.append(sess.run(output, feed_dict=feed_dict))
    images = np.concatenate(batches, axis=0)
    assert images.shape[0] == num
    # Map generator output from [-1, 1] to [0, 255] uint8.
    images = np.clip(((images + 1) / 2.0) * 256, 0, 255)
    return np.uint8(images)
def interpolate(A, B, num_interps):
    """Linearly interpolate from A to B (inclusive) in `num_interps` steps.

    Returns an array of shape [num_interps, *A.shape]; the first entry is A
    and the last is B. Raises ValueError on shape mismatch.
    """
    if A.shape != B.shape:
        raise ValueError('A and B must have the same shape to interpolate.')
    weights = np.linspace(0, 1, num_interps)
    return np.array([(1 - w) * A + w * B for w in weights])
def imgrid(imarray, cols=5, pad=1):
    """Tile a batch of images into one grid image with white padding.

    Args:
      imarray: uint8 array of shape [N, H, W, C].
      cols: number of grid columns; rows are derived from N (last row is
        padded with blank (white) cells if N is not a multiple of cols).
      pad: width in pixels of the white separator between tiles.

    Returns:
      uint8 array of shape
      [rows*H + (rows-1)*pad, cols*W + (cols-1)*pad, C].

    Raises:
      ValueError: if imarray is not uint8, pad is negative, or cols < 1.
    """
    # Validate inputs explicitly (asserts would be stripped under `python -O`).
    if imarray.dtype != np.uint8:
        raise ValueError('imgrid input imarray must be uint8')
    pad = int(pad)
    if pad < 0:
        raise ValueError('pad must be non-negative')
    cols = int(cols)
    if cols < 1:
        raise ValueError('cols must be at least 1')
    N, H, W, C = imarray.shape
    rows = -(-N // cols)  # ceil(N / cols)
    batch_pad = rows * cols - N  # blank cells needed to fill the last row
    # Pad with white (255): extra blank images on the batch axis, plus `pad`
    # pixels on the bottom/right of every tile.
    post_pad = [batch_pad, pad, pad, 0]
    pad_arg = [[0, p] for p in post_pad]
    imarray = np.pad(imarray, pad_arg, 'constant', constant_values=255)
    H += pad
    W += pad
    # Rearrange [rows*cols, H, W, C] tiles into a single [rows*H, cols*W, C] image.
    grid = (imarray
            .reshape(rows, cols, H, W, C)
            .transpose(0, 2, 1, 3, 4)
            .reshape(rows * H, cols * W, C))
    if pad:
        # Drop the padding hanging off the bottom and right outer edges.
        grid = grid[:-pad, :-pad]
    return grid
def imshow(a, format='png', jpeg_fallback=True):
    """Render array `a` inline as an image in the notebook.

    Encodes `a` with PIL in the given `format`; if displaying fails with an
    IOError (e.g. the PNG is too large) and `jpeg_fallback` is set, retries
    once as JPEG.
    """
    a = np.asarray(a, dtype=np.uint8)
    buffer = io.BytesIO()
    PIL.Image.fromarray(a).save(buffer, format)
    encoded = buffer.getvalue()
    try:
        disp = IPython.display.display(IPython.display.Image(encoded))
    except IOError:
        if jpeg_fallback and format != 'jpeg':
            print(('Warning: image was too large to display in format "{}"; '
                   'trying jpeg instead.').format(format))
            return imshow(a, format='jpeg')
        else:
            raise
    return disp
TensorFlow 세션 생성 및 변수 초기화하기
# Create a TF1 session and initialize the graph's global variables
# (this materializes the module's weights in the session).
initializer = tf.global_variables_initializer()
sess = tf.Session()
sess.run(initializer)
특정 범주의 BigGAN 샘플 탐색하기
truncation
값을 변경해 봅니다.
(코드를 보려면 셀을 두 번 클릭합니다.)
Category-conditional sampling
# Category-conditional sampling: generate `num_samples` images of a single
# ImageNet class at the chosen truncation, and show them in a grid.
num_samples = 10
truncation = 0.4
noise_seed = 0
category = "933) cheeseburger"  # format: "<class index>) <class name>"
z = truncated_z_sample(num_samples, truncation, noise_seed)
y = int(category.split(')')[0])  # parse the integer class index from the string
ims = sample(sess, z, y, truncation=truncation)
imshow(imgrid(ims, cols=min(num_samples, 5)))
BigGAN 샘플 간에 보간하기
같은 noise_seed
로 다른 category
를 설정하거나 다른 noise_seed
로 같은 category
를 설정해 봅니다. 또는 더 과감하게 둘 모두 원하는 대로 설정해 보세요!
(코드를 보려면 셀을 두 번 클릭합니다.)
Interpolation
# Interpolation demo: walk `num_samples` independent paths of `num_interps`
# steps each between two (seed, category) endpoints, and show the grid.
num_samples = 2
num_interps = 5
truncation = 0.2
noise_seed_A = 0
category_A = "207) golden retriever"
noise_seed_B = 0
category_B = "8) hen"

def interpolate_and_shape(A, B, num_interps):
    """Interpolate A->B per sample, then flatten to [num_samples*num_interps, ...]."""
    interps = interpolate(A, B, num_interps)
    # Reorder so each sample's interpolation path is contiguous.
    return (interps.transpose(1, 0, *range(2, len(interps.shape)))
            .reshape(num_samples * num_interps, *interps.shape[2:]))

# Endpoint latents (one seed each) and one-hot labels (one category each).
z_A, z_B = (truncated_z_sample(num_samples, truncation, seed)
            for seed in (noise_seed_A, noise_seed_B))
y_A, y_B = (one_hot([int(cat.split(')')[0])] * num_samples)
            for cat in (category_A, category_B))
z_interp = interpolate_and_shape(z_A, z_B, num_interps)
y_interp = interpolate_and_shape(y_A, y_B, num_interps)
ims = sample(sess, z_interp, y_interp, truncation=truncation)
imshow(imgrid(ims, cols=num_interps))