!pip install tensorflow  # the separate "tensorflow-gpu" package was retired; "tensorflow" includes GPU support
import numpy as np
def fizz_buzz(x):
    """Return the fizz buzz label for every element of *x*.

    >> print(fizz_buzz(np.arange(1, 16, 1)))
    ['1' '2' 'Fizz' '4' 'Buzz' 'Fizz' '7' '8' 'Fizz' 'Buzz' '11' 'Fizz' '13'
     '14' 'FizzBuzz']

    Args:
        x(ndarray): integer numpy array of any shape; values need not be
            consecutive. Negative values follow Python's ``%`` semantics
            (no error is raised).

    Return:
        result(ndarray): fizz buzz labels (dtype=str), same shape as ``x``.
    """
    # astype(str) on an integer array yields a fixed-width unicode dtype
    # wide enough to also hold "FizzBuzz" without truncation.
    labels = x.astype("str")
    # Apply masks in order; 15 comes last so it overrides Fizz/Buzz.
    for divisor, word in ((3, "Fizz"), (5, "Buzz"), (15, "FizzBuzz")):
        labels[x % divisor == 0] = word
    return labels
# Smoke-test fizz_buzz on a 1-D range and on a 2-D (reshaped) range.
print(fizz_buzz(np.arange(1, 16, 1)))
print(fizz_buzz(np.arange(100, 200).reshape(10, 10)))
['1' '2' 'Fizz' '4' 'Buzz' 'Fizz' '7' '8' 'Fizz' 'Buzz' '11' 'Fizz' '13'
'14' 'FizzBuzz']
[['Buzz' '101' 'Fizz' '103' '104' 'FizzBuzz' '106' '107' 'Fizz' '109']
['Buzz' 'Fizz' '112' '113' 'Fizz' 'Buzz' '116' 'Fizz' '118' '119']
['FizzBuzz' '121' '122' 'Fizz' '124' 'Buzz' 'Fizz' '127' '128' 'Fizz']
['Buzz' '131' 'Fizz' '133' '134' 'FizzBuzz' '136' '137' 'Fizz' '139']
['Buzz' 'Fizz' '142' '143' 'Fizz' 'Buzz' '146' 'Fizz' '148' '149']
['FizzBuzz' '151' '152' 'Fizz' '154' 'Buzz' 'Fizz' '157' '158' 'Fizz']
['Buzz' '161' 'Fizz' '163' '164' 'FizzBuzz' '166' '167' 'Fizz' '169']
['Buzz' 'Fizz' '172' '173' 'Fizz' 'Buzz' '176' 'Fizz' '178' '179']
['FizzBuzz' '181' '182' 'Fizz' '184' 'Buzz' 'Fizz' '187' '188' 'Fizz']
['Buzz' '191' 'Fizz' '193' '194' 'FizzBuzz' '196' '197' 'Fizz' '199']]
def to_class_indices(x, class_indices):
    """Map fizz buzz label strings to integer class indices.

    >> class_indices = dict(zip(("Fizz", "Buzz", "FizzBuzz", "Num"), range(4)))
    >> x = np.arange(1, 16, 1)
    >> x = fizz_buzz(x)
    >> x = to_class_indices(x, class_indices=class_indices)
    >> print(x)
    [3 3 0 3 1 0 3 3 0 1 3 0 3 3 2]

    Args:
        x(ndarray): numpy array of fizz buzz labels (dtype=str),
            as returned by ``fizz_buzz(x)``.
        class_indices(dict): mapping from class name to class index.
            Must contain a "Num" entry, the fallback class for plain
            numbers (it no longer has to be the last/largest entry).
    Returns:
        result(ndarray): class indices (dtype=int), same shape as ``x``.
    """
    # Fill with the "Num" index up front: any element matching no named
    # class keeps the numeric fallback. (Previously hard-coded to 3 and
    # dependent on "Num" being the final dict entry.)
    result = np.full(x.shape, class_indices["Num"], dtype="int")
    for name, index in class_indices.items():
        if name != "Num":
            result[x == name] = index
    return result
# Demo: labels for 1..15 mapped to class indices
# (expected: [3 3 0 3 1 0 3 3 0 1 3 0 3 3 2]).
class_indices = dict(zip(("Fizz", "Buzz", "FizzBuzz", "Num"), range(4)))
x = np.arange(1, 16, 1)
x = fizz_buzz(x)
x = to_class_indices(x, class_indices=class_indices)
print(x)
[3 3 0 3 1 0 3 3 0 1 3 0 3 3 2]
def to_str(x, class_indices, num_array=None):
    """Inverse of ``to_class_indices``: class indices back to label strings.

    Args:
        x(ndarray): integer class indices.
        class_indices(dict): mapping from class name to class index;
            must contain a "Num" entry marking the numeric class.
        num_array(ndarray): numbers substituted for "Num" elements.
            Defaults to 1..x.size. Reshaped to match ``x``.
    Returns:
        result(ndarray): label strings (dtype=str), same shape as ``x``.
    """
    # "-1" is a safe sentinel: every named class overwrites it, so the
    # cells still holding "-1" afterwards are exactly the "Num" elements.
    result = (np.zeros_like(x, dtype="int") - 1).astype("str")
    if num_array is None:
        num_array = np.arange(1, x.size + 1, 1)
    # Reshape so boolean masks over result also index num_array
    # (previously raised IndexError for multi-dimensional x).
    num_array = np.asarray(num_array).reshape(x.shape)
    # Skip "Num" by key instead of relying on it being the last entry.
    for name, index in class_indices.items():
        if name != "Num":
            result[x == index] = name
    numeric = result == "-1"
    result[numeric] = num_array[numeric]
    return result
def binary_encode(x, num_digits=None):
    """Encode each integer in ``x`` as a fixed-width binary row vector.

    Args:
        x(ndarray): array of non-negative integers (any shape; flattened
            to one row per element).
        num_digits(int): bit width per row; defaults to the number of
            binary digits of ``max(x)``.
    Returns:
        result(ndarray): shape (x.size, num_digits), integer dtype,
            most significant bit in column 0.
    """
    x = np.asarray(x).ravel()
    if num_digits is None:
        # int(...) so format() sees a plain Python int.
        num_digits = len(format(int(np.max(x)), "b"))
    # Vectorized over both elements and bit positions via broadcasting:
    # shifts are ordered high-to-low so column 0 is the MSB.
    shifts = np.arange(num_digits - 1, -1, -1)
    return (x[:, np.newaxis] >> shifts) & 1
from tensorflow.keras.utils import to_categorical
# Build the dataset: binary-encoded numbers 1..10000 as features,
# one-hot fizz buzz classes as targets.
class_indices = dict(zip(("Fizz", "Buzz", "FizzBuzz", "Num"), range(4)))
data = np.arange(1, 10001, 1)
x = binary_encode(data)  # (10000, 14): 10000 needs 14 bits
print(x.shape)
y = fizz_buzz(data)
y = to_class_indices(y, class_indices)
y = to_categorical(y)  # one-hot, (10000, 4)
print(y.shape)
# Split: numbers 1..100 held out as the test set, 101..9000 for
# training, 9001..10000 for validation.
x_test = x[:100]
y_test = y[:100]
x_train = x[100:9000]
y_train = y[100:9000]
x_val = x[9000:]
y_val = y[9000:]
(10000, 14)
(10000, 4)
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import Adam
# Small MLP: 14 binary input digits -> 4 fizz buzz classes.
model = Sequential()
model.add(Dense(1000, input_dim=x.shape[1], activation="relu"))
model.add(Dense(100, activation="relu"))
model.add(Dense(4, activation="softmax"))
model.compile(
    loss="categorical_crossentropy",
    # `lr` was removed from TF2 Keras optimizers; `learning_rate` is the
    # supported keyword.
    optimizer=Adam(learning_rate=0.01),
    metrics=["accuracy"]
)
model.summary()
n_epoch = 200
history = model.fit(
    x_train,
    y_train,
    batch_size=128,
    epochs=n_epoch,
    validation_data=(x_val, y_val),
    verbose=2
)
score = model.evaluate(x_test, y_test, verbose=0)
print("Test loss:", score[0])
print("Test accuracy:", score[1])
# Sequential.predict_classes was removed in TF 2.6; take the argmax of
# the softmax output instead.
p = np.argmax(model.predict(x_test), axis=1)
print(to_str(p, class_indices))
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
dense (Dense) (None, 1000) 15000
_________________________________________________________________
dense_1 (Dense) (None, 100) 100100
_________________________________________________________________
dense_2 (Dense) (None, 4) 404
=================================================================
Total params: 115,504
Trainable params: 115,504
Non-trainable params: 0
_________________________________________________________________
Train on 8900 samples, validate on 1000 samples
Epoch 1/200
8900/8900 - 1s - loss: 1.1677 - accuracy: 0.5188 - val_loss: 1.1420 - val_accuracy: 0.5330
Epoch 2/200
8900/8900 - 0s - loss: 1.1442 - accuracy: 0.5334 - val_loss: 1.1420 - val_accuracy: 0.5330
...
Epoch 198/200
8900/8900 - 0s - loss: 1.1674e-04 - accuracy: 1.0000 - val_loss: 0.5460 - val_accuracy: 0.9320
Epoch 199/200
8900/8900 - 0s - loss: 1.1218e-04 - accuracy: 1.0000 - val_loss: 0.5280 - val_accuracy: 0.9330
Epoch 200/200
8900/8900 - 0s - loss: 1.0888e-04 - accuracy: 1.0000 - val_loss: 0.5399 - val_accuracy: 0.9320
Test loss: 0.03889203814440407
Test accuracy: 0.99
['1' '2' 'Fizz' '4' 'Buzz' 'Fizz' '7' '8' 'Fizz' 'Buzz' '11' 'Fizz' '13'
'14' 'FizzBuzz' '16' '17' 'Fizz' '19' 'Buzz' 'Fizz' '22' '23' 'Fizz' '25'
'26' 'Fizz' '28' '29' 'FizzBuzz' '31' '32' 'Fizz' '34' 'Buzz' 'Fizz' '37'
'38' 'Fizz' 'Buzz' '41' 'Fizz' '43' '44' 'FizzBuzz' '46' '47' 'Fizz' '49'
'Buzz' 'Fizz' '52' '53' 'Fizz' 'Buzz' '56' 'Fizz' '58' '59' 'FizzBuzz'
'61' '62' 'Fizz' '64' 'Buzz' 'Fizz' '67' '68' 'Fizz' 'Buzz' '71' 'Fizz'
'73' '74' 'FizzBuzz' '76' '77' 'Fizz' '79' 'Buzz' 'Fizz' '82' '83' 'Fizz'
'Buzz' '86' 'Fizz' '88' '89' 'FizzBuzz' '91' '92' 'Fizz' '94' 'Buzz'
'Fizz' '97' '98' 'Fizz' 'Buzz']
import matplotlib.pyplot as plt
%matplotlib inline
# Plot training-history curves: accuracy (top) and loss (bottom),
# train vs. validation, over all n_epoch epochs.
x = np.arange(1, n_epoch+1)
train_acc = history.history['accuracy']
train_loss = history.history['loss']
val_acc = history.history['val_accuracy']
val_loss = history.history['val_loss']
fig = plt.figure(figsize=(6, 8))
ax1 = fig.add_subplot(2, 1, 1)
ax1.plot(x, train_acc, linestyle="-", label="train")
ax1.plot(x, val_acc, linestyle=":", label="validation")
ax1.set_xlabel("Epochs")
ax1.set_ylabel("Accuracy")
ax1.set_title("Accuracy")
ax1.legend()
ax2 = fig.add_subplot(2, 1, 2)
ax2.plot(x, train_loss, linestyle="-", label="train")
ax2.plot(x, val_loss, linestyle=":", label="validation")
ax2.set_xlabel("Epochs")
ax2.set_ylabel("Loss")
ax2.set_title("Loss")
ax2.legend()
plt.tight_layout()
plt.show()
# Persist the trained model, then reload it to verify the round trip.
model.save("fizz_buzz.h5")
from tensorflow.keras.models import load_model
model = load_model("fizz_buzz.h5")
data = np.arange(1, 9000, 1)
x = binary_encode(data, 14)  # fixed width must match the trained input_dim
# Sequential.predict_classes was removed in TF 2.6; take the argmax of
# the softmax output instead.
p = np.argmax(model.predict(x), axis=1)
p = to_str(p, class_indices, data)
print(p)
# NOTE(review): 101..8999 overlap the training split, so this accuracy
# is not a held-out estimate.
y = fizz_buzz(data).astype("str")
acc = np.sum(y == p) / y.size
print("acc:", acc)
['1' '2' 'Fizz' ... 'Fizz' '8998' '8999']
acc: 0.999888876541838