Provide nothing but a line-art sketch, and the system colorizes it automatically.
The trained model analyzes the drawing and selects the colors that best suit it for colorization.
import tensorflow as tf
import keras
import numpy as np
from config import *
from keras.models import load_model
def ToGray(x):
    R = x[:, :, :, 0:1]
    G = x[:, :, :, 1:2]
    B = x[:, :, :, 2:3]
    return 0.30 * R + 0.59 * G + 0.11 * B
def RGB2YUV(x):
    R = x[:, :, :, 0:1]
    G = x[:, :, :, 1:2]
    B = x[:, :, :, 2:3]
    Y = 0.299 * R + 0.587 * G + 0.114 * B
    U = 0.492 * (B - Y) + 128
    V = 0.877 * (R - Y) + 128
    return tf.concat([Y, U, V], axis=3)
def YUV2RGB(x):
    Y = x[:, :, :, 0:1]
    U = x[:, :, :, 1:2]
    V = x[:, :, :, 2:3]
    R = Y + 1.140 * (V - 128)
    G = Y - 0.394 * (U - 128) - 0.581 * (V - 128)
    B = Y + 2.032 * (U - 128)
    return tf.concat([R, G, B], axis=3)
def VGG2RGB(x):
    return (x + [103.939, 116.779, 123.68])[:, :, :, ::-1]
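# A small numerical check of the constants above (my own illustration, done in NumPy rather
# than in the TF graph): converting RGB -> YUV -> RGB with these coefficients reproduces the
# input to well under one gray level of error.
def _yuv_roundtrip_check():
    rgb = np.random.uniform(0, 255, size=(16, 3)).astype(np.float32)
    R, G, B = rgb[:, 0], rgb[:, 1], rgb[:, 2]
    Y = 0.299 * R + 0.587 * G + 0.114 * B
    U = 0.492 * (B - Y) + 128
    V = 0.877 * (R - Y) + 128
    back = np.stack([Y + 1.140 * (V - 128),
                     Y - 0.394 * (U - 128) - 0.581 * (V - 128),
                     Y + 2.032 * (U - 128)], axis=1)
    print(np.abs(back - rgb).max())  # stays below roughly 0.3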
session = keras.backend.get_session()
with tf.device(device_A):
    ipa = tf.placeholder(dtype=tf.float32, shape=(None, 1))
    ip1 = tf.placeholder(dtype=tf.float32, shape=(None, None, None, 1))
    ip3 = tf.placeholder(dtype=tf.float32, shape=(None, None, None, 3))
    ip4 = tf.placeholder(dtype=tf.float32, shape=(None, None, None, 4))
    ip3x = tf.placeholder(dtype=tf.float32, shape=(None, None, None, 3))
    baby = load_model('baby.net')
    baby_place = tf.concat([- 512 * tf.ones_like(ip4[:, :, :, 3:4]), 128 * tf.ones_like(ip4[:, :, :, 3:4]), 128 * tf.ones_like(ip4[:, :, :, 3:4])], axis=3)
    baby_yuv = RGB2YUV(ip4[:, :, :, 0:3])
    baby_alpha = tf.where(x=tf.zeros_like(ip4[:, :, :, 3:4]), y=tf.ones_like(ip4[:, :, :, 3:4]), condition=tf.less(ip4[:, :, :, 3:4], 128))
    baby_hint = baby_alpha * baby_yuv + (1 - baby_alpha) * baby_place
    baby_op = YUV2RGB(baby(tf.concat([ip1, baby_hint], axis=3)))
    girder = load_model('girder.net')
    gird_op = (1 - girder([1 - ip1 / 255.0, ip4, 1 - ip3 / 255.0])) * 255.0
    reader = load_model('reader.net')
    features = reader(ip3 / 255.0)
    featuresx = reader(ip3x / 255.0)
    head = load_model('head.net')
    feed = [1 - ip1 / 255.0, (ip4[:, :, :, 0:3] / 127.5 - 1) * ip4[:, :, :, 3:4] / 255.0]
    for _ in range(len(features)):
        item = keras.backend.mean(features[_], axis=[1, 2])
        itemx = keras.backend.mean(featuresx[_], axis=[1, 2])
        feed.append(item * ipa + itemx * (1 - ipa))
    nil0, nil1, head_temp = head(feed)
    neck = load_model('neck.net')
    nil2, nil3, neck_temp = neck(feed)
    feed[0] = tf.clip_by_value(1 - tf.image.resize_bilinear(ToGray(VGG2RGB(head_temp) / 255.0), tf.shape(ip1)[1:3]), 0.0, 1.0)
    nil4, nil5, head_temp = neck(feed)
    head_op = VGG2RGB(head_temp)
    neck_op = VGG2RGB(neck_temp)
with tf.device(device_B):
    ip3B = tf.placeholder(dtype=tf.float32, shape=(None, None, None, 3))
    tail = load_model('tail.net')
    pads = 7
    tail_op = tail(tf.pad(ip3B / 255.0, [[0, 0], [pads, pads], [pads, pads], [0, 0]], 'REFLECT'))[:, pads*2:-pads*2, pads*2:-pads*2, :] * 255.0
session.run(tf.global_variables_initializer())
tail.load_weights('tail.net')
baby.load_weights('baby.net')
head.load_weights('head.net')
neck.load_weights('neck.net')
girder.load_weights('girder.net')
reader.load_weights('reader.net')
def go_head(sketch, global_hint, local_hint, global_hint_x, alpha):
    return session.run(head_op, feed_dict={
        ip1: sketch[None, :, :, None], ip3: global_hint[None, :, :, :], ip4: local_hint[None, :, :, :], ip3x: global_hint_x[None, :, :, :], ipa: np.array([alpha])[None, :]
    })[0].clip(0, 255).astype(np.uint8)
def go_neck(sketch, global_hint, local_hint, global_hint_x, alpha):
    return session.run(neck_op, feed_dict={
        ip1: sketch[None, :, :, None], ip3: global_hint[None, :, :, :], ip4: local_hint[None, :, :, :], ip3x: global_hint_x[None, :, :, :], ipa: np.array([alpha])[None, :]
    })[0].clip(0, 255).astype(np.uint8)
def go_gird(sketch, latent, hint):
    return session.run(gird_op, feed_dict={
        ip1: sketch[None, :, :, None], ip3: latent[None, :, :, :], ip4: hint[None, :, :, :]
    })[0].clip(0, 255).astype(np.uint8)
def go_tail(x):
    return session.run(tail_op, feed_dict={
        ip3B: x[None, :, :, :]
    })[0].clip(0, 255).astype(np.uint8)
def go_baby(sketch, local_hint):
    return session.run(baby_op, feed_dict={
        ip1: sketch[None, :, :, None], ip4: local_hint[None, :, :, :]
    })[0].clip(0, 255).astype(np.uint8)
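# A minimal usage sketch of the session wrappers above (my own illustration, not part of the
# project; it assumes the *.net weight files loaded above are present). As used by the server
# further below, go_baby drafts a low-resolution color image from a small sketch plus user hint
# points, and go_tail is the final refinement pass. The 128x128 blank sketch here is a dummy
# stand-in for a real downscaled line drawing.
def _demo_draft():
    sketch = np.full((128, 128), 255, dtype=np.uint8)   # hypothetical blank 128x128 sketch
    hint = np.zeros((128, 128, 4), dtype=np.float32)    # empty hint: no user color points
    draft = go_baby(sketch, hint)                       # rough low-resolution color draft
    return go_tail(draft)                               # refinement pass on the draft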
import numpy as np
import cv2
from skimage.measure import block_reduce
def from_png_to_jpg(map):
    if map.shape[2] == 3:
        return map
    # RGBA input: composite the color channels over a white background
    color = map[:, :, 0:3].astype(np.float) / 255.0
    alpha = map[:, :, 3:4].astype(np.float) / 255.0
    reversed_color = 1 - color
    final_color = (255.0 - reversed_color * alpha * 255.0).clip(0, 255).astype(np.uint8)
    return final_color
def k_resize(x, k):
    if x.shape[0] < x.shape[1]:
        s0 = k
        s1 = int(x.shape[1] * (k / x.shape[0]))
        s1 = s1 - s1 % 64
        _s0 = 16 * s0
        _s1 = int(x.shape[1] * (_s0 / x.shape[0]))
        _s1 = (_s1 + 32) - (_s1 + 32) % 64
    else:
        s1 = k
        s0 = int(x.shape[0] * (k / x.shape[1]))
        s0 = s0 - s0 % 64
        _s1 = 16 * s1
        _s0 = int(x.shape[0] * (_s1 / x.shape[1]))
        _s0 = (_s0 + 32) - (_s0 + 32) % 64
    new_min = min(_s1, _s0)
    raw_min = min(x.shape[0], x.shape[1])
    if new_min < raw_min:
        interpolation = cv2.INTER_AREA
    else:
        interpolation = cv2.INTER_LANCZOS4
    y = cv2.resize(x, (_s1, _s0), interpolation=interpolation)
    return y
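# A quick illustration of the rounding above (my own example, using a dummy 600x800 image):
# the short side becomes 16*k and both sides are snapped to multiples of 64. The server below
# calls k_resize(sketch, 64) for the full-resolution pass and k_resize(composition, 14) for the
# global hint (short side 16 * 14 = 224).
def _demo_k_resize():
    demo = np.zeros((600, 800, 3), dtype=np.uint8)
    print(k_resize(demo, 64).shape)  # (1024, 1344, 3)
    print(k_resize(demo, 14).shape)  # (224, 320, 3)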
def sk_resize(x, k):
    if x.shape[0] < x.shape[1]:
        s0 = k
        s1 = int(x.shape[1] * (k / x.shape[0]))
        s1 = s1 - s1 % 16
        _s0 = 4 * s0
        _s1 = int(x.shape[1] * (_s0 / x.shape[0]))
        _s1 = (_s1 + 8) - (_s1 + 8) % 16
    else:
        s1 = k
        s0 = int(x.shape[0] * (k / x.shape[1]))
        s0 = s0 - s0 % 16
        _s1 = 4 * s1
        _s0 = int(x.shape[0] * (_s1 / x.shape[1]))
        _s0 = (_s0 + 8) - (_s0 + 8) % 16
    new_min = min(_s1, _s0)
    raw_min = min(x.shape[0], x.shape[1])
    if new_min < raw_min:
        interpolation = cv2.INTER_AREA
    else:
        interpolation = cv2.INTER_LANCZOS4
    y = cv2.resize(x, (_s1, _s0), interpolation=interpolation)
    return y
def d_resize(x, d, fac=1.0):
    new_min = min(int(d[1] * fac), int(d[0] * fac))
    raw_min = min(x.shape[0], x.shape[1])
    if new_min < raw_min:
        interpolation = cv2.INTER_AREA
    else:
        interpolation = cv2.INTER_LANCZOS4
    y = cv2.resize(x, (int(d[1] * fac), int(d[0] * fac)), interpolation=interpolation)
    return y
def n_resize(x, d):
    y = cv2.resize(x, (d[1], d[0]), interpolation=cv2.INTER_NEAREST)
    return y
def s_resize(x, s):
    if x.shape[0] < x.shape[1]:
        s0 = x.shape[0]
        s1 = int(float(s0) / float(s[0]) * float(s[1]))
    else:
        s1 = x.shape[1]
        s0 = int(float(s1) / float(s[1]) * float(s[0]))
    new_max = max(s1, s0)
    raw_max = max(x.shape[0], x.shape[1])
    if new_max < raw_max:
        interpolation = cv2.INTER_AREA
    else:
        interpolation = cv2.INTER_LANCZOS4
    y = cv2.resize(x, (s1, s0), interpolation=interpolation)
    return y
def min_resize(x, m):
    if x.shape[0] < x.shape[1]:
        s0 = m
        s1 = int(float(m) / float(x.shape[0]) * float(x.shape[1]))
    else:
        s0 = int(float(m) / float(x.shape[1]) * float(x.shape[0]))
        s1 = m
    new_max = max(s1, s0)
    raw_max = max(x.shape[0], x.shape[1])
    if new_max < raw_max:
        interpolation = cv2.INTER_AREA
    else:
        interpolation = cv2.INTER_LANCZOS4
    y = cv2.resize(x, (s1, s0), interpolation=interpolation)
    return y
def max_resize(x, m):
    if x.shape[0] > x.shape[1]:
        s0 = m
        s1 = int(float(m) / float(x.shape[0]) * float(x.shape[1]))
    else:
        s0 = int(float(m) / float(x.shape[1]) * float(x.shape[0]))
        s1 = m
    new_max = max(s1, s0)
    raw_max = max(x.shape[0], x.shape[1])
    if new_max < raw_max:
        interpolation = cv2.INTER_AREA
    else:
        interpolation = cv2.INTER_LANCZOS4
    y = cv2.resize(x, (s1, s0), interpolation=interpolation)
    return y
def s_enhance(x, k=2.0):
    p = cv2.cvtColor(x, cv2.COLOR_RGB2HSV).astype(np.float)
    p[:, :, 1] *= k
    p = p.clip(0, 255).astype(np.uint8)
    return cv2.cvtColor(p, cv2.COLOR_HSV2RGB).clip(0, 255)
def ini_hint(x):
    r = np.zeros(shape=(x.shape[0], x.shape[1], 4), dtype=np.float32)
    return r
def opreate_gird_hint(gird, points, type, length):
    h = gird.shape[0]
    w = gird.shape[1]
    for point in points:
        x, y, r, g, b, t = point
        if t == type:
            x = int(x * w)
            y = int(y * h)
            l_ = max(0, x - length)
            b_ = max(0, y - length)
            r_ = min(w, x + length + 1)
            t_ = min(h, y + length + 1)
            gird[b_:t_, l_:r_, 2] = 1 - r / 255.0
            gird[b_:t_, l_:r_, 1] = 1 - g / 255.0
            gird[b_:t_, l_:r_, 0] = 1 - b / 255.0
            gird[b_:t_, l_:r_, 3] = 1
    return gird
def opreate_normal_hint(gird, points, type, length):
    h = gird.shape[0]
    w = gird.shape[1]
    for point in points:
        x, y, r, g, b, t = point
        if t == type:
            x = int(x * w)
            y = int(y * h)
            l_ = max(0, x - length)
            b_ = max(0, y - length)
            r_ = min(w, x + length + 1)
            t_ = min(h, y + length + 1)
            gird[b_:t_, l_:r_, 2] = r
            gird[b_:t_, l_:r_, 1] = g
            gird[b_:t_, l_:r_, 0] = b
            gird[b_:t_, l_:r_, 3] = 255.0
    return gird
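# The point format assumed by the two hint builders above (inferred from the unpacking in the
# loop): each point is [x, y, r, g, b, t], with x and y normalized to [0, 1] and t selecting
# the hint type; colors are written in OpenCV BGR channel order. A minimal sketch:
def _demo_hint():
    points = [[0.5, 0.5, 255, 0, 0, 2]]   # one red hint point at the image center, type 2
    hint = ini_hint(np.zeros((256, 256), dtype=np.uint8))
    return opreate_normal_hint(hint, points, type=2, length=2)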
def go_cvline(img):
    x = cv2.Sobel(img, cv2.CV_16S, 1, 0)
    y = cv2.Sobel(img, cv2.CV_16S, 0, 1)
    absX = cv2.convertScaleAbs(x)
    absY = cv2.convertScaleAbs(y)
    r = 255 - cv2.addWeighted(absX, 0.5, absY, 0.5, 0)
    return np.tile(np.min(r, axis=2, keepdims=True).clip(0, 255).astype(np.uint8), [1, 1, 3])
def go_passline(img):
    o = img.astype(np.float32)
    b = cv2.GaussianBlur(img, (7, 7), 0).astype(np.float32)
    r = np.max(b - o, axis=2, keepdims=True)
    r /= np.max(cv2.resize(r.clip(0, 255).astype(np.uint8), (64, 64), cv2.INTER_AREA))
    r = (1 - r).clip(0, 1)
    return np.tile((r * 255.0).clip(0, 255).astype(np.uint8), [1, 1, 3])
def min_k_down(x, k):
    y = 255 - x.astype(np.float32)
    y = block_reduce(y, (k, k), np.max)
    y = 255 - y
    return y.clip(0, 255).astype(np.uint8)
def min_k_down_c(x, k):
    y = 255 - x.astype(np.float32)
    y = block_reduce(y, (k, k, 1), np.max)
    y = 255 - y
    return y.clip(0, 255).astype(np.uint8)
def mini_norm(x):
    y = x.astype(np.float32)
    y = 1 - y / 255.0
    y -= np.min(y)
    y /= np.max(y)
    return (255.0 - y * 80.0).astype(np.uint8)
def hard_norm(x):
    o = x.astype(np.float32)
    b = cv2.GaussianBlur(x, (3, 3), 0).astype(np.float32)
    y = (o - b + 255.0).clip(0, 255)
    y = 1 - y / 255.0
    y -= np.min(y)
    y /= np.max(y)
    y[y < np.mean(y)] = 0
    y[y > 0] = 1
    return (255.0 - y * 255.0).astype(np.uint8)
def sensitive(x, s=15.0):
    y = x.astype(np.float32)
    y -= s
    y /= 255.0 - s * 2.0
    y *= 255.0
    return y.clip(0, 255).astype(np.uint8)
def min_black(x):
    return np.tile(np.min(x, axis=2, keepdims=True), [1, 1, 3])
def eye_black(x):
    return cv2.cvtColor(cv2.cvtColor(x, cv2.COLOR_RGB2GRAY), cv2.COLOR_GRAY2RGB)
def cal_std(x):
    y = (cv2.resize(x, (128, 128), cv2.INTER_AREA)).astype(np.float32)
    return np.mean(np.var(y, axis=2))
def emph_line(x, y, c):
    a = x.astype(np.float32)
    b = y.astype(np.float32)[:, :, None] / 255.0
    c = np.tile(c[None, None, ::-1], [a.shape[0], a.shape[1], 1])
    return (a * b + c * (1 - b)).clip(0, 255).astype(np.uint8)
def de_line(x, y):
    a = x.astype(np.float32)
    b = y.astype(np.float32)[:, :, None] / 255.0
    c = np.tile(np.array([255, 255, 255])[None, None, ::-1], [a.shape[0], a.shape[1], 1])
    return (a * b + c * (1 - b)).clip(0, 255).astype(np.uint8)
def blur_line(x, y):
    o = x.astype(np.float32)
    b = cv2.GaussianBlur(x, (3, 3), 0).astype(np.float32)
    k = y.astype(np.float32)[:, :, None] / 255.0
    return (o * k + b * (1 - k)).clip(0, 255).astype(np.uint8)
def clip_15(x, s=15.0):
    return ((x - s) / (255.0 - s - s)).clip(0, 1) * 255.0
def cv_denoise(x):
    return cv2.fastNlMeansDenoisingColored(x, None, 3, 3, 7, 21)
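# A small, model-free sketch of how these helpers chain together for line extraction (my own
# example mirroring handle_sketch_upload_pool in the server below, but skipping the go_tail
# network pass; 'painting.jpg' is a hypothetical input path):
def _demo_extract_lines(path='painting.jpg'):
    img = cv2.imread(path)            # BGR color painting
    lines = go_passline(img)          # blur-minus-original line response on a white background
    lines = min_k_down_c(lines, 2)    # 2x min-pooling keeps the darkest strokes
    lines = cv_denoise(lines)         # non-local means denoising
    lines = sensitive(lines, s=5.0)   # stretch contrast slightly
    return min_black(lines)           # collapse to the darkest channel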
from config import *
import re
import os
import cv2
import time
import json
import base64
import shutil
import datetime
import threading
import numpy as np
from bottle import route, run, static_file, request, BaseRequest, response
from ai import *
from tricks import *
BaseRequest.MEMFILE_MAX = 10000 * 1000
def get_request_image(name):
    img = request.forms.get(name)
    img = re.sub('^data:image/.+;base64,', '', img)
    img = base64.urlsafe_b64decode(img)
    img = np.fromstring(img, dtype=np.uint8)
    img = cv2.imdecode(img, -1)
    return img
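# For reference, a client would send images in the format this helper expects: a data URL whose
# payload is URL-safe base64 of the raw image file (a minimal sketch of my own; 'sketch.png' is
# a hypothetical path, and the form field name depends on the route, e.g. 'sketch' or
# 'reference'):
def encode_image_for_upload(path='sketch.png'):
    with open(path, 'rb') as f:
        payload = base64.urlsafe_b64encode(f.read()).decode('ascii')
    return 'data:image/png;base64,' + payload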
@route('/<filename:path>')
def send_static(filename):
    return static_file(filename, root='game/')
@route('/')
def send_index():
    return static_file("index.html", root='game/')
sketch_upload_pool = []
painting_pool = []
def handle_sketch_upload_pool():
    if len(sketch_upload_pool) > 0:
        room, sketch, method = sketch_upload_pool[0]
        del sketch_upload_pool[0]
        room_path = 'game/rooms/' + room
        print('processing sketch in ' + room_path)
        if os.path.exists(room_path + '/sketch.improved.jpg'):
            improved_sketch = cv2.imread(room_path + '/sketch.improved.jpg')
            print('lucky to find improved sketch')
        else:
            improved_sketch = sketch.copy()
            improved_sketch = min_resize(improved_sketch, 512)
            improved_sketch = cv_denoise(improved_sketch)
            improved_sketch = sensitive(improved_sketch, s=5.0)
            improved_sketch = go_tail(improved_sketch)
            cv2.imwrite(room_path + '/sketch.improved.jpg', improved_sketch)
        color_sketch = improved_sketch.copy()
        std = cal_std(color_sketch)
        print('std = ' + str(std))
        need_de_painting = (std > 100.0) and method == 'rendering'
        if method == 'recolorization' or need_de_painting:
            if os.path.exists(room_path + '/sketch.recolorization.jpg') or os.path.exists(room_path + '/sketch.de_painting.jpg'):
                print('lucky to find lined sketch')
            else:
                improved_sketch = go_passline(color_sketch)
                improved_sketch = min_k_down_c(improved_sketch, 2)
                improved_sketch = cv_denoise(improved_sketch)
                improved_sketch = go_tail(improved_sketch)
                improved_sketch = sensitive(improved_sketch, s=5.0)
                cv2.imwrite(room_path + '/sketch.recolorization.jpg', min_black(improved_sketch))
                if need_de_painting:
                    cv2.imwrite(room_path + '/sketch.de_painting.jpg', min_black(improved_sketch))
                    print('In rendering mode, the user has uploaded a painting, and I have translated it into a sketch.')
            print('sketch lined')
        cv2.imwrite(room_path + '/sketch.colorization.jpg', min_black(color_sketch))
        cv2.imwrite(room_path + '/sketch.rendering.jpg', eye_black(color_sketch))
        print('sketch improved')
    return
def handle_painting_pool():
    if len(painting_pool) > 0:
        room, ID, sketch, alpha, reference, points, method, lineColor, line = painting_pool[0]
        del painting_pool[0]
        room_path = 'game/rooms/' + room
        print('processing painting in ' + room_path)
        sketch_1024 = k_resize(sketch, 64)
        if os.path.exists(room_path + '/sketch.de_painting.jpg') and method == 'rendering':
            vice_sketch_1024 = k_resize(cv2.imread(room_path + '/sketch.de_painting.jpg', cv2.IMREAD_GRAYSCALE), 64)
            sketch_256 = mini_norm(k_resize(min_k_down(vice_sketch_1024, 2), 16))
            sketch_128 = hard_norm(sk_resize(min_k_down(vice_sketch_1024, 4), 32))
        else:
            sketch_256 = mini_norm(k_resize(min_k_down(sketch_1024, 2), 16))
            sketch_128 = hard_norm(sk_resize(min_k_down(sketch_1024, 4), 32))
        print('sketch prepared')
        if debugging:
            cv2.imwrite(room_path + '/sketch.128.jpg', sketch_128)
            cv2.imwrite(room_path + '/sketch.256.jpg', sketch_256)
        baby = go_baby(sketch_128, opreate_normal_hint(ini_hint(sketch_128), points, type=0, length=1))
        baby = de_line(baby, sketch_128)
        for _ in range(16):
            baby = blur_line(baby, sketch_128)
        baby = go_tail(baby)
        baby = clip_15(baby)
        if debugging:
            cv2.imwrite(room_path + '/baby.' + ID + '.jpg', baby)
        print('baby born')
        composition = go_gird(sketch=sketch_256, latent=d_resize(baby, sketch_256.shape), hint=ini_hint(sketch_256))
        if line:
            composition = emph_line(composition, d_resize(min_k_down(sketch_1024, 2), composition.shape), lineColor)
        composition = go_tail(composition)
        cv2.imwrite(room_path + '/composition.' + ID + '.jpg', composition)
        print('composition saved')
        painting_function = go_head
        if method == 'rendering':
            painting_function = go_neck
        print('method: ' + method)
        result = painting_function(
            sketch=sketch_1024,
            global_hint=k_resize(composition, 14),
            local_hint=opreate_normal_hint(ini_hint(sketch_1024), points, type=2, length=2),
            global_hint_x=k_resize(reference, 14) if reference is not None else k_resize(composition, 14),
            alpha=(1 - alpha) if reference is not None else 1
        )
        result = go_tail(result)
        cv2.imwrite(room_path + '/result.' + ID + '.jpg', result)
        cv2.imwrite('results/' + room + '.' + ID + '.jpg', result)
        if debugging:
            cv2.imwrite(room_path + '/icon.' + ID + '.jpg', max_resize(result, 128))
    return
@route('/upload_sketch', method='POST')
def upload_sketch():
    room = request.forms.get("room")
    previous_step = request.forms.get("step")
    if previous_step == 'sample':
        new_room_id = datetime.datetime.now().strftime('%b%dH%HM%MS%S') + 'R' + str(np.random.randint(100, 999))
        shutil.copytree('game/samples/' + room, 'game/rooms/' + new_room_id)
        print('copy ' + 'game/samples/' + room + ' to ' + 'game/rooms/' + new_room_id)
        room = new_room_id
    ID = datetime.datetime.now().strftime('H%HM%MS%S')
    method = request.forms.get("method")
    if room == 'new':
        room = datetime.datetime.now().strftime('%b%dH%HM%MS%S') + 'R' + str(np.random.randint(100, 999))
        room_path = 'game/rooms/' + room
        os.makedirs(room_path, exist_ok=True)
        sketch = from_png_to_jpg(get_request_image('sketch'))
        cv2.imwrite(room_path + '/sketch.original.jpg', sketch)
        print('original_sketch saved')
    else:
        room_path = 'game/rooms/' + room
        sketch = cv2.imread(room_path + '/sketch.original.jpg')
    print('sketch upload pool get request: ' + method)
    sketch_upload_pool.append((room, sketch, method))
    while True:
        time.sleep(0.1)
        if os.path.exists(room_path + '/sketch.' + method + '.jpg'):
            break
    time.sleep(1.0)
    return room + '_' + ID
@route('/request_result', method='POST')
def request_result():
    room = request.forms.get("room")
    previous_step = request.forms.get("step")
    if previous_step == 'sample':
        new_room_id = datetime.datetime.now().strftime('%b%dH%HM%MS%S') + 'R' + str(np.random.randint(100, 999))
        shutil.copytree('game/samples/' + room, 'game/rooms/' + new_room_id)
        print('copy ' + 'game/samples/' + room + ' to ' + 'game/rooms/' + new_room_id)
        room = new_room_id
    ID = datetime.datetime.now().strftime('H%HM%MS%S')
    room_path = 'game/rooms/' + room
    options_str = request.forms.get("options")
    if debugging:
        with open(room_path + '/options.' + ID + '.json', 'w') as f:
            f.write(options_str)
    options = json.loads(options_str)
    method = options["method"]
    sketch = cv2.imread(room_path + '/sketch.' + method + '.jpg', cv2.IMREAD_GRAYSCALE)
    alpha = float(options["alpha"])
    points = options["points"]
    for _ in range(len(points)):
        points[_][1] = 1 - points[_][1]
    if options["hasReference"]:
        reference = from_png_to_jpg(get_request_image('reference'))
        cv2.imwrite(room_path + '/reference.' + ID + '.jpg', reference)
        reference = s_enhance(reference)
    else:
        reference = None
    print('request result room = ' + str(room) + ', ID = ' + str(ID))
    lineColor = np.array(options["lineColor"])
    line = options["line"]
    painting_pool.append([room, ID, sketch, alpha, reference, points, method, lineColor, line])
    while True:
        time.sleep(0.1)
        if os.path.exists(room_path + '/result.' + ID + '.jpg'):
            break
    time.sleep(1.0)
    return room + '_' + ID
@route('/get_sample_list', method='POST')
def get_sample_list():
    all_names = []
    for (root, dirs, files) in os.walk("game/samples"):
        all_names = dirs
        break
    all_names.sort()
    result = json.dumps(all_names)
    return result
@route('/save_as_sample', method='POST')
def save_as_sample():
    room = request.forms.get("room")
    step = request.forms.get("step")
    previous_path = 'game/rooms/' + room
    new_path = 'game/samples/' + room
    os.makedirs(new_path, exist_ok=True)
    def transfer(previous_file_name, new_file_name=None):
        if new_file_name is None:
            new_file_name = previous_file_name
        if os.path.exists(previous_path + '/' + previous_file_name):
            shutil.copy(previous_path + '/' + previous_file_name, new_path + '/' + new_file_name)
    transfer('sketch.original.jpg')
    transfer('sketch.improved.jpg')
    transfer('sketch.colorization.jpg')
    transfer('sketch.rendering.jpg')
    transfer('sketch.recolorization.jpg')
    transfer('sketch.de_painting.jpg')
    transfer('result.' + step + '.jpg', 'result.sample.jpg')
    transfer('reference.' + step + '.jpg', 'reference.sample.jpg')
    transfer('icon.' + step + '.jpg', 'icon.sample.jpg')
    transfer('composition.' + step + '.jpg', 'composition.sample.jpg')
    transfer('options.' + step + '.json', 'options.sample.json')
    print('saved')
    return 'ok'
def server_loop():
    while True:
        time.sleep(0.173)
        try:
            handle_sketch_upload_pool()
            handle_painting_pool()
        except Exception as e:
            print(e)
os.makedirs('game/rooms', exist_ok=True)
os.makedirs('results', exist_ok=True)
threading.Thread(target=server_loop).start()
if multiple_process:
    run(host="0.0.0.0", port=80, server='paste')
else:
    run(host="0.0.0.0", port=8000, server='paste')
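To close, here is a rough sketch of how a client could drive this server over HTTP. It is my own example, not part of the project: it assumes the server is running locally on port 8000 and uses the requests library, and the form fields and the options JSON simply mirror what upload_sketch and request_result read above (any "step" value other than 'sample' skips the sample-copy branch).

import base64
import json
import requests

def colorize_remote(sketch_path, host='http://127.0.0.1:8000'):
    # encode the sketch file as the data URL that get_request_image() decodes
    with open(sketch_path, 'rb') as f:
        data_url = 'data:image/png;base64,' + base64.urlsafe_b64encode(f.read()).decode('ascii')
    # 1) upload the sketch; the server replies with '<room>_<ID>' once preprocessing is done
    room = requests.post(host + '/upload_sketch',
                         data={'room': 'new', 'step': 'upload', 'method': 'colorization',
                               'sketch': data_url}).text.split('_')[0]
    # 2) request a painting with default options, no hint points and no reference image
    options = {'method': 'colorization', 'alpha': 0, 'points': [],
               'hasReference': False, 'lineColor': [0, 0, 0], 'line': False}
    token = requests.post(host + '/request_result',
                          data={'room': room, 'step': 'upload',
                                'options': json.dumps(options)}).text
    room, result_id = token.split('_')
    # 3) fetch the finished painting, which the static route serves from game/rooms/...
    return requests.get(host + '/rooms/%s/result.%s.jpg' % (room, result_id)).content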