# Display the grayscale image and inspect its container type and dimensions.
fig() + image(img_gray, cmap='gray')
print(type(img_gray))
print(img_gray.shape)
printMat2D(img_gray)
# Read a single pixel value at (row=18, col=16).
val = img_gray[18, 16]
print(val)
# Set one pixel to white on a copy (255 = max intensity for 8-bit gray).
img_gray_copy = image_copy(img_gray)
img_gray_copy[18, 16] = 255
fig() + image(img_gray_copy, cmap='gray')
# Set a vertical segment (rows 18..24 of column 16) to white.
img_gray_copy = image_copy(img_gray)
img_gray_copy[18:25, 16] = 255
fig() + image(img_gray_copy, cmap='gray')
# Set a rectangular region to white.
img_gray_copy = image_copy(img_gray)
img_gray_copy[18:25, 16:25] = 255
fig() + image(img_gray_copy, cmap='gray')
# A larger white square.
img_gray_copy = image_copy(img_gray)
img_gray_copy[8:25, 8:25] = 255
fig() + image(img_gray_copy, cmap='gray')
# Draw a rectangle outline from two vertical and two horizontal white lines.
img_gray_copy = image_copy(img_gray)
img_gray_copy[3:23, 22] = 255
img_gray_copy[2:23, 7] = 255
img_gray_copy[23, 7:23] = 255
img_gray_copy[2, 8:23] = 255
fig() + image(img_gray_copy, cmap='gray')
# Show the RGB image and its (H, W, 3) shape.
fig() + image(img_shibe)
print(img_shibe.shape)
# Split into three single-channel images with the course helper.
red, green, blue = color_split(img_shibe)
fig(1, 3) + [image(red, cmap='Reds'), image(green, cmap='Greens'), image(blue, cmap='Blues')]
# Recombine the channels; this should reproduce the original image.
merged_img = color_merge(red, green, blue)
fig() + image(merged_img)
# Equivalent manual split using channel indexing on the last axis.
r = img_shibe[:, :, 0]
g = img_shibe[:, :, 1]
b = img_shibe[:, :, 2]
fig(1, 3) + [image(r, cmap='Reds'), image(g, cmap='Greens'), image(b, cmap='Blues')]
fig() + image(img_raw)
# Swap the R and B channels (RGB -> BGR) using a temporary copy of channel 0,
# because in-place slice assignment would otherwise overwrite the source data.
img_bgr = image_copy(img_raw)
tmp = image_copy(img_bgr[:, :, 0])
img_bgr[:, :, 0] = img_bgr[:, :, 2]
img_bgr[:, :, 2] = tmp
fig() + image(img_bgr)
# "Warm" filter: halve the blue channel. Convert to float first so the
# division does not truncate in an integer dtype.
img_warm = image_copy(img_raw)
img_warm = image_int2float(img_warm)
img_warm[:, :, 2] = img_warm[:, :, 2] / 2
img_warm = image_float2int(img_warm)
fig() + image(img_warm)
# Channel-swap expressed as a color-space matrix: maps (R, G, B) -> (B, G, R).
mat = [
[0, 0, 1],
[0, 1, 0],
[1, 0, 0]
]
img_bgr = image_copy(img_raw)
img_bgr = map_color_space(img_bgr, mat)
fig() + image(img_bgr)
# NOTE(review): this second `mat` is never used below — the following lines
# redo the "warm" (halve blue) effect directly; looks like leftover from editing.
mat = [[0, 0, 1],
[0, 1, 0],
[1, 0, 0]]
img_bgr = image_copy(img_raw)
img_bar_warm = image_int2float(img_bgr)
img_bar_warm[:, :, 2] /= 2
img_bar_warm = image_float2int(img_bar_warm)
fig() + image(img_bar_warm)
# Classic sepia ("retro") tone transformation matrix.
mat = [
[0.393, 0.769, 0.189],
[0.349, 0.686, 0.168],
[0.272, 0.534, 0.131]
]
img_retro = image_copy(img_raw)
img_retro = image_int2float(img_retro)
img_retro = map_color_space(img_retro, mat)
# Row sums exceed 1, so results can leave the valid range — clamp to 0..255.
img_retro = bound(img_retro, 0, 255)
img_retro = image_float2int(img_retro)
fig() + image(img_retro)
# Colour-blindness simulation via the LMS cone space.
# NOTE(review): these look like the standard Vienot/Brettel protanopia
# simulation matrices — confirm against the original reference.
rgb2lms_mat = [
[17.8824, 43.5161, 4.11935],
[3.45565, 27.1554, 3.86714],
[0.0299566, 0.184309, 1.46709]
]
# Numerical inverse of rgb2lms_mat: maps LMS back to RGB.
lms2rgb_mat = [
[8.09444479e-02, -1.30504409e-01, 1.16721066e-01],
[-1.02485335e-02, 5.40193266e-02, -1.13614708e-01],
[-3.65296938e-04, -4.12161469e-03, 6.93511405e-01]
]
# The first LMS channel is discarded and rebuilt from the other two.
lms_mix_mat = [
[0, 2.02344, -2.52581],
[0, 1, 0],
[0, 0, 1]
]
# Pipeline: int -> float -> LMS -> mix -> RGB -> clamp -> int.
img_cb = image_copy(img_raw)
img_cb = image_int2float(img_cb)
img_cb = map_color_space(img_cb, rgb2lms_mat)
img_cb = map_color_space(img_cb, lms_mix_mat)
img_cb = map_color_space(img_cb, lms2rgb_mat)
img_cb = bound(img_cb, 0, 255)
img_cb = image_float2int(img_cb)
# Show original and simulated images for comparison.
fig() + image(img_raw)
fig() + image(img_cb)
import itertools
import random
# Build a random 3x3 color-space matrix with uniform [0, 1) entries,
# then apply it exactly like the sepia matrix above.
mat = [[], [], []]
for row in mat:
    for _ in range(3):
        row.append(random.random())
img_retro = image_copy(img_raw)
img_retro = image_int2float(img_retro)
img_retro = map_color_space(img_retro, mat)
img_retro = bound(img_retro, 0, 255)
img_retro = image_float2int(img_retro)
fig() + image(img_retro)
# "Woodcut" effect: convert to grayscale, then hard-threshold at 170.
# Presumably values above 170 map to 255 and the rest to 0 — confirm
# binary_threshold's argument order.
img_woodcut = image_copy(img_wzj)
img_woodcut = rgb2gray(img_woodcut)
img_woodcut = binary_threshold(img_woodcut, 170, 0, 255)
fig() + image(img_woodcut, cmap='gray')
def groundglass_code(img, k=3):
    """Frosted-glass effect: each output pixel copies a random nearby pixel.

    For every position, a source offset is drawn uniformly from [-k, k] in
    each axis and clamped to the image bounds, so a larger k scatters pixels
    over a wider neighbourhood.
    """
    out = image_copy(img)
    height, width = get_image_size(img)
    for row in range(height):
        for col in range(width):
            # Draw the row offset first, then the column offset, to keep the
            # RNG call order identical to the original implementation.
            d_row = random_int(-k, k)
            d_col = random_int(-k, k)
            src_row = bound(row + d_row, 0, height - 1)
            src_col = bound(col + d_col, 0, width - 1)
            out[row, col, :] = img[src_row, src_col, :]
    return out
# NOTE(review): this calls `groundglass`, while the function defined above is
# named `groundglass_code` — presumably the course library ships an equivalent
# `groundglass`; confirm.
img_groundglass = groundglass(img_sand,3)
fig() + image(img_sand)
fig() + image(img_groundglass)
# A larger offset range (k=8) gives a much stronger scatter.
img_groundglass_ex = groundglass(img_sand,8)
fig() + image(img_sand)
fig() + image(img_groundglass_ex)
def oil_painting_code(img, num_bin=2, region_size=4):
    """Oil-painting effect: paint each region with its dominant colour.

    Pixels are first assigned to `num_bin` intensity bins; for every pixel,
    the most frequent colour inside its bounding box (of half-size
    `region_size`, clipped to the image) is computed and painted.
    """
    height, width = get_image_size(img)
    out = image_copy(img)
    bins = compute_bin_assignment(img, num_bin)
    for row in range(height):
        for col in range(width):
            y1, y2, x1, x2 = get_bounding_box(row, col, region_size, height, width)
            patch = img[y1:y2, x1:x2, :]
            patch_bins = bins[y1:y2, x1:x2]
            r_val, g_val, b_val = get_most_frequent_color(patch, patch_bins)
            # NOTE(review): the dominant colour is painted over the whole
            # bounding box rather than just pixel (row, col), so later boxes
            # overwrite earlier ones — preserved from the original.
            out[y1:y2, x1:x2, 0] = r_val
            out[y1:y2, x1:x2, 1] = g_val
            out[y1:y2, x1:x2, 2] = b_val
    return out
# NOTE(review): calls `oil_painting`, not the `oil_painting_code` defined
# above — presumably the library version; confirm.
img_painting = oil_painting(img_sand,3,4)
fig() + image(img_sand)
fig() + image(img_painting)
# More colour bins (20) preserves more of the original palette.
img_painting = oil_painting(img_sand,20,4)
fig() + image(img_sand)
fig() + image(img_painting)
回顾图像扭曲的映射函数公式:

$$u, v = T(x, y)$$

T() 将原图的 (x, y) 对应到新图的 (u, v) 像素点上去。然而,在计算机实现的时候,这个过程是相反的:对于新图上的一个像素点,我们希望找到它在原图上对应的源点。因此,编写程序时,映射函数公式其实应该写成:

$$x, y = T^{-1}(u, v)$$
坍缩映射函数:

$$x' = x \cdot (1 - p \cdot r) / (1 - p)$$

$$y' = y \cdot (1 - p \cdot r) / (1 - p)$$

$$r = \sqrt{x^2 + y^2}$$
def warp1(x, y):
    """Collapse-lens mapping: pull points toward the origin.

    Implements x' = x * (1 - p*r) / (1 - p) (and likewise for y) with
    strength p = 0.4, where r is the distance of (x, y) from the origin.
    """
    strength = 0.4
    radius = (x ** 2 + y ** 2) ** 0.5
    gain = 1 - strength * radius
    denom = 1 - strength
    return x * gain / denom, y * gain / denom
# Apply the collapse warp over the whole anchor image and compare.
img_anchor_warp = global_len_warp(img_anchor, warp1)
fig() + image(img_anchor)
fig() + image(img_anchor_warp)
def warp1(x, y):
    """Collapse-lens mapping (strength p = 0.4); redefined for the portrait demo."""
    p = 0.4
    dist = (x ** 2 + y ** 2) ** 0.5
    def shrink(coord):
        # Same formula applied to either axis.
        return coord * (1 - p * dist) / (1 - p)
    return shrink(x), shrink(y)
# Same collapse warp applied to the portrait image.
img_warp_boy = global_len_warp(img_boy, warp1)
fig() + image(img_boy)
fig() + image(img_warp_boy)
def warp1_ex(x, y):
    """Weaker collapse warp: same formula as warp1 but with p = 0.2."""
    p = 0.2
    r = (x ** 2 + y ** 2) ** 0.5
    factor = 1 - p * r
    xp, yp = x * factor / (1 - p), y * factor / (1 - p)
    return xp, yp
# Gentler warp (p = 0.2) for comparison with the p = 0.4 version.
img_warp_boy_ex = global_len_warp(img_boy, warp1_ex)
fig() + image(img_boy)
fig() + image(img_warp_boy_ex)
棱镜扭曲函数:

$$x' = \sin(x) \cdot x^2$$

$$y' = \sin(y) \cdot y^2$$
def warp2(x, y):
    """Prism warp: maps each coordinate t to sin(t) * t^2."""
    xp, yp = (sin(t) * t ** 2 for t in (x, y))
    return xp, yp
# Prism warp applied to the anchor image.
img_warp_anchor_ex2 = global_len_warp(img_anchor, warp2)
fig() + image(img_warp_anchor_ex2)
def warp2(x, y):
    """Prism warp (redefined for the portrait demo): t -> sin(t) * t^2."""
    def bend(t):
        return sin(t) * t ** 2
    return bend(x), bend(y)
# Prism warp applied to the portrait image.
img_warp_boy2 = global_len_warp(img_boy, warp2)
fig() + image(img_warp_boy2)
def warp_ex(x, y):
    """Milder prism-style warp, linear in the coordinate: t -> sin(t) * t."""
    return sin(x) * x, sin(y) * y
# The milder sin(t)*t warp applied to the portrait image.
img_warp_boy_insane = global_len_warp(img_boy, warp_ex)
fig() + image(img_warp_boy_insane)
# Local warp: parameters appear to be [centre, displacement, radius] —
# confirm against local_warp_image's documentation.
warp_params = [(125, 125), (0, 80), 120]
img_warp_local_anchor = local_warp_image(img_anchor, warp_params)
# Same displacement with a smaller radius: a sharper, more local deformation.
warp_params = [(125, 125), (0, 80), 60]
img_warp_local_anchor2 = local_warp_image(img_anchor, warp_params)
fig() + image(img_warp_local_anchor)
fig() + image(img_warp_local_anchor2)
# A gentle local warp on the portrait image.
warp_params = [(100, 100), (0, 20), 50]
img_warp_local_boy = local_warp_image(img_boy, warp_params)
fig() + image(img_warp_local_boy)
在一系列关于人脸的研究中,科学家们发现人脸部某些重要位置的坐标定位对于后续任务(如人脸对齐、分割等)十分重要,这些点被称为人脸关键点。按照精度上升,常见的有 68、104 和 240 点方案。下图就是 68 点的分布情况:

![68 点人脸关键点分布](https://gitee.com/liuyun492/frontiers_in_-computer_-science/raw/img/Typora/%E4%B8%8B%E8%BD%BD.png)
# Detect facial landmarks (68-point scheme, per the text above) and overlay them.
points = detect_keypoints(img_boy)
print(points)
img_boy_with_points = draw_points(img_boy, points)
fig() + image(img_boy_with_points)
# Pick individual landmarks by their 68-point indices:
# 48 / 54 are the mouth corners, 19 / 24 the eyebrow midpoints.
mouth_left = points[48]
mouth_right = points[54]
left_eyebrow = points[19]
right_eyebrow = points[24]
cust_points = [mouth_left, mouth_right, left_eyebrow, right_eyebrow]
img_boy_cust = draw_points(img_boy, cust_points)
fig() + image(img_boy_cust)
# Landmark index ranges for whole facial regions (68-point convention).
left_eye = points[36:42]
right_eye = points[42:48]
mouth = points[48:60]
img_boy_face = draw_points(img_boy, left_eye + right_eye + mouth)
fig() + image(img_boy_face)
# Mouth centre = mean of the four extreme mouth landmarks (51 top, 57 bottom).
mouth_up = points[51]
mouth_down = points[57]
mouth_center_y = int((mouth_up[0] + mouth_down[0] + mouth_left[0] + mouth_right[0]) / 4)
mouth_center_x = int((mouth_up[1] + mouth_down[1] + mouth_left[1] + mouth_right[1]) / 4)
mouth_center = (mouth_center_y, mouth_center_x)
# NOTE(review): draw_points is given a single tuple here rather than a list of
# points as in the earlier calls — confirm it accepts both.
img_boy_mouth = draw_points(img_boy, mouth_center)
fig() + image(img_boy_mouth)
# Repeat the landmark pipeline on the second portrait.
points = detect_keypoints(img_wzj)
img_with_points = draw_points(img_wzj, points)
fig() + image(img_with_points)
# 68-point indices: 48/54 are the mouth corners, 19/24 the eyebrow midpoints.
mouth_left = points[48]
mouth_right = points[54]
left_eyebrow = points[19]
right_eyebrow = points[24]
cust_points = [mouth_left, mouth_right, left_eyebrow, right_eyebrow]
img_with_points = draw_points(img_wzj, cust_points)
fig() + image(img_with_points)
# Animated sticker: each entry is [anchor point, displacement, radius]
# (displacement presumably (dy, dx) — confirm make_sticker's contract).
# Eyebrows are pushed up; mouth corners are pulled outward and up.
warp_params = [
[left_eyebrow, (0, -5), 35],
[right_eyebrow, (0, -5), 35],
[mouth_left, (-5, -5), 40],
[mouth_right, (5, -5), 40]
]
sticker = make_sticker(img_wzj, warp_params)
fig() + gif(sticker)
这个大家就自己做吧,其实也不用非得写的,随便一张网图的url就行
# Load an image directly from a URL (replace the placeholder with a real link).
url = "https://xxxxx"
img_url = imread(url)