3D Face Reconstruction: FaceScape Test

https://github.com/2706853499/3DScene

Rendering of OBJ files from the FaceScape database, binding textures with OpenCV or smoothing per-triangle colors. Paper: FaceScape: A Large-Scale High Quality 3D Face Dataset and Detailed Riggable 3D Face Prediction (CVPR 2020).

A C++ Windows solution that calls PCL and OpenCV. It includes a beautification effect; profile views show artifacts, but it is worth studying.
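The per-triangle color smoothing mentioned above can be sketched in a few lines with trimesh and NumPy. This is a minimal illustration under assumptions: the mesh path sample_face.obj is hypothetical, and averaging the three vertex colors of each face is just one simple way to flatten colors per triangle, not the repo's actual method.

import numpy as np
import trimesh

# hypothetical textured OBJ from the dataset
mesh = trimesh.load("sample_face.obj", process=False)

# bake the texture into per-vertex colors, then average the three vertex
# colors of every triangle to get a single smoothed color per face
vert_colors = mesh.visual.to_color().vertex_colors                   # (V, 4) RGBA
face_colors = vert_colors[mesh.faces].mean(axis=1).astype(np.uint8)  # (F, 4)
mesh.visual = trimesh.visual.ColorVisuals(mesh, face_colors=face_colors)
mesh.export("sample_face_flat.ply")  # PLY keeps per-face colors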

Open-source repository:

https://github.com/zhuhao-nju/facescape

Dependency:

https://github.com/yanght321/Detailed3DFace

v1.6 demo_bilinear_fit.ipynb:

import numpy as np, cv2, trimesh
from src.facescape_fitter import facescape_fitter
from src.utility import show_img_arr
from src.renderer import render_orthcam

fs_fitter = facescape_fitter(fs_file = "./bilinear_model_v1.6/facescape_bm_v1.6_847_50_52_id_front.npz", 
                             lm_file = "./predef/shape_predictor_68_face_landmarks.dat")

src_img = cv2.imread("./test_data/chan.jpg")

kp2d = fs_fitter.detect_kp2d(src_img) # extract 2D key points
mesh, _ = fs_fitter.fit_kp2d(kp2d) # fit model

# ========== Visualize Result ==========
# transform to orthogonal camera coordinate
mesh_tm = trimesh.Trimesh(vertices = mesh.vertices.copy(), 
                          faces = fs_fitter.fv_indices_front-1, 
                          process = False)
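# center x/y on the image, normalize by image height to the orthographic
# camera's coordinate range, then offset z so the mesh sits in front of the camera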
mesh_tm.vertices[:, :2] = mesh_tm.vertices[:, 0:2] - np.array([src_img.shape[1] / 2, src_img.shape[0] / 2])
mesh_tm.vertices = mesh_tm.vertices / src_img.shape[0] * 2
mesh_tm.vertices[:, 2] = mesh_tm.vertices[:, 2] - 10

# render texture image and depth
rend_depth, rend_tex = render_orthcam(mesh_tm, (1, 1), 
                                      rend_size = tuple(src_img.shape[:2]), 
                                      flat_shading=False)
mask = np.stack((rend_depth!=0, )*3, 2)
res_img = src_img.copy()
res_img[mask] = rend_tex[mask]

# visualize
vis_scale = 512. / np.max(src_img.shape[:2])
sc_img = cv2.resize(src_img, (round(src_img.shape[1]*vis_scale), 
                              round(src_img.shape[0]*vis_scale)))

sc_res_img = cv2.resize(res_img, (round(src_img.shape[1]*vis_scale), 
                                  round(src_img.shape[0]*vis_scale)))
show_img_arr(np.concatenate((sc_img, sc_res_img), 1), bgr_mode = True)

Error:

  File "D:\project\facescape\facescape-master\toolkit\src\facescape_fitter.py", line 12, in __init__
    self.exp_gmm = sklearn.mixture.GaussianMixture(n_components = len(self.exp_gmm_means), 
AttributeError: 'facescape_fitter' object has no attribute 'exp_gmm_means'

Attribute list:

'shape_bm_core'  'color_bm_core'  'color_bm_mean'  'fv_indices'  'ft_indices'  'fv_indices_front'  'ft_indices_front'  'vc_dict_front'  'v_indices_front'  'vert_num'  'face_num'  'frontal_vert_num'  'frontal_face_num'  'texcoords'  'facial_mask'  'sym_dict'  'lm_list_v16'  'vert_10to16_dict'  'vert_16to10_dict'

There is no attribute starting with 'exp' — this front-only model file apparently does not contain the expression GMM parameters that facescape_fitter expects.
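To check which arrays a model file actually contains before constructing the fitter, the npz archive can be inspected directly (a quick diagnostic snippet, not part of the toolkit):

import numpy as np

bm = np.load("./bilinear_model_v1.6/facescape_bm_v1.6_847_50_52_id_front.npz",
             allow_pickle=True)
print(sorted(bm.files))  # lists every stored array; no 'exp_gmm_*' keys appear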

Running the v1.3 demo reports an error:

  File "D:\project\facescape\Detailed3DFace-master\bilinear_model.py", line 35, in __init__
    self.exp_gmm = pickle.load(f)
ModuleNotFoundError: No module named 'sklearn.mixture.gaussian_mixture'

The pickled GMM references sklearn.mixture.gaussian_mixture, a private module path that no longer exists in recent scikit-learn releases, so unpickling fails. The version the project needs seems to be 0.21.2; try installing it:

pip install scikit-learn==0.21.2 --user

The declared dependency, however, is:

scikit-learn>=0.23.2
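If downgrading is not an option, a common workaround is to alias the removed module path before unpickling, so pickle can resolve GaussianMixture under a modern scikit-learn. A sketch (the pickle filename exp_gmm.pkl is hypothetical, and unpickling across major versions can still fail if the estimator's internal attributes changed):

import pickle
import sys
import sklearn.mixture

# map the removed private module path back to the public module, so that
# pickle finds GaussianMixture when it resolves the stored class path
sys.modules['sklearn.mixture.gaussian_mixture'] = sklearn.mixture

with open('exp_gmm.pkl', 'rb') as f:  # hypothetical filename
    exp_gmm = pickle.load(f)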

FaceScape dataset:

https://facescape.nju.edu.cn/

Multi-view data with large profile angles:

https://nbviewer.jupyter.org/github/zhuhao-nju/facescape/blob/master/toolkit/demo_mview_projection.ipynb

import sys, cv2, json
import numpy as np
import src.renderer as renderer
import src.utility as util

# test_num is the camera index
def projection_test(test_num, scale=1.0):

    # read params
    with open("../samples/sample_mview_data/4_anger/params.json", 'r') as f:
        params = json.load(f)

    # extract KRt dist
    K = np.array(params['%d_K' % test_num])
    Rt = np.array(params['%d_Rt' % test_num])
    dist = np.array(params['%d_distortion' % test_num], dtype = np.float64)  # np.float was removed in NumPy 1.24
    h_src = params['%d_height' % test_num]
    w_src = params['%d_width' % test_num]

    # scale h and w
    h, w = int(h_src * scale), int(w_src * scale)
    K[:2,:] = K[:2,:] * scale

    # read image
    src_img = cv2.imread("../samples/sample_mview_data/4_anger/%d.jpg" % test_num)
    src_img = cv2.resize(src_img, (w, h))

    # undistort image
    undist_img = cv2.undistort(src_img, K, dist)

    # read and render mesh
    mesh_dirname = "../samples/sample_mview_data/4_anger.ply"
    _, rend_img = renderer.render_cvcam(mesh_dirname, K, Rt, rend_size=(h, w))

    # project and show
    mix_img = cv2.addWeighted(rend_img, 0.5, undist_img, 0.5, 0)
    concat_img = np.concatenate((undist_img, mix_img, rend_img), axis = 1)
    
    return concat_img

util.show_img_arr(projection_test(49, 0.05), bgr_mode = True)
util.show_img_arr(projection_test(50, 0.05), bgr_mode = True)
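To sanity-check that K, Rt and the distortion coefficients line up with the image, the mesh vertices can also be projected by hand with OpenCV. A verification sketch, assuming K, Rt and dist have been loaded for one camera exactly as inside projection_test:

import cv2, trimesh
import numpy as np

mesh = trimesh.load("../samples/sample_mview_data/4_anger.ply", process=False)
rvec, _ = cv2.Rodrigues(Rt[:, :3])        # rotation matrix -> Rodrigues vector
tvec = Rt[:, 3].reshape(3, 1)
pts2d, _ = cv2.projectPoints(np.asarray(mesh.vertices), rvec, tvec, K, dist)
# pts2d holds the distorted pixel coordinates of every vertex; they should
# fall on the face region of the original (distorted) image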

[Result images: projection overlays for cameras 49 and 50]

Rendering the multi-view model:

# render multi-view model
import cv2, json, os
import numpy as np
import src.renderer as renderer

cam_idx = 49
mesh_dirname = "../samples/sample_mview_data/4_anger.ply"

# read params to find a camera setting
with open("../samples/sample_mview_data/4_anger/params.json", 'r') as f:
    params = json.load(f)

# extract KRt
K = np.array(params['%d_K' % cam_idx])
Rt = np.array(params['%d_Rt' % cam_idx])
h_src = params['%d_height' % cam_idx]
w_src = params['%d_width' % cam_idx]

# scale K RT h w
scale = 0.2
h, w = int(h_src * scale), int(w_src * scale)
K[:2,:] = K[:2,:] * scale

# render
rend_depth, rend_img = renderer.render_cvcam(mesh_dirname, K, Rt, rend_size=(h, w))

# save image and depth
os.makedirs("./demo_output/", exist_ok = True)
cv2.imwrite("./demo_output/mview_rend_view%d.jpg" % cam_idx, rend_img)
rend_depth_vis = rend_depth.copy()
rend_depth_vis[rend_depth!=0] -= np.min(rend_depth[rend_depth!=0])  # offset foreground only so the background stays 0
rend_depth_vis = (rend_depth_vis / np.max(rend_depth_vis) * 255).astype(np.uint8)
cv2.imwrite("./demo_output/mview_depth_view%d.jpg" % cam_idx, rend_depth_vis)
print("results saved to ./demo_output/")

Rendering a TU (topologically uniformed) base model:

# render multi-view model
import cv2, json, os, trimesh
import numpy as np
import src.renderer as renderer

# read tu base mesh
tu_base_mesh = trimesh.load("../samples/sample_tu_model/1_neutral.obj", process=False)

# extract K Rt
K = np.array([[2000, 0, 256],
              [0, 2000, 256],
              [0, 0, 1]], dtype=np.float64)

Rt = np.array([[1, 0, 0, 0],
               [0, -1, 0, 0],
               [0, 0, -1, 1200]], dtype=np.float64)
h, w = 512, 512
tu_base_mesh.visual.material.diffuse = np.array([255, 255, 255, 255], dtype=np.uint8)


# render texture image and depth
rend_depth, rend_tex = renderer.render_cvcam(tu_base_mesh, K, Rt, rend_size=(h, w), 
                                             flat_shading=True)
# render color image
_, rend_color = renderer.render_cvcam(tu_base_mesh, K, Rt, rend_size=(h, w), 
                                      flat_shading=False)

# render shade image
tu_base_mesh.visual.material.image = np.ones((1, 1, 3), dtype=np.uint8)*255
_, rend_shade = renderer.render_cvcam(tu_base_mesh, K, Rt, rend_size=(h, w), 
                                      flat_shading=False)

# save all
rend_depth_vis = rend_depth.copy()
rend_depth_vis[rend_depth!=0] = rend_depth_vis[rend_depth!=0] - np.min(rend_depth[rend_depth!=0])
rend_depth_vis = (rend_depth_vis / np.max(rend_depth_vis) * 255).astype(np.uint8)

# save image and depth
os.makedirs("./demo_output/", exist_ok = True)
cv2.imwrite("./demo_output/tu_tex.jpg", rend_tex)
cv2.imwrite("./demo_output/tu_color.jpg", rend_color)
cv2.imwrite("./demo_output/tu_shade.jpg", rend_shade)
cv2.imwrite("./demo_output/tu_depth.jpg", rend_depth_vis)
print("results saved to ./demo_output/")

demo_bilinear_basic.ipynb code:

import os
import numpy as np
import trimesh
from src.facescape_bm import facescape_bm
from src.renderer import render_cvcam
from src.utility import show_img_arr

os.makedirs("./demo_output/", exist_ok = True)  # make sure the export directory exists

np.random.seed(1000)

model = facescape_bm("./bilinear_model_v1.6/facescape_bm_v1.6_847_50_52_id_front.npz")

# create random identity vector
random_id_vec = (np.random.random(50) - 0.5) * 0.1
if random_id_vec[0]>0:
    random_id_vec = -random_id_vec
    
# create random expression vector
exp_vec = np.zeros(52)
exp_vec[np.random.randint(52)] = 1
    
# create random color vector
random_color_vec = (np.random.random(100) - 0.5) * 100

# generate and save full head mesh
mesh_full = model.gen_full(random_id_vec, exp_vec)
mesh_full.export("./demo_output/bm_v16_result_full.obj")

# generate and save facial mesh
mesh_face = model.gen_face(random_id_vec, exp_vec)
mesh_face.export("./demo_output/bm_v16_result_face.obj")

# generate and save facial mesh with rough vertex color
mesh_face_color = model.gen_face_color(random_id_vec, exp_vec, random_color_vec)
mesh_face_color.export("./demo_output/bm_v16_result_face_color.obj", enable_vc = True)

print("Results saved to './demo_output/'")

# render generated meshes
Rt = np.array([[1, 0, 0, 0],
               [0, -1, 0, 0],
               [0, 0, -1, 500]], dtype=np.float64)

depth_full, image_full = render_cvcam(trimesh.Trimesh(vertices = mesh_full.vertices, 
                                                      faces = mesh_full.faces_v-1),
                                      Rt = Rt)

depth_face, image_face = render_cvcam(trimesh.Trimesh(vertices = mesh_face.vertices, 
                                                      faces = mesh_face.faces_v-1),
                                      Rt = Rt)

depth_face_color, image_face_color = render_cvcam(trimesh.Trimesh(
                                             vertices = mesh_face_color.vertices, 
                                             faces = mesh_face_color.faces_v-1,
                                             vertex_colors = mesh_face_color.vert_colors),
                                                  Rt = Rt)

# show rendered images
merge_img = np.concatenate((image_full, image_face, image_face_color), 1)

show_img_arr(merge_img, bgr_mode = True)

# new Rt for rendering
Rt = np.array([[1, 0, 0, 0],
               [0, -1, 0, 0],
               [0, 0, -1, 1000]], dtype=np.float64)

random_faces_list = []
for i in range(6):
    # create random identity vector
    random_id_vec = np.random.normal(model.id_mean, np.sqrt(model.id_var))
    
    # create random expression vector
    exp_vec = np.zeros(52)
    exp_vec[0] = 1

    # generate full head mesh
    mesh_full = model.gen_full(random_id_vec, exp_vec)

    # render
    depth_full, image_full = render_cvcam(trimesh.Trimesh(vertices = mesh_full.vertices, 
                                                      faces = mesh_full.faces_v-1),
                                          Rt = Rt)

    random_faces_list.append(image_full)

# show rendered images
merge_faces_img = np.concatenate(random_faces_list, 1)
show_img_arr(merge_faces_img, bgr_mode = True)
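Because identities live in a linear parameter space, two sampled identity vectors can be blended directly. A small sketch reusing model.gen_full, render_cvcam and Rt from above (the interpolation itself is an illustration, not part of the notebook):

# interpolate between two random identities at a fixed expression
id_a = np.random.normal(model.id_mean, np.sqrt(model.id_var))
id_b = np.random.normal(model.id_mean, np.sqrt(model.id_var))

morph_list = []
for t in np.linspace(0, 1, 5):
    mesh_t = model.gen_full((1 - t) * id_a + t * id_b, exp_vec)
    _, img_t = render_cvcam(trimesh.Trimesh(vertices = mesh_t.vertices,
                                            faces = mesh_t.faces_v - 1),
                            Rt = Rt)
    morph_list.append(img_t)

show_img_arr(np.concatenate(morph_list, 1), bgr_mode = True)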

import pickle, os
import numpy as np

# triangle faces
with open('./data/predef_front_faces.pkl', 'rb') as f:
    faces_front = pickle.load(f)

with open('./data/front_indices.pkl', 'rb') as f:
    indices_front = pickle.load(f)
    
with open('./data/predef_faces.pkl', 'rb') as f:
    faces_full = pickle.load(f)
    
# texture coordinates
with open('./data/predef_texcoords.pkl', 'rb') as f:
    texcoords = pickle.load(f)

# bilinear model with 52 expression parameters and 50 identity parameters
# Tucker decomposition is performed only along the identity dimension, so the
# expression parameters keep their semantic meaning as blendshape weights
core_tensor = np.load('./data/core_847_50_52.npy')
factors_id = np.load('./data/factors_id_847_50_52.npy')

matrix_tex = np.load('./data/matrix_text_847_100.npy')
mean_tex = np.load('./data/mean_text_847_100.npy')
factors_tex = np.load('./data/factors_tex_847_100.npy')

id_vec = factors_id[0]  # renamed from 'id', which shadows the Python builtin
exp = np.zeros(52)
exp[0] = 1

core_tensor = core_tensor.transpose((2, 1, 0))
mesh_vertices_full = core_tensor.dot(id_vec).dot(exp).reshape((-1, 3))
mesh_vertices_front = mesh_vertices_full[indices_front]

tex = mean_tex + matrix_tex.dot(factors_tex[0])
tex = tex.reshape((-1, 3)) / 255

os.makedirs("./demo_output/", exist_ok = True)
with open('./demo_output/bm_v10_result_full.obj', "w") as f:
    for i in range(mesh_vertices_full.shape[0]):
        f.write("v %.6f %.6f %.6f\n" % (mesh_vertices_full[i, 0], mesh_vertices_full[i, 1], mesh_vertices_full[i, 2]))
    for i in range(len(texcoords)):
        f.write("vt %.6f %.6f\n" % (texcoords[i][0], texcoords[i][1]))
    for face in faces_full:
        face_vertices, face_normals, face_texture_coords, material = face
        f.write("f %d/%d %d/%d %d/%d\n" % (
            face_vertices[0], face_texture_coords[0], face_vertices[1], face_texture_coords[1], face_vertices[2],
            face_texture_coords[2]))

with open('./demo_output/bm_v10_result_face_color.obj', "w") as f:
    for i in range(mesh_vertices_front.shape[0]):
        f.write("v %.6f %.6f %.6f %.6f %.6f %.6f\n" % (
            mesh_vertices_front[i, 0], mesh_vertices_front[i, 1], mesh_vertices_front[i, 2], tex[i, 2], tex[i, 1], tex[i, 0]))
    for face in faces_front:
        face_vertices, face_normals, face_texture_coords, material = face
        f.write("f %d %d %d\n" % (face_vertices[0], face_vertices[1], face_vertices[2]))
print("Results saved to './demo_output/'")
