
Making Photos Move with Python and first-order-model

2022-06-24 18:00:58

Introduction

I came across a very interesting project; I had actually seen similar results before on platforms such as Baidu PaddlePaddle.

It can make a photo move by following the facial expressions in a driving video. Take a look at the results the project shows.

Project address: first-order-model project address

As usual, regardless of the various results the author shows, I'll test it myself.
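A minimal setup sketch for getting the code locally; the repository URL below is my assumption, so use whatever address the project link above actually points to:

git clone https://github.com/AliaksandrSiarohin/first-order-model.git
cd first-order-model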

Downloading resources and installation

Let's first look at the basic project information in the README. Besides driving a photo with facial expressions, it can also do pose transfer.

The README provides online download links for the model files.

The files are large and hard to download, so I downloaded them and put them on my cloud drive; you can grab them from the link below.

Link (extraction code: ikix)

Put the model files into a newly created checkpoint folder under the project root.

Install the dependencies listed in requirements.txt.
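A minimal sketch of these two steps, assuming you want the vox model (the tool code later in this post expects ./checkpoint/vox-cpk.pth.tar and ./config/vox-256.yaml):

mkdir checkpoint    # put vox-cpk.pth.tar (and any other downloaded checkpoints) here
pip install -r requirements.txt -i https://pypi.douban.com/simple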

Installation supplement

When testing the command from the README, you may run into the following error.

Traceback (most recent call last):
  File "demo.py", line 17, in <module>
    from animate import normalize_kp
  File "D:\spyder\first-order-model\animate.py", line 7, in <module>
    from frames_dataset import PairedDataset
  File "D:\spyder\first-order-model\frames_dataset.py", line 10, in <module>
    from augmentation import AllAugmentationTransform
  File "D:\spyder\first-order-model\augmentation.py", line 13, in <module>
    import torchvision
  File "C:\Users\huyi\.conda\envs\fom\lib\site-packages\torchvision\__init__.py", line 2, in <module>
    from torchvision import datasets
  File "C:\Users\huyi\.conda\envs\fom\lib\site-packages\torchvision\datasets\__init__.py", line 9, in <module>
    from .fakedata import FakeData
  File "C:\Users\huyi\.conda\envs\fom\lib\site-packages\torchvision\datasets\fakedata.py", line 3, in <module>
    from .. import transforms
  File "C:\Users\huyi\.conda\envs\fom\lib\site-packages\torchvision\transforms\__init__.py", line 1, in <module>
    from .transforms import *
  File "C:\Users\huyi\.conda\envs\fom\lib\site-packages\torchvision\transforms\transforms.py", line 16, in <module>
    from . import functional as F
  File "C:\Users\huyi\.conda\envs\fom\lib\site-packages\torchvision\transforms\functional.py", line 5, in <module>
    from PIL import Image, ImageOps, ImageEnhance, PILLOW_VERSION
ImportError: cannot import name 'PILLOW_VERSION' from 'PIL' (C:\Users\huyi\.conda\envs\fom\lib\site-packages\PIL\__init__.py)

This problem is mainly caused by the Pillow version I'm using being too new. If you don't want to hunt for a matching older version, you can fix it the way I did.

1. Edit the functional.py code in torchvision and change PILLOW_VERSION to __version__ (see the sketch after this list).

2. Upgrade imageio.

pip install --upgrade imageio -i https://pypi.douban.com/simple

3. Install the imageio-ffmpeg module.

pip install imageio-ffmpeg -i https://pypi.douban.com/simple
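For step 1 above, the change in torchvision's transforms/functional.py is a one-line edit. A sketch of one way to apply it; aliasing the import keeps any later references to PILLOW_VERSION in that file working:

# Before (newer Pillow no longer exports PILLOW_VERSION):
from PIL import Image, ImageOps, ImageEnhance, PILLOW_VERSION
# After:
from PIL import Image, ImageOps, ImageEnhance, __version__ as PILLOW_VERSION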

Verifying the tool code

I won't repeat the official usage tests here; you can try it yourself with the command below.
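From memory, the README's demo command looks roughly like this (the paths are placeholders; point them at your own files):

python demo.py --config config/vox-256.yaml --driving_video path/to/driving.mp4 --source_image path/to/source.png --checkpoint checkpoint/vox-cpk.pth.tar --relative --adapt_scale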

Here I recommend a visualization library, gradio; below I've reworked the code from demo.py.

The new tool file's code is as follows:

#!/user/bin/env python
# coding=utf-8
"""
@project : first-order-model
@author  : 劍客阿良_ALiang
@file   : hy_gradio.py
@ide    : PyCharm
@time   : 2022-06-23 14:35:28
"""
import uuid
from typing import Optional
 
import gradio as gr
import matplotlib
 
matplotlib.use('Agg')
import os, sys
import yaml
from argparse import ArgumentParser
from tqdm import tqdm
 
import imageio
import numpy as np
from skimage.transform import resize
from skimage import img_as_ubyte
import torch
from sync_batchnorm import DataParallelWithCallback
 
from modules.generator import OcclusionAwareGenerator
from modules.keypoint_detector import KPDetector
from animate import normalize_kp
from scipy.spatial import ConvexHull
 
if sys.version_info[0] < 3:
    raise Exception("You must use Python 3 or higher. Recommended version is Python 3.7")
 
 
def load_checkpoints(config_path, checkpoint_path, cpu=False):
    with open(config_path) as f:
        config = yaml.load(f, Loader=yaml.FullLoader)  # explicit Loader: newer PyYAML rejects yaml.load(f) without one
 
    generator = OcclusionAwareGenerator(**config['model_params']['generator_params'],
                                        **config['model_params']['common_params'])
    if not cpu:
        generator.cuda()
 
    kp_detector = KPDetector(**config['model_params']['kp_detector_params'],
                             **config['model_params']['common_params'])
    if not cpu:
        kp_detector.cuda()
 
    if cpu:
        checkpoint = torch.load(checkpoint_path, map_location=torch.device('cpu'))
    else:
        checkpoint = torch.load(checkpoint_path)
 
    generator.load_state_dict(checkpoint['generator'])
    kp_detector.load_state_dict(checkpoint['kp_detector'])
 
    if not cpu:
        generator = DataParallelWithCallback(generator)
        kp_detector = DataParallelWithCallback(kp_detector)
 
    generator.eval()
    kp_detector.eval()
 
    return generator, kp_detector
 
 
def make_animation(source_image, driving_video, generator, kp_detector, relative=True, adapt_movement_scale=True,
                   cpu=False):
    with torch.no_grad():
        predictions = []
        source = torch.tensor(source_image[np.newaxis].astype(np.float32)).permute(0, 3, 1, 2)
        if not cpu:
            source = source.cuda()
        driving = torch.tensor(np.array(driving_video)[np.newaxis].astype(np.float32)).permute(0, 4, 1, 2, 3)
        kp_source = kp_detector(source)
        kp_driving_initial = kp_detector(driving[:, :, 0])
 
        for frame_idx in tqdm(range(driving.shape[2])):
            driving_frame = driving[:, :, frame_idx]
            if not cpu:
                driving_frame = driving_frame.cuda()
            kp_driving = kp_detector(driving_frame)
            kp_norm = normalize_kp(kp_source=kp_source, kp_driving=kp_driving,
                                   kp_driving_initial=kp_driving_initial, use_relative_movement=relative,
                                   use_relative_jacobian=relative, adapt_movement_scale=adapt_movement_scale)
            out = generator(source, kp_source=kp_source, kp_driving=kp_norm)
 
            predictions.append(np.transpose(out['prediction'].data.cpu().numpy(), [0, 2, 3, 1])[0])
    return predictions
 
 
def find_best_frame(source, driving, cpu=False):
    import face_alignment
 
    def normalize_kp(kp):
        kp = kp - kp.mean(axis=0, keepdims=True)
        area = ConvexHull(kp[:, :2]).volume
        area = np.sqrt(area)
        kp[:, :2] = kp[:, :2] / area
        return kp
 
    fa = face_alignment.FaceAlignment(face_alignment.LandmarksType._2D, flip_input=True,
                                      device='cpu' if cpu else 'cuda')
    kp_source = fa.get_landmarks(255 * source)[0]
    kp_source = normalize_kp(kp_source)
    norm = float('inf')
    frame_num = 0
    for i, image in tqdm(enumerate(driving)):
        kp_driving = fa.get_landmarks(255 * image)[0]
        kp_driving = normalize_kp(kp_driving)
        new_norm = (np.abs(kp_source - kp_driving) ** 2).sum()
        if new_norm < norm:
            norm = new_norm
            frame_num = i
    return frame_num
 
 
def h_interface(input_image: str):
    parser = ArgumentParser()
    opt = parser.parse_args()
    opt.config = "./config/vox-256.yaml"
    opt.checkpoint = "./checkpoint/vox-cpk.pth.tar"
    opt.source_image = input_image
    opt.driving_video = "./data/input/ts.mp4"
    opt.result_video = "./data/result/{}.mp4".format(uuid.uuid1().hex)
    opt.relative = True
    opt.adapt_scale = True
    opt.cpu = True
    opt.find_best_frame = False
    opt.best_frame = None  # must be None (not False), otherwise the best-frame branch below always runs
    # source_image = imageio.imread(opt.source_image)
    source_image = opt.source_image  # gr.Image already passes the upload in as a numpy array
    reader = imageio.get_reader(opt.driving_video)
    fps = reader.get_meta_data()['fps']
    driving_video = []
    try:
        for im in reader:
            driving_video.append(im)
    except RuntimeError:
        pass
    reader.close()
 
    source_image = resize(source_image, (256, 256))[..., :3]
    driving_video = [resize(frame, (256, 256))[..., :3] for frame in driving_video]
    generator, kp_detector = load_checkpoints(config_path=opt.config, checkpoint_path=opt.checkpoint, cpu=opt.cpu)
 
    if opt.find_best_frame or opt.best_frame is not None:
        i = opt.best_frame if opt.best_frame is not None else find_best_frame(source_image, driving_video, cpu=opt.cpu)
        print("Best frame: " + str(i))
        driving_forward = driving_video[i:]
        driving_backward = driving_video[:(i + 1)][::-1]
        predictions_forward = make_animation(source_image, driving_forward, generator, kp_detector,
                                             relative=opt.relative, adapt_movement_scale=opt.adapt_scale, cpu=opt.cpu)
        predictions_backward = make_animation(source_image, driving_backward, generator, kp_detector,
                                              relative=opt.relative, adapt_movement_scale=opt.adapt_scale, cpu=opt.cpu)
        predictions = predictions_backward[::-1] + predictions_forward[1:]
    else:
        predictions = make_animation(source_image, driving_video, generator, kp_detector, relative=opt.relative,
                                     adapt_movement_scale=opt.adapt_scale, cpu=opt.cpu)
    imageio.mimsave(opt.result_video, [img_as_ubyte(frame) for frame in predictions], fps=fps)
    return opt.result_video
 
 
if __name__ == "__main__":
    demo = gr.Interface(h_interface, inputs=[gr.Image(shape=(500, 500))], outputs=[gr.Video()])
 
    demo.launch()
    # h_interface("C:\Users\huyi\Desktop\xx3.jpg")

Code notes

1. The body of the main function in the original demo.py was rewritten as the h_interface method; its input is the image you want to drive.

2. The driving_video parameter uses a short expression video I recorded myself, ts.mp4; I suggest recording a clip with your phone and swapping it in.

3. gradio is used to generate a page for the method, shown further below.

4. uuid is used to name the result video.
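If you'd rather call the method without the web UI, here is a minimal sketch (the image path is a placeholder). Note that gr.Image hands h_interface a numpy array, so passing a raw file path, as in the commented-out line at the bottom of the script, would fail at resize; read the image first:

import imageio

img = imageio.imread("./data/input/sample.jpg")  # hypothetical sample image; use your own
result_path = h_interface(img)
print(result_path)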

The run output is as follows:

Running on local URL:  http://127.0.0.1:7860/
To create a public link, set `share=True` in `launch()`.

Open the local address: http://localhost:7860/

You can see the interactive interface we built below:

Upload the sample image I prepared and submit it.

Take a look at the run log, shown in the figure below.

And take a look at the result.

Since I can't upload a video here, I converted it to a GIF.
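For reference, a minimal MP4-to-GIF conversion sketch with imageio (the file names are placeholders):

import imageio

reader = imageio.get_reader("./data/result/result.mp4")  # hypothetical result file name
fps = reader.get_meta_data()["fps"]
imageio.mimsave("./data/result/result.gif", [frame for frame in reader], fps=fps)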

Quite fun overall. I won't go into detailed parameter tuning here; you can adjust the parameters inside the method I provided as needed.

That's the full walkthrough of making photos move with Python and first-order-model. For more on animating photos with Python, see the other related articles on it145.com!

