rough version
ZPdesu committed Dec 18, 2021
1 parent 69e4e92 commit 6371f88
Showing 100 changed files with 8,549 additions and 0 deletions.
139 changes: 139 additions & 0 deletions .gitignore
@@ -0,0 +1,139 @@


# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
.python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

wandb/
*.lmdb/
*.pkl
*.pt
.idea
cache
pretrained_models/seg.pth
50 changes: 50 additions & 0 deletions align_face.py
@@ -0,0 +1,50 @@
import dlib
from pathlib import Path
import argparse
import torchvision
from utils.drive import open_url
from utils.shape_predictor import align_face
import PIL

parser = argparse.ArgumentParser(description='Align_face')

parser.add_argument('-unprocessed_dir', type=str, default='unprocessed', help='directory with unprocessed images')
parser.add_argument('-output_dir', type=str, default='input/face', help='output directory')

parser.add_argument('-output_size', type=int, default=1024, help='size to downscale the input images to, must be power of 2')
parser.add_argument('-seed', type=int, help='manual seed to use')
parser.add_argument('-cache_dir', type=str, default='cache', help='cache directory for model weights')

###############
parser.add_argument('-inter_method', type=str, default='bicubic')



args = parser.parse_args()

cache_dir = Path(args.cache_dir)
cache_dir.mkdir(parents=True, exist_ok=True)

output_dir = Path(args.output_dir)
output_dir.mkdir(parents=True,exist_ok=True)

print("Downloading Shape Predictor")
f=open_url("https://drive.google.com/uc?id=1huhv8PYpNNKbGCLOaYUjOgR1pY5pmbJx", cache_dir=cache_dir, return_path=True)
predictor = dlib.shape_predictor(f)

for im in Path(args.unprocessed_dir).glob("*.*"):
    faces = align_face(str(im), predictor)

    for i, face in enumerate(faces):
        if args.output_size:
            factor = 1024 // args.output_size
            assert args.output_size * factor == 1024
            face_tensor = torchvision.transforms.ToTensor()(face).unsqueeze(0).cuda()
            face_tensor_lr = face_tensor[0].cpu().detach().clamp(0, 1)
            face = torchvision.transforms.ToPILImage()(face_tensor_lr)
            if factor != 1:
                face = face.resize((args.output_size, args.output_size), PIL.Image.LANCZOS)
        if len(faces) > 1:
            face.save(Path(args.output_dir) / (im.stem + f"_{i}.png"))
        else:
            face.save(Path(args.output_dir) / (im.stem + ".png"))
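
A minimal sketch of calling the alignment utilities directly rather than through this script, assuming open_url and align_face behave as they are used above (open_url returns a local path to the downloaded predictor weights, align_face returns a list of PIL images); the example image path is hypothetical.

from pathlib import Path
import dlib
from utils.drive import open_url
from utils.shape_predictor import align_face

cache_dir = Path("cache")
cache_dir.mkdir(parents=True, exist_ok=True)
output_dir = Path("input/face")
output_dir.mkdir(parents=True, exist_ok=True)

# Download (or reuse from cache) the dlib shape predictor, as the script does above.
weights = open_url("https://drive.google.com/uc?id=1huhv8PYpNNKbGCLOaYUjOgR1pY5pmbJx",
                   cache_dir=cache_dir, return_path=True)
predictor = dlib.shape_predictor(weights)

# Hypothetical input image; one image may yield several aligned faces.
faces = align_face("unprocessed/example.jpg", predictor)
for i, face in enumerate(faces):
    face.save(output_dir / f"example_{i}.png")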
5 changes: 5 additions & 0 deletions bash.sh
@@ -0,0 +1,5 @@
#!/usr/bin/env bash

python main.py --im_path1 90.png --im_path2 15.png --im_path3 117.png


Empty file added datasets/__init__.py
Empty file.
39 changes: 39 additions & 0 deletions datasets/image_dataset.py
@@ -0,0 +1,39 @@
from torch.utils.data import Dataset
from PIL import Image
import PIL
from utils import data_utils
import torchvision.transforms as transforms
import os

class ImagesDataset(Dataset):

    def __init__(self, opts, image_path=None):
        if not image_path:
            image_root = opts.input_dir
            self.image_paths = sorted(data_utils.make_dataset(image_root))
        elif isinstance(image_path, str):
            self.image_paths = [image_path]
        elif isinstance(image_path, list):
            self.image_paths = image_path

        self.image_transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])
        self.opts = opts

    def __len__(self):
        return len(self.image_paths)

    def __getitem__(self, index):
        im_path = self.image_paths[index]
        im_H = Image.open(im_path).convert('RGB')
        im_L = im_H.resize((256, 256), PIL.Image.LANCZOS)
        im_name = os.path.splitext(os.path.basename(im_path))[0]
        if self.image_transform:
            im_H = self.image_transform(im_H)
            im_L = self.image_transform(im_L)

        return im_H, im_L, im_name
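
A minimal usage sketch: opts only needs the input_dir attribute read in __init__ above, so a SimpleNamespace stands in here for the project's real options object, and the directory name is illustrative.

from types import SimpleNamespace
from torch.utils.data import DataLoader
from datasets.image_dataset import ImagesDataset

opts = SimpleNamespace(input_dir="input/face")  # hypothetical directory of aligned faces
dataset = ImagesDataset(opts)
loader = DataLoader(dataset, batch_size=1, shuffle=False)

for im_H, im_L, im_name in loader:
    # im_H: full-resolution image tensor normalized to [-1, 1]
    # im_L: the same image resized to 256x256, then normalized
    # im_name: filename stem (the DataLoader collates it into a list of strings)
    print(im_name, im_H.shape, im_L.shape)
    break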



Binary file modified docs/Barbershop.pdf
Binary file not shown.