diff --git a/huggingface/HF/NN/CInterpolantVisualization.py b/huggingface/HF/NN/CInterpolantVisualization.py
index 1a0cac1..6b872e2 100644
--- a/huggingface/HF/NN/CInterpolantVisualization.py
+++ b/huggingface/HF/NN/CInterpolantVisualization.py
@@ -1,4 +1,5 @@
 import hashlib, json, os, time, glob, sys
+import tensorflow as tf
 import numpy as np
 import math
 import imageio
@@ -358,10 +359,11 @@ def _process(self,
   def _blurred(self,
     VT_fps, VT_seed, VT_initialValues, VT_resolution,
     VT_maxBlurRadius, VT_blurSteps,
+    targetResolution, # replace the target resolution with the VT_resolution
     **kwargs
   ):
     VT_resolution = int(VT_resolution)
-    assert 64 <= VT_resolution <= 512, 'Unexpected resolution'
+    assert 128 <= VT_resolution <= 512, 'Unexpected resolution'
     videoFileName = self._generateFilename(
       VT_fps=VT_fps, VT_seed=VT_seed, VT_initialValues=VT_initialValues,
       VT_resolution=VT_resolution, VT_maxBlurRadius=VT_maxBlurRadius, VT_blurSteps=VT_blurSteps,
@@ -369,6 +371,7 @@ def _blurred(self,
     )
 
     blurRadius = np.linspace(0.0, float(VT_maxBlurRadius), int(VT_blurSteps))[::-1]
+    blurRadius = blurRadius.astype(np.float32)
     initialValues = self._initialValuesFor(
       initialValues=VT_initialValues, seed=VT_seed, N=VT_resolution ** 2
     )
@@ -421,7 +424,7 @@ def __call__(self,
     if VT_kind is None: return self._model(**kwargs)
     VT_seed = None if VT_seed < 0 else int(VT_seed)
     VT_fps = int(VT_fps)
-    assert VT_kind in ['trajectories', 'process'], f'Unexpected VT_kind: {VT_kind}'
+    assert VT_kind in ['trajectories', 'process', 'blurred'], f'Unexpected VT_kind: {VT_kind}'
 
     if VT_kind == 'trajectories':
       return self._trajectories(
diff --git a/huggingface/HF/UI/areas/visualizeArea.py b/huggingface/HF/UI/areas/visualizeArea.py
index 31dbb80..3f1ab0c 100644
--- a/huggingface/HF/UI/areas/visualizeArea.py
+++ b/huggingface/HF/UI/areas/visualizeArea.py
@@ -33,7 +33,7 @@ def imageArea(submit, parameters):
     Here you can visualize the image restoration process step by step.
     ''')
     VT_resolution = gr.Slider(
-      label='Resolution', value=64, minimum=8, maximum=512, step=1
+      label='Resolution', value=128, minimum=128, maximum=512, step=1
     )
     # Radio buttons for the displayed values. 'value' or 'x0
     VT_show = gr.Radio(
@@ -53,6 +53,34 @@ def imageArea(submit, parameters):
       **parameters,
       outputs={'video': video}
     )
+  return
+
+def blurVideoArea(submit, parameters):
+  with gr.Tab(label='Blurred'):
+    VT_resolution = gr.Slider(
+      label='Resolution', value=128, minimum=128, maximum=512, step=1
+    )
+    VT_maxBlurRadius = gr.Slider(
+      label='Max Blur Radius', value=8, minimum=1, maximum=100, step=1
+    )
+    VT_blurSteps = gr.Slider(
+      label='Blur Steps', value=10, minimum=1, maximum=100, step=1
+    )
+    button = gr.Button(value='Visualize')
+    video = gr.Video(label='Video', interactive=False)
+    # Hidden kind parameter
+    kind = gr.Textbox(label='Kind', value='blurred', visible=False)
+    # bind button
+    submit(
+      btn=button,
+      VT_kind=kind,
+      VT_resolution=VT_resolution,
+      VT_maxBlurRadius=VT_maxBlurRadius,
+      VT_blurSteps=VT_blurSteps,
+      **parameters,
+      outputs={'video': video}
+    )
+  return
 
 def visualizeArea(submit):
   with gr.Accordion('Visualize Area', open=False):
@@ -73,5 +101,6 @@ def visualizeArea(submit):
     )
     trajectoriesArea(submit, parameters)
     imageArea(submit, parameters)
+    blurVideoArea(submit, parameters)
     pass
   return
\ No newline at end of file
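
For context on the new `blurred` visualization kind: `_blurred` builds a descending blur-radius schedule with `np.linspace(...)[::-1]`, now cast to float32, and the accepted resolution range is tightened to 128..512. Below is a minimal standalone sketch of that schedule logic under those assumptions; it is not the repository's implementation, and `makeBlurSchedule` is a hypothetical helper name.

    import numpy as np

    def makeBlurSchedule(maxBlurRadius, blurSteps, resolution):
      # Hypothetical helper mirroring the schedule built inside _blurred in the diff.
      resolution = int(resolution)
      # The diff tightens the accepted range from 64..512 to 128..512.
      assert 128 <= resolution <= 512, 'Unexpected resolution'
      # Descending radii: start at the maximum blur and end at 0.0 (no blur),
      # cast to float32 as in the added line of the diff.
      radii = np.linspace(0.0, float(maxBlurRadius), int(blurSteps))[::-1]
      return radii.astype(np.float32)

    # Example: the UI defaults (Max Blur Radius = 8, Blur Steps = 10, Resolution = 128)
    # yield ten float32 radii [8.0, 7.11, ..., 0.0].
    print(makeBlurSchedule(8, 10, 128))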